/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <kperf/buffer.h>
#include <kern/thread.h>
#if defined(__arm64__) || defined(__arm__)
#include <arm/cpu_data_internal.h>
#endif

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

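/*
 * kperf action IDs for kpc counters.  kpc_set_actionid() fills these in and
 * kpc_sample_kperf() runs the requested action when a PMI is handled; the
 * per-counter indexing is presumably provided by the machine-dependent
 * FIXED_ACTIONID / CONFIGURABLE_ACTIONID macros.
 */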
uint32_t kpc_actionid[KPC_MAX_COUNTERS];

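/*
 * Buffer sizing: each CPU needs room for every possible counter as a 64-bit
 * value, and a full user-facing buffer covers the maximum number of logical
 * CPUs.
 */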
#define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t))
#define COUNTERBUF_SIZE         (machine_info.logical_cpu_max * \
                                 COUNTERBUF_SIZE_PER_CPU)

/* locks */
static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL;
static lck_grp_t *kpc_config_lckgrp = NULL;
static lck_mtx_t kpc_config_lock;

/* state specifying if all counters have been requested by kperf */
static boolean_t force_all_ctrs = FALSE;

/* power manager */
static kpc_pm_handler_t kpc_pm_handler;
static boolean_t kpc_pm_has_custom_config;
static uint64_t kpc_pm_pmc_mask;
#if MACH_ASSERT
static bool kpc_calling_pm = false;
#endif /* MACH_ASSERT */

boolean_t kpc_context_switch_active = FALSE;
bool kpc_supported = true;

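/* Set up the lock group and mutex that serialize kpc configuration changes. */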
void
kpc_common_init(void)
{
	kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
	kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
	lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}

boolean_t
kpc_register_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	assert(cpu_data->cpu_kpc_buf[0] == NULL);
	assert(cpu_data->cpu_kpc_buf[1] == NULL);
	assert(cpu_data->cpu_kpc_shadow == NULL);
	assert(cpu_data->cpu_kpc_reload == NULL);

	/*
	 * Buffers allocated through kpc_counterbuf_alloc() are large enough to
	 * store all PMC values from all CPUs, which mirrors the userspace API.
	 * That layout is a poor fit for the per-CPU kpc buffers, since:
	 * 1. Per-CPU buffers don't need to be that large.
	 * 2. The actual number of CPUs is not known at this point.
	 *
	 * CPUs call out into kpc as they are registered, so allocate the
	 * per-CPU memory here instead.
	 */

	if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;
	if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL)
		goto error;

	memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU);
	memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU);

	/* success */
	return TRUE;

error:
	kpc_unregister_cpu(cpu_data);
	return FALSE;
}

void
kpc_unregister_cpu(struct cpu_data *cpu_data)
{
	assert(cpu_data);
	if (cpu_data->cpu_kpc_buf[0] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[0] = NULL;
	}
	if (cpu_data->cpu_kpc_buf[1] != NULL) {
		kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_buf[1] = NULL;
	}
	if (cpu_data->cpu_kpc_shadow != NULL) {
		kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_shadow = NULL;
	}
	if (cpu_data->cpu_kpc_reload != NULL) {
		kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU);
		cpu_data->cpu_kpc_reload = NULL;
	}
}


static void
kpc_task_set_forced_all_ctrs(task_t task, boolean_t state)
{
	assert(task);

	task_lock(task);
	if (state)
		task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS;
	else
		task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS;
	task_unlock(task);
}

static boolean_t
kpc_task_get_forced_all_ctrs(task_t task)
{
	assert(task);
	return task->t_kpc & TASK_KPC_FORCED_ALL_CTRS ? TRUE : FALSE;
}

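/*
 * Acquire (val != 0) or release (val == 0) all counters on behalf of a task.
 * Fails with EACCES if another task already holds them; otherwise the power
 * manager is notified that the counters are being taken away or returned,
 * and the task's TASK_KPC_FORCED_ALL_CTRS bit is updated to match.
 */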
int
kpc_force_all_ctrs(task_t task, int val)
{
	boolean_t new_state = val ? TRUE : FALSE;
	boolean_t old_state = kpc_get_force_all_ctrs();

	/*
	 * Refuse to do the operation if the counters are already forced by
	 * another task.
	 */
	if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task))
		return EACCES;

	/* nothing to do if the state is not changing */
	if (old_state == new_state)
		return 0;

	/* notify the power manager */
	if (kpc_pm_handler) {
#if MACH_ASSERT
		kpc_calling_pm = true;
#endif /* MACH_ASSERT */
		kpc_pm_handler( new_state ? FALSE : TRUE );
#if MACH_ASSERT
		kpc_calling_pm = false;
#endif /* MACH_ASSERT */
	}

	/*
	 * This is a force -- ensure that counters are forced, even if power
	 * management fails to acknowledge it.
	 */
	if (force_all_ctrs != new_state) {
		force_all_ctrs = new_state;
	}

	/* update the task bits */
	kpc_task_set_forced_all_ctrs(task, new_state);

	return 0;
}

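/*
 * Called back by the power manager, from within its handler (see the
 * kpc_calling_pm assertion), to acknowledge whether the configurable
 * counters are now available to it; force_all_ctrs is cleared when they
 * are and set when they are being taken away.
 */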
void
kpc_pm_acknowledge(boolean_t available_to_pm)
{
	/*
	 * Force-all-counters should still be true when the counters are being
	 * made available to power management and false when counters are going
	 * to be taken away.
	 */
	assert(force_all_ctrs == available_to_pm);
	/*
	 * Make sure power management isn't playing games with us.
	 */
	assert(kpc_calling_pm == true);

	/*
	 * Counters being available means no one is forcing all counters.
	 */
	force_all_ctrs = available_to_pm ? FALSE : TRUE;
}

int
kpc_get_force_all_ctrs(void)
{
	return force_all_ctrs;
}

boolean_t
kpc_multiple_clients(void)
{
	return kpc_pm_handler != NULL;
}

boolean_t
kpc_controls_fixed_counters(void)
{
	return !kpc_pm_handler || force_all_ctrs || !kpc_pm_has_custom_config;
}

boolean_t
kpc_controls_counter(uint32_t ctr)
{
	uint64_t pmc_mask = 0ULL;

	assert(ctr < (kpc_fixed_count() + kpc_configurable_count()));

	if (ctr < kpc_fixed_count())
		return kpc_controls_fixed_counters();

	/*
	 * By default kpc manages all PMCs, but if the Power Manager registered
	 * with custom_config=TRUE, the Power Manager manages its reserved PMCs.
	 * However, kpc takes ownership back if a task acquired all PMCs via
	 * force_all_ctrs.
	 */
	pmc_mask = (1ULL << (ctr - kpc_fixed_count()));
	if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs)
		return FALSE;

	return TRUE;
}

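/* Return the mask of counter classes that currently have counters running. */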
uint32_t
kpc_get_running(void)
{
	uint64_t pmc_mask = 0;
	uint32_t cur_state = 0;

	if (kpc_is_running_fixed())
		cur_state |= KPC_CLASS_FIXED_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
	if (kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_CONFIGURABLE_MASK;

	pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
	if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask))
		cur_state |= KPC_CLASS_POWER_MASK;

	return cur_state;
}

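/*
 * Read the current CPU's counters for the requested classes into buf, laid
 * out in class order (fixed, then configurable, then power), and return the
 * number of 64-bit values written.
 */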
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0, offset = 0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

/* generic counter reading function, public api */
int
kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes,
                     int *curcpu, uint64_t *buf)
{
	assert(buf);

	/*
	 * Unlike reading the current CPU's counters, reading counters from all
	 * CPUs is architecture dependent. This lets kpc make the most of the
	 * platform when memory-mapped counter registers are supported.
	 */
	if (all_cpus)
		return kpc_get_all_cpus_counters(classes, curcpu, buf);
	else
		return kpc_get_curcpu_counters(classes, curcpu, buf);
}

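/*
 * Copy the per-CPU shadow counter values (FIXED_SHADOW_CPU /
 * CONFIGURABLE_SHADOW_CPU, presumably maintained by the machine-dependent
 * layer) into buf instead of reading the hardware, using the same per-class
 * layout as kpc_get_cpu_counters().  Returns the number of 64-bit values
 * written.
 */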
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}

uint32_t
kpc_get_counter_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes);
		uint32_t pmc_cnt = kpc_popcount(pmc_msk);
		count += pmc_cnt;
	}

	return count;
}

uint32_t
kpc_get_config_count(uint32_t classes)
{
	uint32_t count = 0;

	if (classes & KPC_CLASS_FIXED_MASK)
		count += kpc_fixed_config_count();

	if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes);
		count += kpc_configurable_config_count(pmc_mask);
	}

	if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients())
		count += kpc_rawpmu_config_count();

	return count;
}

int
kpc_get_config(uint32_t classes, kpc_config_t *current_config)
{
	uint32_t count = 0;

	assert(current_config);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		uint64_t pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_config(&current_config[count], pmc_mask);
		count += kpc_get_config_count(KPC_CLASS_POWER_MASK);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		/*
		 * Clients shouldn't ask for config words that aren't
		 * available; most likely they would misinterpret the
		 * returned buffer if we allowed this.
		 */
		if (kpc_multiple_clients()) {
			return EPERM;
		}
		kpc_get_rawpmu_config(&current_config[count]);
		count += kpc_get_config_count(KPC_CLASS_RAWPMU_MASK);
	}

	return 0;
}

int
kpc_set_config(uint32_t classes, kpc_config_t *configv)
{
	int ret = 0;
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = configv,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(configv);

	/* don't allow RAWPMU configuration when sharing counters */
	if ((classes & KPC_CLASS_RAWPMU_MASK) && kpc_multiple_clients()) {
		return EPERM;
	}

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	ret = kpc_set_config_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return ret;
}

uint32_t
kpc_get_counterbuf_size(void)
{
	return COUNTERBUF_SIZE;
}

/* allocate a buffer large enough for all possible counters */
uint64_t *
kpc_counterbuf_alloc(void)
{
	uint64_t *buf = NULL;

	buf = kalloc_tag(COUNTERBUF_SIZE, VM_KERN_MEMORY_DIAG);
	if (buf) {
		bzero(buf, COUNTERBUF_SIZE);
	}

	return buf;
}

void
kpc_counterbuf_free(uint64_t *buf)
{
	if (buf) {
		kfree(buf, COUNTERBUF_SIZE);
	}
}

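/*
 * Take a kperf sample for the given action in the context of the current
 * thread and task; the trigger is reported as TRIGGER_TYPE_PMI, so this is
 * intended to be called from PMI handling.
 */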
void
kpc_sample_kperf(uint32_t actionid)
{
	struct kperf_sample sbuf;

	BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);

	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_PMI,
		.trigger_id = 0,
	};

	int r = kperf_sample(&sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);

	BUF_INFO(PERF_KPC_HNDLR | DBG_FUNC_END, r);
}

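/*
 * Set the PMI period for each counter in the given classes; val holds one
 * period per counter.  The machine layer presumably converts each period to
 * a reload value (kpc_get_period() performs the inverse, max - reload).
 */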
int
kpc_set_period(uint32_t classes, uint64_t *val)
{
	struct kpc_config_remote mp_config = {
		.classes = classes, .configv = val,
		.pmc_mask = kpc_get_configurable_pmc_mask(classes)
	};

	assert(val);

	/* no clients have the right to modify both classes */
	if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) &&
	    (classes & (KPC_CLASS_POWER_MASK))) {
		return EPERM;
	}

	lck_mtx_lock(&kpc_config_lock);

#ifdef FIXED_COUNTER_SHADOW
	if ((classes & KPC_CLASS_FIXED_MASK) && !kpc_controls_fixed_counters()) {
		lck_mtx_unlock(&kpc_config_lock);
		return EPERM;
	}
#else
	if (classes & KPC_CLASS_FIXED_MASK) {
		lck_mtx_unlock(&kpc_config_lock);
		return EINVAL;
	}
#endif

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	kprintf("setting period %u\n", classes);
	kpc_set_period_arch( &mp_config );

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_period(uint32_t classes, uint64_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		/* convert reload values to periods */
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		for (uint32_t i = 0; i < count; ++i)
			*val++ = kpc_fixed_max() - FIXED_RELOAD(i);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		/* convert reload values to periods */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}


int
kpc_set_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	/* NOTE: what happens if a pmi occurs while actionids are being
	 * set is undefined. */
	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				CONFIGURABLE_ACTIONID(i) = *val++;
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

int
kpc_get_actionid(uint32_t classes, uint32_t *val)
{
	uint32_t count = 0;
	uint64_t pmc_mask = 0ULL;

	assert(val);

	lck_mtx_lock(&kpc_config_lock);

	if (classes & KPC_CLASS_FIXED_MASK) {
		count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
		memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t));
		val += count;
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i)
			if ((1ULL << i) & pmc_mask)
				*val++ = CONFIGURABLE_ACTIONID(i);
	}

	lck_mtx_unlock(&kpc_config_lock);

	return 0;
}

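/*
 * Start the counter classes in `classes` and stop the rest: every available
 * configurable PMC is targeted, and only those belonging to a requested
 * class are marked as running in the state mask handed to the arch layer.
 */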
int
kpc_set_running(uint32_t classes)
{
	uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK;
	struct kpc_running_remote mp_config = {
		.classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL
	};

	/* target all available PMCs */
	mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes);

	/* translate the power class for the machine layer */
	if (classes & KPC_CLASS_POWER_MASK)
		mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK;

	/* generate the desired state of each configurable PMC */
	mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes);

	return kpc_set_running_arch(&mp_config);
}

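/*
 * Convenience wrapper for the power manager: reserves configurable PMCs 3-5
 * (mask 0x38) with a custom configuration.
 */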
boolean_t
kpc_register_pm_handler(kpc_pm_handler_t handler)
{
	return kpc_reserve_pm_counters(0x38, handler, TRUE);
}

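/*
 * Reserve the configurable PMCs in pmc_mask for the power manager.  Returns
 * FALSE if a task is currently forcing all counters (so the reserved PMCs
 * are not available to the PM yet), TRUE otherwise.
 */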
boolean_t
kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler,
                        boolean_t custom_config)
{
	uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1;
	uint64_t req_mask = 0ULL;

	/* pre-condition */
	assert(handler != NULL);
	assert(kpc_pm_handler == NULL);

	/* check number of counters requested */
	req_mask = (pmc_mask & all_mask);
	assert(kpc_popcount(req_mask) <= kpc_configurable_count());

	/* save the power manager states */
	kpc_pm_has_custom_config = custom_config;
	kpc_pm_pmc_mask = req_mask;
	kpc_pm_handler = handler;

	printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n",
	       req_mask, custom_config);

	/* post-condition */
	{
		uint32_t cfg_count = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);
		uint32_t pwr_count = kpc_popcount(kpc_pm_pmc_mask);
#pragma unused(cfg_count, pwr_count)
		assert((cfg_count + pwr_count) == kpc_configurable_count());
	}

	return force_all_ctrs ? FALSE : TRUE;
}

void
kpc_release_pm_counters(void)
{
	/* pre-condition */
	assert(kpc_pm_handler != NULL);

	/* release the counters */
	kpc_pm_has_custom_config = FALSE;
	kpc_pm_pmc_mask = 0ULL;
	kpc_pm_handler = NULL;

	printf("kpc: pm released counters\n");

	/* post-condition */
	assert(kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK) == kpc_configurable_count());
}

uint8_t
kpc_popcount(uint64_t value)
{
	return __builtin_popcountll(value);
}

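/*
 * Compute which configurable PMCs back the requested classes.  As a worked
 * example (hypothetical values): with 6 configurable counters and the PM
 * holding kpc_pm_pmc_mask = 0x38 without a custom config, the CONFIGURABLE
 * class maps to 0x07 and the POWER class to 0x38; if a task forces all
 * counters, CONFIGURABLE becomes 0x3f and POWER becomes empty.
 */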
uint64_t
kpc_get_configurable_pmc_mask(uint32_t classes)
{
	uint32_t configurable_count = kpc_configurable_count();
	uint64_t cfg_mask = 0ULL, pwr_mask = 0ULL, all_cfg_pmcs_mask = 0ULL;

	/* no configurable classes requested or no configurable counters present */
	if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) ||
	    (configurable_count == 0)) {
		goto exit;
	}

	assert(configurable_count < 64);
	all_cfg_pmcs_mask = (1ULL << configurable_count) - 1;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		if (force_all_ctrs == TRUE)
			cfg_mask |= all_cfg_pmcs_mask;
		else
			cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask;
	}

	/*
	 * The power class exists iff:
	 *  - no task has acquired all PMCs via force_all_ctrs, and
	 *  - the PM registered and relies on kpc (no custom config) to
	 *    interact with its PMCs.
	 */
	if ((force_all_ctrs == FALSE) &&
	    (kpc_pm_handler != NULL) &&
	    (kpc_pm_has_custom_config == FALSE) &&
	    (classes & KPC_CLASS_POWER_MASK)) {
		pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask;
	}

exit:
	/* post-conditions */
	assert( ((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 );
	assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count() );
	assert( (cfg_mask & pwr_mask) == 0ULL );

	return cfg_mask | pwr_mask;
}