/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef KERN_MONOTONIC_H
#define KERN_MONOTONIC_H

#if CONFIG_CPU_COUNTERS

#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

extern bool mt_debug;
extern _Atomic uint64_t mt_pmis;
extern _Atomic uint64_t mt_retrograde;

void mt_fixed_counts(uint64_t *counts);
uint64_t mt_cur_cpu_instrs(void);
uint64_t mt_cur_cpu_cycles(void);
void mt_cur_cpu_cycles_instrs_speculative(uint64_t *cycles, uint64_t *instrs);
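
/*
 * Illustrative sketch only (not part of the interface): a caller could use
 * the per-CPU readers above to measure a region of code, for example to
 * estimate cycles per instruction.  Whether the caller must disable
 * preemption or otherwise stay on one CPU around the reads is an assumption
 * this header does not specify.
 *
 *	uint64_t cycles_begin = mt_cur_cpu_cycles();
 *	uint64_t instrs_begin = mt_cur_cpu_instrs();
 *	// ... region of interest ...
 *	uint64_t cycles_elapsed = mt_cur_cpu_cycles() - cycles_begin;
 *	uint64_t instrs_elapsed = mt_cur_cpu_instrs() - instrs_begin;
 */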

bool mt_acquire_counters(void);
bool mt_owns_counters(void);
void mt_ownership_change(bool available);
void mt_release_counters(void);
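
/*
 * Counter ownership arbitration.  Illustrative sketch only -- the precise
 * semantics (e.g. what mt_ownership_change() communicates to a previous
 * owner) are not documented in this header, so the success convention
 * assumed below is exactly that, an assumption:
 *
 *	if (mt_acquire_counters()) {
 *		// counters are exclusively ours; mt_owns_counters() is true
 *		mt_release_counters();
 *	}
 */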

__END_DECLS

#if MACH_KERNEL_PRIVATE

#include <kern/thread.h>
#include <kern/task.h>
#include <stdbool.h>

__BEGIN_DECLS

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#elif defined(__x86_64__)
#include <i386/cpu_data.h>
#else /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */
#error unsupported architecture
#endif /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */

void mt_update_fixed_counts(void);

/*
 * Private API for the platform layers.
 */

/*
 * Called once early in boot, before CPU initialization occurs (where
 * `mt_cpu_up` is called).
 *
 * This allows monotonic to detect whether the hardware supports performance
 * counters and to install the global PMI handler.
 */
void mt_early_init(void);
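
/*
 * Illustrative ordering sketch, restating the comment above: mt_early_init()
 * runs once, before each CPU is later brought up with mt_cpu_up() (declared
 * below).
 *
 *	mt_early_init();        // once, early in boot
 *	// ... later, during CPU initialization, for each CPU ...
 *	mt_cpu_up(cpu);
 */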

/*
 * Called when a core enters idle (mt_cpu_idle) and when it exits idle to run
 * again (mt_cpu_run).
 */
void mt_cpu_idle(cpu_data_t *cpu);
void mt_cpu_run(cpu_data_t *cpu);
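
/*
 * Illustrative sketch, assuming the platform idle path brackets its
 * low-power wait with these calls; the exact call sites are
 * platform-specific and not specified here:
 *
 *	mt_cpu_idle(cpu);
 *	// ... wait for interrupt / low-power state ...
 *	mt_cpu_run(cpu);
 */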

/*
 * Called when a core is shutting down (mt_cpu_down) or powering up
 * (mt_cpu_up).
 */
void mt_cpu_down(cpu_data_t *cpu);
void mt_cpu_up(cpu_data_t *cpu);

/*
 * Called while single-threaded when the system is going to sleep.
 */
void mt_sleep(void);

/*
 * Called on each CPU as the system is waking from sleep.
 */
void mt_wake_per_core(void);

/*
 * "Up-call" to the Mach layer to update counters from a PMI.
 */
uint64_t mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr);

/*
 * Private API for the performance controller callout.
 */
void mt_perfcontrol(uint64_t *instrs, uint64_t *cycles);
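
/*
 * Illustrative sketch only: the callout presumably fills both out-parameters
 * for the current CPU; this usage is an assumption, not a documented
 * contract of the interface:
 *
 *	uint64_t instrs, cycles;
 *	mt_perfcontrol(&instrs, &cycles);
 */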

/*
 * Private API for microstackshot.
 */
typedef void (*mt_pmi_fn)(bool user_mode, void *ctx);
int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn fn,
    void *ctx);
int mt_microstackshot_stop(void);
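
/*
 * Illustrative sketch only: a client could request a PMI-driven callback
 * roughly every `period` events of counter `ctr`.  The 0-on-success return
 * convention assumed here is not stated in this header, and `sample_pmi` is
 * a hypothetical handler.
 *
 *	static void
 *	sample_pmi(bool user_mode, void *ctx)
 *	{
 *		// record a sample; invoked from PMI context
 *	}
 *
 *	if (mt_microstackshot_start(ctr, period, sample_pmi, NULL) == 0) {
 *		// ... sampling in progress ...
 *		(void)mt_microstackshot_stop();
 *	}
 */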

__END_DECLS

#endif /* MACH_KERNEL_PRIVATE */

#endif /* CONFIG_CPU_COUNTERS */

#endif /* !defined(KERN_MONOTONIC_H) */