/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef KERN_MONOTONIC_H
#define KERN_MONOTONIC_H

#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

extern bool mt_debug;
extern _Atomic uint64_t mt_pmis;
extern _Atomic uint64_t mt_retrograde;

void mt_fixed_counts(uint64_t *counts);
void mt_cur_thread_fixed_counts(uint64_t *counts);
void mt_cur_task_fixed_counts(uint64_t *counts);
uint64_t mt_cur_cpu_instrs(void);
uint64_t mt_cur_cpu_cycles(void);
uint64_t mt_cur_thread_instrs(void);
uint64_t mt_cur_thread_cycles(void);
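
/*
 * Example (illustrative sketch, not part of the interface): measuring the
 * instructions and cycles spent by the current thread across a region of
 * code, using only the accessors declared above.  `do_work()` is a
 * hypothetical stand-in for the code being measured.
 *
 *     uint64_t instrs_before = mt_cur_thread_instrs();
 *     uint64_t cycles_before = mt_cur_thread_cycles();
 *
 *     do_work();
 *
 *     uint64_t instrs_used = mt_cur_thread_instrs() - instrs_before;
 *     uint64_t cycles_used = mt_cur_thread_cycles() - cycles_before;
 */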

__END_DECLS

#if MACH_KERNEL_PRIVATE

#include <kern/thread.h>
#include <kern/task.h>
#include <stdbool.h>

__BEGIN_DECLS

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data_internal.h>
#elif defined(__x86_64__)
#include <i386/cpu_data.h>
#else /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */
#error unsupported architecture
#endif /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */

void mt_update_fixed_counts(void);
void mt_update_task(task_t task, thread_t thread);
bool mt_update_thread(thread_t thread);
int mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out);
int mt_fixed_task_counts(task_t task, uint64_t *counts_out);
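
/*
 * Example (illustrative sketch): reading a snapshot of a task's fixed
 * counters and checking for failure.  This assumes the buffer is sized by
 * MT_CORE_NFIXED from the machine-layer monotonic header; the error handling
 * shown is only schematic.
 *
 *     uint64_t counts[MT_CORE_NFIXED] = { 0 };
 *     int error = mt_fixed_task_counts(task, counts);
 *     if (error != 0) {
 *             // counters are unsupported or unavailable
 *     }
 */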

/*
 * Private API for the platform layers.
 */

/*
 * Called once early in boot, before CPU initialization occurs (where
 * `mt_cpu_up` is called).
 *
 * This allows monotonic to detect whether the hardware supports performance
 * counters and to install the global PMI handler.
 */
void mt_early_init(void);
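
/*
 * Typical boot-time ordering (illustrative sketch; the exact call sites live
 * in the platform start-up code, and `cpu_datap` is a hypothetical per-CPU
 * data pointer):
 *
 *     mt_early_init();        // once, before any CPU is brought up
 *     ...
 *     mt_cpu_up(cpu_datap);   // later, as each CPU comes online
 */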

/*
 * Called when a core enters idle (`mt_cpu_idle`) and when it exits from idle
 * (`mt_cpu_run`).
 */
void mt_cpu_idle(cpu_data_t *cpu);
void mt_cpu_run(cpu_data_t *cpu);
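
/*
 * Example (illustrative sketch): pairing the calls around the platform idle
 * path.  `cpu` and `wait_for_interrupt()` are hypothetical stand-ins for the
 * per-CPU data pointer and the architecture's idle instruction.
 *
 *     mt_cpu_idle(cpu);       // about to idle
 *     wait_for_interrupt();
 *     mt_cpu_run(cpu);        // exiting idle
 */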

/*
 * Called when a core is shutting down (`mt_cpu_down`) or powering up
 * (`mt_cpu_up`).
 */
void mt_cpu_down(cpu_data_t *cpu);
void mt_cpu_up(cpu_data_t *cpu);

/*
 * Called while single-threaded when the system is going to sleep.
 */
void mt_sleep(void);

/*
 * Called on each CPU as the system is waking from sleep.
 */
void mt_wake_per_core(void);
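
/*
 * Typical sleep/wake ordering (illustrative sketch; the real call sites are
 * in the platform power-management code):
 *
 *     mt_sleep();             // once, while the system is single-threaded
 *     // ... system sleeps ...
 *     mt_wake_per_core();     // on each CPU as it wakes
 */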

#if __ARM_CLUSTER_COUNT__
/*
 * Called when a cluster is initialized.
 */
void mt_cluster_init(void);
#endif /* __ARM_CLUSTER_COUNT__ */

/*
 * "Up-call" to the Mach layer to update counters from a PMI.
 */
uint64_t mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr);
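
/*
 * Example (illustrative sketch): a platform PMI handler forwarding the
 * overflowed counter to the Mach layer.  `ctr` is the platform's counter
 * index, and treating the return value as the progress since the last update
 * is an assumption made for this sketch.
 *
 *     uint64_t delta = mt_cpu_update_count(cpu, ctr);
 *     // `delta` is assumed to be the events observed since the last update
 */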

/*
 * Private API for the scheduler.
 */

/*
 * Called when a thread is switching off-core or expires its quantum.
 */
void mt_sched_update(thread_t thread);
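
/*
 * Example (illustrative sketch): the context-switch path updating the
 * outgoing thread's counters before it goes off-core.  `old_thread` is a
 * hypothetical name for the thread being switched out.
 *
 *     mt_sched_update(old_thread);
 *     // ... continue switching to the new thread ...
 */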

/*
 * Called when a thread is terminating to save its counters into the task. The
 * task lock must be held and the thread should be removed from the task's
 * thread list in that same critical section.
 */
void mt_terminate_update(task_t task, thread_t thread);
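
/*
 * Example (illustrative sketch): saving a terminating thread's counters while
 * holding the task lock, in the same critical section that removes the thread
 * from the task's thread list.  The locking and list removal are shown only
 * schematically.
 *
 *     task_lock(task);
 *     mt_terminate_update(task, thread);
 *     // ... remove `thread` from the task's thread list ...
 *     task_unlock(task);
 */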

/*
 * Private API for the performance controller callout.
 */
void mt_perfcontrol(uint64_t *instrs, uint64_t *cycles);

/*
 * Private API for stackshot.
 */
void mt_stackshot_thread(thread_t thread, uint64_t *instrs, uint64_t *cycles);
void mt_stackshot_task(task_t task, uint64_t *instrs, uint64_t *cycles);
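
/*
 * Example (illustrative sketch): stackshot filling per-thread instruction and
 * cycle counts through the out-parameters.
 *
 *     uint64_t instrs = 0, cycles = 0;
 *     mt_stackshot_thread(thread, &instrs, &cycles);
 */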

/*
 * Private API for microstackshot.
 */
typedef void (*mt_pmi_fn)(bool user_mode, void *ctx);
int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn fn,
    void *ctx);
int mt_microstackshot_stop(void);
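
/*
 * Example (illustrative sketch): sampling on a fixed counter with a periodic
 * PMI.  `SAMPLE_CTR` and `SAMPLE_PERIOD` are hypothetical values, and the
 * assumption here is that the callback runs from the PMI with the `ctx`
 * pointer passed at start.
 *
 *     static void
 *     sample_pmi(bool user_mode, void *ctx)
 *     {
 *             // record a sample; keep this path short
 *     }
 *
 *     int error = mt_microstackshot_start(SAMPLE_CTR, SAMPLE_PERIOD,
 *         sample_pmi, NULL);
 *     ...
 *     error = mt_microstackshot_stop();
 */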

__END_DECLS

#endif /* MACH_KERNEL_PRIVATE */

#endif /* !defined(KERN_MONOTONIC_H) */