1 | /* |
2 | * Copyright (c) 2011-2019 Apple Computer, Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef KPERF_H |
30 | #define KPERF_H |
31 | |
32 | #include <kern/thread.h> |
33 | #include <kern/locks.h> |
34 | |
/* lock group used for kperf's lock allocations */
extern lck_grp_t kperf_lck_grp;
36 | |
/*
 * The trigger types supported by kperf. Each trigger identifies the
 * mechanism that requested a sample, so every type must have a distinct
 * value.
 */
#define TRIGGER_TYPE_TIMER (0)
#define TRIGGER_TYPE_PMI (1)
#define TRIGGER_TYPE_KDEBUG (2)
#define TRIGGER_TYPE_LAZY_WAIT (3)
/*
 * Fixed: was previously defined to (3), colliding with
 * TRIGGER_TYPE_LAZY_WAIT and making the two lazy triggers
 * indistinguishable.
 */
#define TRIGGER_TYPE_LAZY_CPU (4)
43 | |
/*
 * Get and set the kperf AST flag bits stored on `thread` (presumably
 * consumed by kperf_thread_ast_handler, declared below -- confirm against
 * the implementation).
 */
uint32_t kperf_get_thread_ast(thread_t thread);
void kperf_set_thread_ast(thread_t thread, uint32_t flags);

/*
 * Get and set dirtiness of thread, so kperf can track whether the thread
 * has been dispatched since it last looked.
 */
boolean_t kperf_thread_get_dirty(thread_t thread);
void kperf_thread_set_dirty(thread_t thread, boolean_t dirty);
53 | |
54 | /* |
55 | * Initialize the rest of kperf lazily, upon first use. May be called multiple times. |
56 | * The ktrace_lock must be held. |
57 | */ |
58 | void kperf_setup(void); |
59 | |
60 | /* |
61 | * Configure kperf during boot and check the boot args. |
62 | */ |
63 | extern void kperf_init_early(void); |
64 | |
65 | bool kperf_is_sampling(void); |
66 | int kperf_enable_sampling(void); |
67 | int kperf_disable_sampling(void); |
68 | int kperf_port_to_pid(mach_port_name_t portname); |
69 | |
70 | /* get a per-CPU sample buffer */ |
71 | struct kperf_sample *kperf_intr_sample_buffer(void); |
72 | |
/*
 * Coarse sampling states, with a distinct SHUTDOWN state so that an
 * in-progress teardown can be observed.
 */
enum kperf_sampling {
	KPERF_SAMPLING_OFF,
	KPERF_SAMPLING_SHUTDOWN,
	KPERF_SAMPLING_ON,
};

/* current sampling state; _Atomic so it can be read and updated without a lock */
extern enum kperf_sampling _Atomic kperf_status;
80 | |
#pragma mark - external callbacks

/*
 * Set up kperf during system startup.
 */
void kperf_init(void);

/*
 * kperf AST handler
 *
 * Prevent inlining, since the sampling function allocates on the stack and
 * branches calling ast_taken (but never on a kperf AST) may blow their stacks.
 */
extern __attribute__((noinline)) void kperf_thread_ast_handler(thread_t thread);

/*
 * Update whether the on-CPU callback should be called (presumably recomputes
 * `kperf_on_cpu_active`, the flag checked by kperf_on_cpu below -- confirm
 * against the implementation).
 */
void kperf_on_cpu_update(void);
100 | |
101 | /* |
102 | * Should only be called by the scheduler when `thread` is switching on-CPU. |
103 | */ |
104 | static inline void |
105 | kperf_on_cpu(thread_t thread, thread_continue_t continuation, |
106 | uintptr_t *starting_fp) |
107 | { |
108 | extern boolean_t kperf_on_cpu_active; |
109 | void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation, |
110 | uintptr_t *starting_fp); |
111 | |
112 | if (__improbable(kperf_on_cpu_active)) { |
113 | kperf_on_cpu_internal(thread, continuation, starting_fp); |
114 | } |
115 | } |
116 | |
117 | /* |
118 | * Should only be called by the scheduler when `thread` is switching off-CPU. |
119 | */ |
120 | static inline void |
121 | kperf_off_cpu(thread_t thread) |
122 | { |
123 | extern unsigned int kperf_lazy_cpu_action; |
124 | void kperf_lazy_off_cpu(thread_t thread); |
125 | |
126 | if (__improbable(kperf_lazy_cpu_action != 0)) { |
127 | kperf_lazy_off_cpu(thread); |
128 | } |
129 | } |
130 | |
131 | /* |
132 | * Should only be called by the scheduler when `thread` is made runnable. |
133 | */ |
134 | static inline void |
135 | kperf_make_runnable(thread_t thread, int interrupt) |
136 | { |
137 | extern unsigned int kperf_lazy_cpu_action; |
138 | void kperf_lazy_make_runnable(thread_t thread, bool interrupt); |
139 | |
140 | if (__improbable(kperf_lazy_cpu_action != 0)) { |
141 | kperf_lazy_make_runnable(thread, interrupt); |
142 | } |
143 | } |
144 | |
145 | static inline void |
146 | kperf_running_setup(processor_t processor, uint64_t now) |
147 | { |
148 | if (kperf_is_sampling()) { |
149 | extern void kptimer_running_setup(processor_t, uint64_t now); |
150 | kptimer_running_setup(processor, now); |
151 | } |
152 | } |
153 | |
154 | /* |
155 | * Should only be called by platform code at the end of each interrupt. |
156 | */ |
157 | static inline void |
158 | kperf_interrupt(void) |
159 | { |
160 | extern unsigned int kperf_lazy_cpu_action; |
161 | extern void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, |
162 | bool interrupt); |
163 | |
164 | if (__improbable(kperf_lazy_cpu_action != 0)) { |
165 | kperf_lazy_cpu_sample(NULL, flags: 0, true); |
166 | } |
167 | } |
168 | |
169 | /* |
170 | * Should only be called by kdebug when an event with `debugid` is emitted |
171 | * from the frame starting at `starting_fp`. |
172 | */ |
173 | static inline void |
174 | kperf_kdebug_callback(uint32_t debugid, uintptr_t *starting_fp) |
175 | { |
176 | extern boolean_t kperf_kdebug_active; |
177 | void kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp); |
178 | |
179 | if (__improbable(kperf_kdebug_active)) { |
180 | kperf_kdebug_handler(debugid, starting_fp); |
181 | } |
182 | } |
183 | |
184 | /* |
185 | * Should only be called by platform code to indicate kperf's per-CPU timer |
186 | * has expired on the current CPU `cpuid` at time `now`. |
187 | */ |
188 | void kperf_timer_expire(void *param0, void *param1); |
189 | |
190 | /* |
191 | * Used by ktrace to reset kperf. ktrace_lock must be held. |
192 | */ |
193 | extern void kperf_reset(void); |
194 | |
195 | /* |
196 | * Configure kperf from the kernel (e.g. during boot). |
197 | */ |
198 | void kperf_kernel_configure(const char *config); |
199 | |
200 | #endif /* !defined(KPERF_H) */ |
201 | |