1/*
2 * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#ifndef KPERF_H
30#define KPERF_H
31
32#include <kern/thread.h>
33#include <kern/locks.h>
34
/* lock group shared by kperf's locks */
extern lck_grp_t kperf_lck_grp;
36
/*
 * The trigger types supported by kperf.  Each type must have a distinct
 * value, since consumers use these constants to tell trigger sources apart.
 */
#define TRIGGER_TYPE_TIMER (0)
#define TRIGGER_TYPE_PMI (1)
#define TRIGGER_TYPE_KDEBUG (2)
#define TRIGGER_TYPE_LAZY_WAIT (3)
/* was (3), colliding with TRIGGER_TYPE_LAZY_WAIT -- must be unique */
#define TRIGGER_TYPE_LAZY_CPU (4)
43
/*
 * Helpers to get and set the AST flags on a thread, used to request
 * kperf work at the next AST boundary (see kperf_thread_ast_handler below).
 */
uint32_t kperf_get_thread_flags(thread_t thread);
void kperf_set_thread_flags(thread_t thread, uint32_t flags);

/*
 * Get and set dirtiness of thread, so kperf can track whether the thread
 * has been dispatched since it last looked.
 */
boolean_t kperf_thread_get_dirty(thread_t thread);
void kperf_thread_set_dirty(thread_t thread, boolean_t dirty);
54
/* possible states of kperf sampling */
#define KPERF_SAMPLING_OFF (0)      /* not sampling */
#define KPERF_SAMPLING_ON (1)       /* actively sampling */
#define KPERF_SAMPLING_SHUTDOWN (2) /* sampling is being disabled */
59
/*
 * Initialize kperf. Must be called before use and can be called multiple times.
 */
extern int kperf_init(void);

/* get and set sampling status (one of the KPERF_SAMPLING_* values) */
extern unsigned kperf_sampling_status(void);
extern int kperf_sampling_enable(void);
extern int kperf_sampling_disable(void);

/* get a per-CPU sample buffer (named for use from interrupt context) */
struct kperf_sample *kperf_intr_sample_buffer(void);
72
/*
 * Callbacks into kperf from other systems.
 */

/*
 * kperf AST handler
 *
 * Prevent inlining, since the sampling function allocates on the stack and
 * branches calling ast_taken (but never on a kperf AST) may blow their stacks.
 */
extern __attribute__((noinline)) void kperf_thread_ast_handler(thread_t thread);

/* update whether the on-CPU callback is set (see kperf_on_cpu below) */
void kperf_on_cpu_update(void);
87
88/* for scheduler switching threads on */
89static inline void
90kperf_on_cpu(thread_t thread, thread_continue_t continuation,
91 uintptr_t *starting_fp)
92{
93 extern boolean_t kperf_on_cpu_active;
94 void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
95 uintptr_t *starting_fp);
96
97 if (__improbable(kperf_on_cpu_active)) {
98 kperf_on_cpu_internal(thread, continuation, starting_fp);
99 }
100}
101
102/* for scheduler switching threads off */
103static inline void
104kperf_off_cpu(thread_t thread)
105{
106 extern unsigned int kperf_lazy_cpu_action;
107 void kperf_lazy_off_cpu(thread_t thread);
108
109 if (__improbable(kperf_lazy_cpu_action != 0)) {
110 kperf_lazy_off_cpu(thread);
111 }
112}
113
114/* for scheduler making threads runnable */
115static inline void
116kperf_make_runnable(thread_t thread, int interrupt)
117{
118 extern unsigned int kperf_lazy_cpu_action;
119 void kperf_lazy_make_runnable(thread_t thread, bool interrupt);
120
121 if (__improbable(kperf_lazy_cpu_action != 0)) {
122 kperf_lazy_make_runnable(thread, interrupt);
123 }
124}
125
126/* for interrupt handler epilogue */
127static inline void
128kperf_interrupt(void)
129{
130 extern unsigned int kperf_lazy_cpu_action;
131 extern void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags,
132 bool interrupt);
133
134 if (__improbable(kperf_lazy_cpu_action != 0)) {
135 kperf_lazy_cpu_sample(current_thread(), 0, true);
136 }
137}
138
139/* for kdebug on every traced event */
140static inline void
141kperf_kdebug_callback(uint32_t debugid, uintptr_t *starting_fp)
142{
143 extern boolean_t kperf_kdebug_active;
144 void kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp);
145
146 if (__improbable(kperf_kdebug_active)) {
147 kperf_kdebug_handler(debugid, starting_fp);
148 }
149}
150
/*
 * Used by ktrace to reset kperf. ktrace_lock must be held.
 */
extern void kperf_reset(void);

/*
 * Configure kperf from the kernel (e.g. during boot).
 */
void kperf_kernel_configure(const char *config);

/* given a task port, find out its pid */
int kperf_port_to_pid(mach_port_name_t portname);

#if DEVELOPMENT || DEBUG
/* debug-only counter; presumably tracks in-flight kperf IPIs -- verify at use sites */
extern _Atomic long long kperf_pending_ipis;
#endif /* DEVELOPMENT || DEBUG */
167
168#endif /* !defined(KPERF_H) */
169