/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Sample thread data */

#include <kern/debug.h> /* panic */
#include <kern/thread.h> /* thread_* */
#include <kern/policy_internal.h> /* TASK_POLICY_* */
#include <mach/mach_types.h>
#include <sys/errno.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/thread_samplers.h>
#include <kperf/ast.h>

#include <kern/monotonic.h>
#include <kern/recount.h> /* recount_thread_times, recount_usage_* */
#include <machine/monotonic.h>

extern boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread);

/*
 * XXX Deprecated, use thread scheduling sampler instead.
 *
 * Taken from AppleProfileGetRunModeOfThread and CHUD.  Still here for
 * backwards compatibility.
 */

#define KPERF_TI_RUNNING   (1U << 0)
#define KPERF_TI_RUNNABLE  (1U << 1)
#define KPERF_TI_WAIT      (1U << 2)
#define KPERF_TI_UNINT     (1U << 3)
#define KPERF_TI_SUSP      (1U << 4)
#define KPERF_TI_TERMINATE (1U << 5)
#define KPERF_TI_IDLE      (1U << 6)
static uint32_t
kperf_thread_info_runmode_legacy(thread_t thread)
{
	uint32_t kperf_state = 0;
	int sched_state = thread->state;
	processor_t last_processor = thread->last_processor;

	if ((last_processor != PROCESSOR_NULL) && (thread == last_processor->active_thread)) {
		kperf_state |= KPERF_TI_RUNNING;
	}
	if (sched_state & TH_RUN) {
		kperf_state |= KPERF_TI_RUNNABLE;
	}
	if (sched_state & TH_WAIT) {
		kperf_state |= KPERF_TI_WAIT;
	}
	if (sched_state & TH_UNINT) {
		kperf_state |= KPERF_TI_UNINT;
	}
	if (sched_state & TH_SUSP) {
		kperf_state |= KPERF_TI_SUSP;
	}
	if (sched_state & TH_TERMINATE) {
		kperf_state |= KPERF_TI_TERMINATE;
	}
	if (sched_state & TH_IDLE) {
		kperf_state |= KPERF_TI_IDLE;
	}

#if defined(XNU_TARGET_OS_OSX)
	/* on desktop, if the state is blank, report only "not idle" via the inverted mask */
	if (kperf_state == 0) {
		return TH_IDLE << 16;
	}
#endif /* defined(XNU_TARGET_OS_OSX) */

	/* high two bytes are inverted mask, low two bytes are normal */
	return ((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff);
}
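
/*
 * Decoding sketch (illustrative, consumer-side, not compiled here): the low
 * half of the runmode word carries the KPERF_TI_* bits and the high half
 * carries their complement, so a consumer can sanity-check a sample.  The
 * one exception is the desktop blank-state sentinel above, which sets only
 * TH_IDLE in the high half.  The helper name is hypothetical.
 *
 *	static bool
 *	runmode_is_runnable(uint32_t runmode)
 *	{
 *		uint16_t state = runmode & 0xffff;
 *		uint16_t inverse = (runmode >> 16) & 0xffff;
 *		bool well_formed = (uint16_t)(state ^ inverse) == 0xffff;
 *		return well_formed && (state & KPERF_TI_RUNNABLE);
 *	}
 */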

void
kperf_thread_info_sample(struct kperf_thread_info *ti, struct kperf_context *context)
{
	thread_t cur_thread = context->cur_thread;

	BUF_INFO(PERF_TI_SAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(cur_thread));

	ti->kpthi_pid = context->cur_pid;
	ti->kpthi_tid = thread_tid(cur_thread);
	ti->kpthi_dq_addr = thread_dispatchqaddr(cur_thread);
	ti->kpthi_runmode = kperf_thread_info_runmode_legacy(cur_thread);

	BUF_VERB(PERF_TI_SAMPLE | DBG_FUNC_END);
}

void
kperf_thread_info_log(struct kperf_thread_info *ti)
{
	BUF_DATA(PERF_TI_DATA, ti->kpthi_pid, ti->kpthi_tid /* K64-only */,
	    ti->kpthi_dq_addr, ti->kpthi_runmode);
}

/*
 * Scheduling information reports inputs and outputs of the scheduler state for
 * a thread.
 */

void
kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc,
    struct kperf_context *context)
{
	assert(thsc != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

	struct recount_times_mach times = { 0 };
	if (thread == current_thread()) {
		boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
		times = recount_current_thread_times();
		ml_set_interrupts_enabled(interrupt_state);
	} else {
		times = recount_thread_times(thread);
	}
	thsc->kpthsc_user_time = times.rtm_user;
	thsc->kpthsc_system_time = times.rtm_system;

	thsc->kpthsc_runnable_time = timer_grab(&thread->runnable_timer);
	thsc->kpthsc_state = thread->state;
	thsc->kpthsc_base_priority = thread->base_pri;
	thsc->kpthsc_sched_priority = thread->sched_pri;
	thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos;
	thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos;
	thsc->kpthsc_requested_qos_override = MAX(thread->requested_policy.thrp_qos_override,
	    thread->requested_policy.thrp_qos_workq_override);
	thsc->kpthsc_requested_qos_promote = thread->requested_policy.thrp_qos_promote;
	thsc->kpthsc_requested_qos_kevent_override = MAX(
		thread->requested_policy.thrp_qos_kevent_override,
		thread->requested_policy.thrp_qos_wlsvc_override);
	thsc->kpthsc_requested_qos_sync_ipc_override = THREAD_QOS_UNSPECIFIED;
	thsc->kpthsc_effective_latency_qos = thread->effective_policy.thep_latency_qos;

	BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_END);
}

void
kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc)
{
	assert(thsc != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_SCHEDDATA_2, thsc->kpthsc_user_time,
	    thsc->kpthsc_system_time,
	    (((uint64_t)thsc->kpthsc_base_priority) << 48)
	    | ((uint64_t)thsc->kpthsc_sched_priority << 32)
	    | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
	    | (thsc->kpthsc_effective_qos << 6)
	    | (thsc->kpthsc_requested_qos << 3)
	    | thsc->kpthsc_requested_qos_override,
	    ((uint64_t)thsc->kpthsc_effective_latency_qos << 61)
	    | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58)
	    | ((uint64_t)thsc->kpthsc_requested_qos_kevent_override << 55)
	    );
	BUF_DATA(PERF_TI_SCHEDDATA_3, thsc->kpthsc_runnable_time);
#else
	BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time),
	    LOWER_32(thsc->kpthsc_user_time),
	    UPPER_32(thsc->kpthsc_system_time),
	    LOWER_32(thsc->kpthsc_system_time)
	    );
	BUF_DATA(PERF_TI_SCHEDDATA2_32_2, (((uint32_t)thsc->kpthsc_base_priority) << 16)
	    | thsc->kpthsc_sched_priority,
	    ((thsc->kpthsc_state & 0xff) << 24)
	    | (thsc->kpthsc_effective_qos << 6)
	    | (thsc->kpthsc_requested_qos << 3)
	    | thsc->kpthsc_requested_qos_override,
	    ((uint32_t)thsc->kpthsc_effective_latency_qos << 29)
	    | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26)
	    | ((uint32_t)thsc->kpthsc_requested_qos_kevent_override << 23)
	    );
	BUF_DATA(PERF_TI_SCHEDDATA3_32, UPPER_32(thsc->kpthsc_runnable_time),
	    LOWER_32(thsc->kpthsc_runnable_time));
#endif /* defined(__LP64__) */
}
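
/*
 * Unpacking sketch (illustrative, consumer-side, not compiled here).
 * Given the third argument `word` of an LP64 PERF_TI_SCHEDDATA_2 event,
 * these shifts mirror the packing above:
 *
 *	int16_t base_pri = (int16_t)(word >> 48);
 *	int16_t sched_pri = (int16_t)(word >> 32);
 *	uint8_t state = (word >> 24) & 0xff;
 *	uint8_t effective_qos = (word >> 6) & 0x7;
 *	uint8_t requested_qos = (word >> 3) & 0x7;
 *	uint8_t qos_override = word & 0x7;
 */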

/*
 * Snapshot information maintains parity with stackshot information for other,
 * miscellaneous information about threads.
 */

#define KPERF_THREAD_SNAPSHOT_DARWIN_BG    (1U << 0)
#define KPERF_THREAD_SNAPSHOT_PASSIVE_IO   (1U << 1)
#define KPERF_THREAD_SNAPSHOT_GFI          (1U << 2)
#define KPERF_THREAD_SNAPSHOT_IDLE_WQ      (1U << 3)
#define KPERF_THREAD_SNAPSHOT_EXCLAVES_RPC (1U << 4)
/* max is 1U << 7 */

void
kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn,
    struct kperf_context *context)
{
	assert(thsn != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_SNAPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));

	thsn->kpthsn_last_made_runnable_time = thread->last_made_runnable_time;

	thsn->kpthsn_flags = 0;
	if (thread->effective_policy.thep_darwinbg) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_DARWIN_BG;
	}
	if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_PASSIVE_IO;
	}
	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_GFI;
	}
	if (stackshot_thread_is_idle_worker_unsafe(thread)) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_IDLE_WQ;
	}
#if CONFIG_EXCLAVES
	if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
		thsn->kpthsn_flags |= KPERF_THREAD_SNAPSHOT_EXCLAVES_RPC;
	}
#endif /* CONFIG_EXCLAVES */

	thsn->kpthsn_suspend_count = thread->suspend_count;
	/*
	 * Only have room for 8 bits in the trace event, so truncate here.
	 */
	thsn->kpthsn_io_tier = (uint8_t)proc_get_effective_thread_policy(thread, TASK_POLICY_IO);

	BUF_VERB(PERF_TI_SNAPSAMPLE | DBG_FUNC_END);
}

void
kperf_thread_snapshot_log(struct kperf_thread_snapshot *thsn)
{
	assert(thsn != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_SNAPDATA, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
	    | (thsn->kpthsn_io_tier << 24),
	    thsn->kpthsn_last_made_runnable_time);
#else
	BUF_DATA(PERF_TI_SNAPDATA_32, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
	    | (thsn->kpthsn_io_tier << 24),
	    UPPER_32(thsn->kpthsn_last_made_runnable_time),
	    LOWER_32(thsn->kpthsn_last_made_runnable_time));
#endif /* defined(__LP64__) */
}
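
/*
 * Unpacking sketch (illustrative, consumer-side, not compiled here).
 * Given the first argument `word` of a PERF_TI_SNAPDATA event, the fields
 * come back out as:
 *
 *	uint8_t flags = word & 0xff;             // KPERF_THREAD_SNAPSHOT_* bits
 *	uint16_t suspend_count = (word >> 8) & 0xffff;
 *	uint8_t io_tier = (word >> 24) & 0xff;
 */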

/*
 * Dispatch information contains the dispatch queue serial number and label
 * from libdispatch.
 *
 * It's a separate sampler because queue data must be copied in from user space.
 */
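
/*
 * For orientation only: the user-space layout this sampler walks looks
 * roughly like the following (hypothetical struct; the real offsets are
 * registered per-task by libdispatch, not compile-time constants):
 *
 *	struct example_dispatch_queue {
 *		// ... other libdispatch fields ...
 *		uint64_t dq_serialnum;   // at get_task_dispatchqueue_serialno_offset()
 *		const char *dq_label;    // at get_task_dispatchqueue_label_offset()
 *	};
 *
 * thread_dispatchqaddr() returns the address of the thread's TSD slot that
 * holds the pointer to such a struct, hence the double copyin below.
 */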

void
kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi,
    struct kperf_context *context)
{
	assert(thdi != NULL);
	assert(context != NULL);

	thread_t thread = context->cur_thread;

	BUF_INFO(PERF_TI_DISPSAMPLE | DBG_FUNC_START,
	    (uintptr_t)thread_tid(thread));

	task_t task = get_threadtask(thread);
	size_t user_addr_size = task_has_64Bit_addr(task) ? 8 : 4;
	thdi->kpthdi_dq_serialno = 0;
	thdi->kpthdi_dq_label[0] = '\0';
	int error = 0;

	/*
	 * The dispatch queue address points to a struct that contains
	 * information about the dispatch queue.  Use task-level offsets to
	 * find the serial number and label of the dispatch queue.
	 */
	assert(task != kernel_task);
	uint64_t user_dq_key_addr = thread_dispatchqaddr(thread);
	if (user_dq_key_addr == 0) {
		error = ENOENT;
		goto out;
	}

	uint64_t user_dq_addr = 0;
	if ((error = copyin((user_addr_t)user_dq_key_addr, &user_dq_addr,
	    user_addr_size)) != 0) {
		goto out;
	}

	if (user_dq_addr == 0) {
		error = EINVAL;
		goto out;
	}

	uint64_t serialno_offset = get_task_dispatchqueue_serialno_offset(task);
	uint64_t user_dq_serialno_addr = 0;
	if (os_add_overflow(user_dq_addr, serialno_offset,
	    &user_dq_serialno_addr)) {
		error = EOVERFLOW;
		goto out;
	}

	if ((error = copyin((user_addr_t)user_dq_serialno_addr,
	    &(thdi->kpthdi_dq_serialno), user_addr_size)) != 0) {
		goto out;
	}

	uint64_t lbl_offset = get_task_dispatchqueue_label_offset(task);
	if (lbl_offset == 0) {
		error = ENOBUFS;
		goto out;
	}

	uint64_t user_dqlbl_ptr_addr = 0;
	if (os_add_overflow(user_dq_addr, lbl_offset, &user_dqlbl_ptr_addr)) {
		error = EOVERFLOW;
		goto out;
	}

	uint64_t user_dqlbl_addr = 0;
	/*
	 * The label isn't embedded in the struct -- it just holds a
	 * pointer to the label string, NUL-terminated.
	 */
	if ((error = copyin((user_addr_t)user_dqlbl_ptr_addr, &user_dqlbl_addr,
	    user_addr_size)) != 0) {
		goto out;
	}

	vm_size_t copied = 0;
	if ((error = copyinstr((user_addr_t)user_dqlbl_addr,
	    thdi->kpthdi_dq_label, sizeof(thdi->kpthdi_dq_label),
	    &copied)) != 0) {
		goto out;
	}
	thdi->kpthdi_dq_label[sizeof(thdi->kpthdi_dq_label) - 1] = '\0';

out:
	BUF_VERB(PERF_TI_DISPSAMPLE | DBG_FUNC_END, error);
}

int
kperf_thread_dispatch_pend(struct kperf_context *context,
    unsigned int actionid)
{
	return kperf_ast_pend(context->cur_thread, T_KPERF_AST_DISPATCH,
	           actionid);
}

void
kperf_thread_dispatch_log(struct kperf_thread_dispatch *thdi)
{
	assert(thdi != NULL);
#if defined(__LP64__)
	BUF_DATA(PERF_TI_DISPDATA, thdi->kpthdi_dq_serialno);
#else
	BUF_DATA(PERF_TI_DISPDATA_32, UPPER_32(thdi->kpthdi_dq_serialno),
	    LOWER_32(thdi->kpthdi_dq_serialno));
#endif /* defined(__LP64__) */

	if (thdi->kpthdi_dq_label[0] != '\0') {
		kernel_debug_string_simple(PERF_TI_DISPLABEL, thdi->kpthdi_dq_label);
	}
}

/*
 * A bit different from other samplers -- since logging disables interrupts,
 * it's a fine place to sample the thread counters.
 */
void
kperf_thread_inscyc_log(struct kperf_context *context)
{
#if CONFIG_PERVASIVE_CPI
	thread_t cur_thread = current_thread();

	if (context->cur_thread != cur_thread) {
		/* can't safely access another thread's counters */
		return;
	}

	struct recount_usage usage = { 0 };
	struct recount_usage perf_only = { 0 };
	recount_current_thread_usage_perf_only(&usage, &perf_only);
	BUF_DATA(PERF_TI_INSCYCDATA, recount_usage_instructions(&usage),
	    recount_usage_cycles(&usage), recount_usage_instructions(&perf_only),
	    recount_usage_cycles(&perf_only));
#else /* CONFIG_PERVASIVE_CPI */
#pragma unused(context)
#endif /* !CONFIG_PERVASIVE_CPI */
}
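
/*
 * Usage sketch (illustrative, consumer-side, not compiled here): the four
 * values logged by PERF_TI_INSCYCDATA are enough to derive a thread's
 * cycles-per-instruction, overall and for the performance perf-level alone
 * (assuming that is what the perf_only totals represent):
 *
 *	double cpi = (double)cycles / (double)instructions;
 *	double perf_cpi = (double)perf_cycles / (double)perf_instructions;
 */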