/*
 * Copyright (c) 2011-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This file manages the timers used for on-CPU samples and PET.
 *
 * Each timer configured by a tool is represented by a kptimer structure.
 * The timer calls present in each structure are used to schedule CPU-local
 * timers. As each timer fires, that CPU samples itself and schedules another
 * timer to fire at the next deadline. The first timer to fire across all CPUs
 * determines that deadline. This causes the timers to fire at a consistent
 * cadence.
 *
 * Traditional PET uses a timer call to wake up its sampling thread and take
 * on-CPU samples.
 *
 * Synchronization for start and stop is provided by the ktrace subsystem lock.
 * Global state is stored in a single struct, to ease debugging.
 */
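/*
 * Cadence example (illustrative values): with a 10ms period, the first CPU
 * whose timer fires at time T advances the shared deadline to T + 10ms; the
 * remaining CPUs synchronize to that same deadline when their local timers
 * fire, so every CPU converges on the T, T + 10ms, T + 20ms, ... schedule.
 */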

#include <mach/mach_types.h>
#include <kern/cpu_data.h> /* current_thread() */
#include <kern/kalloc.h>
#include <kern/timer_queue.h>
#include <libkern/section_keywords.h>
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <sys/ktrace.h>

#include <machine/machine_routines.h>
#if defined(__x86_64__)
#include <i386/mp.h>
#endif /* defined(__x86_64__) */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/action.h>
#include <kperf/kptimer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>

#define KPTIMER_PET_INACTIVE (999)
#define KPTIMER_MAX (8)

struct kptimer {
	uint32_t kt_actionid;
	uint64_t kt_period_abs;
	/*
	 * The `kt_cur_deadline` field represents when the timer should next fire.
	 * It's used to synchronize between timers firing on each CPU. In the timer
	 * handler, each CPU will take the `kt_lock` and see if the
	 * `kt_cur_deadline` still needs to be updated for the timer fire. If so,
	 * it updates it and logs the timer fire event under the lock.
	 */
	lck_spin_t kt_lock;
	uint64_t kt_cur_deadline;

#if DEVELOPMENT || DEBUG
	/*
	 * To be set by the timer leader as a debugging aid for timeouts, if kperf
	 * happens to be on-CPU when they occur.
	 */
	uint64_t kt_fire_time;
#endif /* DEVELOPMENT || DEBUG */
};

static struct {
	struct kptimer *g_timers;
	uint64_t *g_cpu_deadlines;
	unsigned int g_ntimers;
	unsigned int g_pet_timerid;

	bool g_setup:1;
	bool g_pet_active:1;
	bool g_started:1;

	struct timer_call g_pet_timer;
} kptimer = {
	.g_pet_timerid = KPTIMER_PET_INACTIVE,
};

SECURITY_READ_ONLY_LATE(static uint64_t) kptimer_minperiods_mtu[KTPL_MAX];

/*
 * Enforce a minimum timer period to prevent interrupt storms.
 */
const uint64_t kptimer_minperiods_ns[KTPL_MAX] = {
#if defined(__x86_64__)
	[KTPL_FG] = 20 * NSEC_PER_USEC, /* The minimum timer period in xnu, period. */
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 1 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 1 * NSEC_PER_MSEC,
#elif defined(__arm64__)
	[KTPL_FG] = 50 * NSEC_PER_USEC,
	[KTPL_BG] = 1 * NSEC_PER_MSEC,
	[KTPL_FG_PET] = 1 * NSEC_PER_MSEC,
	[KTPL_BG_PET] = 1 * NSEC_PER_MSEC,
#else
#error unexpected architecture
#endif
};

static void kptimer_pet_handler(void * __unused param1, void * __unused param2);
static void kptimer_stop_cpu(processor_t processor);

void
kptimer_init(void)
{
	for (int i = 0; i < KTPL_MAX; i++) {
		nanoseconds_to_absolutetime(kptimer_minperiods_ns[i],
		    &kptimer_minperiods_mtu[i]);
	}
}

static void
kptimer_set_cpu_deadline(int cpuid, int timerid, uint64_t deadline)
{
	kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid] =
	    deadline;
}

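/*
 * One-time allocation and initialization of timer state: the kptimer
 * structures and their locks, the per-CPU deadline array, and the PET timer
 * call.  Safe to call repeatedly; it returns early once `g_setup` is set.
 */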
static void
kptimer_setup(void)
{
	if (kptimer.g_setup) {
		return;
	}
	static lck_grp_t kptimer_lock_grp;
	lck_grp_init(&kptimer_lock_grp, "kptimer", LCK_GRP_ATTR_NULL);

	const size_t timers_size = KPTIMER_MAX * sizeof(struct kptimer);
	kptimer.g_timers = zalloc_permanent_tag(timers_size,
	    ZALIGN(struct kptimer), VM_KERN_MEMORY_DIAG);
	for (int i = 0; i < KPTIMER_MAX; i++) {
		lck_spin_init(&kptimer.g_timers[i].kt_lock, &kptimer_lock_grp,
		    LCK_ATTR_NULL);
	}

	const size_t deadlines_size = machine_info.logical_cpu_max * KPTIMER_MAX *
	    sizeof(kptimer.g_cpu_deadlines[0]);
	kptimer.g_cpu_deadlines = zalloc_permanent_tag(deadlines_size,
	    ZALIGN_64, VM_KERN_MEMORY_DIAG);
	for (int i = 0; i < KPTIMER_MAX; i++) {
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}

	timer_call_setup(&kptimer.g_pet_timer, kptimer_pet_handler, NULL);

	kptimer.g_setup = true;
}

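/*
 * Stop the timers and clear all configured periods, actions, and per-CPU
 * deadlines, returning the subsystem to its unconfigured state.
 */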
void
kptimer_reset(void)
{
	kptimer_stop();
	kptimer_set_pet_timerid(KPTIMER_PET_INACTIVE);

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_period_abs = 0;
		kptimer.g_timers[i].kt_actionid = 0;
		for (int j = 0; j < machine_info.logical_cpu_max; j++) {
			kptimer_set_cpu_deadline(j, i, EndOfAllTime);
		}
	}
}

#pragma mark - deadline management

static uint64_t
kptimer_get_cpu_deadline(int cpuid, int timerid)
{
	return kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid];
}

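/*
 * Take an on-CPU sample of the current thread into the per-CPU interrupt
 * sample buffer, attributing the sample to `actionid` with `timerid` as the
 * trigger.
 */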
static void
kptimer_sample_curcpu(unsigned int actionid, unsigned int timerid,
    uint32_t flags)
{
	struct kperf_sample *intbuf = kperf_intr_sample_buffer();
#if DEVELOPMENT || DEBUG
	intbuf->sample_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */

	BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START);

	thread_t thread = current_thread();
	task_t task = get_threadtask(thread);
	struct kperf_context ctx = {
		.cur_thread = thread,
		.cur_task = task,
		.cur_pid = task_pid(task),
		.trigger_type = TRIGGER_TYPE_TIMER,
		.trigger_id = timerid,
	};

	(void)kperf_sample(intbuf, &ctx, actionid,
	    SAMPLE_FLAG_PEND_USER | flags);

	BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END);
}

static void
kptimer_lock(struct kptimer *timer)
{
	lck_spin_lock(&timer->kt_lock);
}

static void
kptimer_unlock(struct kptimer *timer)
{
	lck_spin_unlock(&timer->kt_lock);
}

/*
 * If the deadline expired in the past, find the next deadline to program,
 * locked into the cadence provided by the period.
 */
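/*
 * For example (illustrative values): now = 1050, deadline = 1000,
 * period = 30.  time_since is 50, so the next on-cadence deadline is
 * now + (30 - (50 % 30)) = 1060, staying on the 1000, 1030, 1060, ...
 * schedule.
 */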
static inline uint64_t
dead_reckon_deadline(uint64_t now, uint64_t deadline, uint64_t period)
{
	if (deadline < now) {
		uint64_t time_since = now - deadline;
		uint64_t extra_time = period - (time_since % period);
		return now + extra_time;
	}
	return deadline;
}

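/*
 * Handle a single timer's fire on this CPU and return the deadline this CPU
 * should program next.  The first CPU to respond to a given fire advances the
 * shared `kt_cur_deadline` and emits the PERF_TM_FIRE event; later CPUs simply
 * resynchronize with that deadline.
 */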
static uint64_t
kptimer_fire(struct kptimer *timer, unsigned int timerid,
    uint64_t deadline, int __unused cpuid, uint64_t now)
{
	bool first = false;
	uint64_t next_deadline = deadline + timer->kt_period_abs;

	/*
	 * It's not straightforward to replace this lock with a compare-exchange,
	 * since the PERF_TM_FIRE event must be emitted *before* any subsequent
	 * PERF_TM_HNDLR events, so tools can understand the handlers are responding
	 * to this timer fire.
	 */
	kptimer_lock(timer);
	if (timer->kt_cur_deadline < next_deadline) {
		first = true;
		next_deadline = dead_reckon_deadline(now, next_deadline,
		    timer->kt_period_abs);
		timer->kt_cur_deadline = next_deadline;
		BUF_DATA(PERF_TM_FIRE, timerid, timerid == kptimer.g_pet_timerid,
		    timer->kt_period_abs, timer->kt_actionid);
#if DEVELOPMENT || DEBUG
		/*
		 * Debugging aid to see the last time this timer fired.
		 */
		timer->kt_fire_time = mach_absolute_time();
#endif /* DEVELOPMENT || DEBUG */
		if (timerid == kptimer.g_pet_timerid && kppet_get_lightweight_pet()) {
			os_atomic_inc(&kppet_gencount, relaxed);
		}
	} else {
		/*
		 * In case this CPU has missed several timer fires, get it back on track
		 * by synchronizing with the latest timer fire.
		 */
		next_deadline = timer->kt_cur_deadline;
	}
	kptimer_unlock(timer);

	if (!first && !kperf_action_has_non_system(timer->kt_actionid)) {
		/*
		 * The first timer to fire will sample the system, so there's
		 * no need to run other timers if those are the only samplers
		 * for this action.
		 */
		return next_deadline;
	}

	kptimer_sample_curcpu(timer->kt_actionid, timerid,
	    first ? SAMPLE_FLAG_SYSTEM : 0);

	return next_deadline;
}

/*
 * Determine which of the timers fired.
 */
void
kptimer_expire(processor_t processor, int cpuid, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;

	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	switch (status) {
	case KPERF_SAMPLING_ON:
		break;
	case KPERF_SAMPLING_SHUTDOWN:
		kptimer_stop_cpu(processor);
		return;
	case KPERF_SAMPLING_OFF:
		panic("kperf: timer fired at %llu, but sampling is disabled", now);
	default:
		panic("kperf: unknown sampling state 0x%x", status);
	}

	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0) {
			continue;
		}

		uint64_t cpudeadline = kptimer_get_cpu_deadline(cpuid, i);
		if (now > cpudeadline) {
			uint64_t deadline = kptimer_fire(timer, i, cpudeadline, cpuid, now);
			if (deadline == 0) {
				kptimer_set_cpu_deadline(cpuid, i, EndOfAllTime);
			} else {
				kptimer_set_cpu_deadline(cpuid, i, deadline);
				if (deadline < min_deadline) {
					min_deadline = deadline;
				}
			}
		}
	}
	if (min_deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL,
		    min_deadline, mach_absolute_time());
	}
}

#pragma mark - start/stop

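/*
 * Run `fn` on every CPU, including this one.  On non-Intel systems the
 * cross-call handlers call kptimer_broadcast_ack to account for their
 * completion through the shared `xcsync` counter.
 */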
static void
kptimer_broadcast(void (*fn)(void *))
{
	ktrace_assert_lock_held();

#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_ALL, ASYNC, fn, NULL);
#else /* defined(__x86_64__) */
	_Atomic uint32_t xcsync = 0;
	cpu_broadcast_xcall((uint32_t *)&xcsync, TRUE /* include self */, fn,
	    &xcsync);
#endif /* !defined(__x86_64__) */
}

static void
kptimer_broadcast_ack(void *arg)
{
#if defined(__x86_64__)
#pragma unused(arg)
#else /* defined(__x86_64__) */
	_Atomic uint32_t *xcsync = arg;
	int pending = os_atomic_dec(xcsync, relaxed);
	if (pending == 0) {
		thread_wakeup(xcsync);
	}
#endif /* !defined(__x86_64__) */
}

static void
kptimer_sample_pet_remote(void * __unused arg)
{
	if (!kperf_is_sampling()) {
		return;
	}
	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, 0);
}

#if !defined(__x86_64__)

#include <arm/cpu_internal.h>

void kperf_signal_handler(void);
void
kperf_signal_handler(void)
{
	kptimer_sample_pet_remote(NULL);
}

#endif /* !defined(__x86_64__) */

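/*
 * Ask every other CPU to take a PET on-CPU sample; the CPU running the PET
 * timer handler samples itself directly in kptimer_pet_handler.
 */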
static void
kptimer_broadcast_pet(void)
{
#if defined(__x86_64__)
	(void)mp_cpus_call(CPUMASK_OTHERS, NOSYNC, kptimer_sample_pet_remote,
	    NULL);
#else /* defined(__x86_64__) */
	int curcpu = cpu_number();
	for (int i = 0; i < machine_info.logical_cpu_max; i++) {
		if (i != curcpu) {
			cpu_signal(cpu_datap(i), SIGPkppet, NULL, NULL);
		}
	}
#endif /* !defined(__x86_64__) */
}

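/*
 * Timer-call handler for traditional (non-lightweight) PET: broadcast to the
 * other CPUs for on-CPU samples, wake the PET sampling thread, and then sample
 * this CPU.
 */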
static void
kptimer_pet_handler(void * __unused param1, void * __unused param2)
{
	if (!kptimer.g_pet_active) {
		return;
	}

	struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid];

	BUF_DATA(PERF_TM_FIRE, kptimer.g_pet_timerid, 1, timer->kt_period_abs,
	    timer->kt_actionid);

	/*
	 * To get the on-CPU samples as close to this timer fire as possible,
	 * first broadcast to the other CPUs to sample themselves.
	 */
	kptimer_broadcast_pet();

	/*
	 * Wake up the PET thread afterwards so it's not inadvertently sampled (it's
	 * a high-priority kernel thread). If the scheduler needs to IPI to run it,
	 * that IPI will be handled after the IPIs issued during the broadcast.
	 */
	kppet_wake_thread();

	/*
	 * Finally, sample this CPU, whose stacks and state have been preserved
	 * while running this handler. Make sure to include system measurements.
	 */
	kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid,
	    SAMPLE_FLAG_SYSTEM);

	BUF_INFO(PERF_TM_FIRE | DBG_FUNC_END);

	/*
	 * The PET thread will re-arm the timer when it's done.
	 */
}

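/*
 * Called by the PET thread once it finishes a sampling pass that took
 * `sampledur_abs`.  Re-arms the PET timer, shortening the next period by the
 * time already spent sampling (clamped to the minimum PET period) so the
 * overall cadence stays close to the configured period.
 */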
void
kptimer_pet_enter(uint64_t sampledur_abs)
{
	if (!kperf_is_sampling()) {
		return;
	}

	uint64_t period_abs = kptimer.g_timers[kptimer.g_pet_timerid].kt_period_abs;
	uint64_t orig_period_abs = period_abs;

	if (period_abs > sampledur_abs) {
		period_abs -= sampledur_abs;
	}
	period_abs = MAX(kptimer_min_period_abs(true), period_abs);
	uint64_t deadline_abs = mach_absolute_time() + period_abs;

	BUF_INFO(PERF_PET_SCHED, orig_period_abs, period_abs, sampledur_abs,
	    deadline_abs);

	timer_call_enter(&kptimer.g_pet_timer, deadline_abs, TIMER_CALL_SYS_CRITICAL);
}

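/*
 * Find the earliest pending deadline across all active timers for this
 * processor, dead-reckoning any deadlines that have already passed and
 * recording each timer's per-CPU deadline along the way.
 */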
static uint64_t
kptimer_earliest_deadline(processor_t processor, uint64_t now)
{
	uint64_t min_deadline = UINT64_MAX;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		uint64_t cur_deadline = timer->kt_cur_deadline;
		if (cur_deadline == 0) {
			continue;
		}
		cur_deadline = dead_reckon_deadline(now, cur_deadline,
		    timer->kt_period_abs);
		kptimer_set_cpu_deadline(processor->cpu_id, i, cur_deadline);
		if (cur_deadline < min_deadline) {
			min_deadline = cur_deadline;
		}
	}
	return min_deadline;
}

void kptimer_running_setup(processor_t processor, uint64_t now);
void
kptimer_running_setup(processor_t processor, uint64_t now)
{
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_setup(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_cpu(processor_t processor)
{
	uint64_t now = mach_absolute_time();
	uint64_t deadline = kptimer_earliest_deadline(processor, now);
	if (deadline < UINT64_MAX) {
		running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, deadline,
		    now);
	}
}

static void
kptimer_start_remote(void *arg)
{
	kptimer_start_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

static void
kptimer_stop_cpu(processor_t processor)
{
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer_set_cpu_deadline(processor->cpu_id, i, EndOfAllTime);
	}
	running_timer_cancel(processor, RUNNING_TIMER_KPERF);
}

void
kptimer_stop_curcpu(void)
{
	kptimer_stop_cpu(current_processor());
}

static void
kptimer_stop_remote(void * __unused arg)
{
	assert(ml_get_interrupts_enabled() == FALSE);
	kptimer_stop_cpu(current_processor());
	kptimer_broadcast_ack(arg);
}

/*
 * Called when a CPU is brought online. Handles the cases where the kperf
 * timer may have been enabled or disabled while the CPU was offline, which
 * would have prevented the enabling/disabling IPIs from reaching this CPU.
 */
void
kptimer_curcpu_up(void)
{
	enum kperf_sampling status = os_atomic_load(&kperf_status, acquire);
	processor_t processor = current_processor();

	assert(ml_get_interrupts_enabled() == FALSE);

	/*
	 * If the CPU was taken offline and kperf was then enabled, this CPU would
	 * have missed the enabling IPI, so fix that here. Also, if the CPU went
	 * offline after kperf was enabled, recompute the deadline (since a timer
	 * update may have been missed) and keep the timer enabled.
	 */
	if (status == KPERF_SAMPLING_ON) {
		kptimer_start_cpu(processor);
	} else {
		/*
		 * Similarly, if the CPU armed the kperf timer before going down and
		 * kperf is now disabled, cancel the kperf running timer on this CPU.
		 */
		kptimer_stop_cpu(processor);
	}
}

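/*
 * Start all configured timers: arm the PET timer call if a traditional PET
 * timer is configured, seed the shared deadlines one period out, and
 * broadcast to every CPU to program its local running timer.
 */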
void
kptimer_start(void)
{
	ktrace_assert_lock_held();

	if (kptimer.g_started) {
		return;
	}

	uint64_t now = mach_absolute_time();
	unsigned int ntimers_active = 0;
	kptimer.g_started = true;
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		struct kptimer *timer = &kptimer.g_timers[i];
		if (timer->kt_period_abs == 0 || timer->kt_actionid == 0) {
			/*
			 * No period or action means the timer is inactive.
			 */
			continue;
		} else if (!kppet_get_lightweight_pet() &&
		    i == kptimer.g_pet_timerid) {
			kptimer.g_pet_active = true;
			timer_call_enter(&kptimer.g_pet_timer, now + timer->kt_period_abs,
			    TIMER_CALL_SYS_CRITICAL);
		} else {
			timer->kt_cur_deadline = now + timer->kt_period_abs;
			ntimers_active++;
		}
	}
	if (ntimers_active > 0) {
		kptimer_broadcast(kptimer_start_remote);
	}
}

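/*
 * Stop all timers: cancel the PET timer call, broadcast to every CPU to cancel
 * its local running timer, and clear the shared deadlines.  Interrupts are
 * disabled around the teardown, presumably so this CPU's own timer cannot fire
 * while it is being dismantled.
 */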
void
kptimer_stop(void)
{
	ktrace_assert_lock_held();

	if (!kptimer.g_started) {
		return;
	}

	int intrs_en = ml_set_interrupts_enabled(FALSE);

	if (kptimer.g_pet_active) {
		kptimer.g_pet_active = false;
		timer_call_cancel(&kptimer.g_pet_timer);
	}
	kptimer.g_started = false;
	kptimer_broadcast(kptimer_stop_remote);
	for (unsigned int i = 0; i < kptimer.g_ntimers; i++) {
		kptimer.g_timers[i].kt_cur_deadline = 0;
	}

	ml_set_interrupts_enabled(intrs_en);
}

#pragma mark - accessors

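/*
 * These accessors back kperf's configuration interface.  A plausible
 * configuration sequence (illustrative values; one timer using action ID 1)
 * would be:
 *
 *	kptimer_set_count(1);
 *	kptimer_set_action(0, 1);
 *	kptimer_set_period(0, period_abs);
 *	kptimer_start();
 *
 * The setters return EBUSY once kptimer_start() has run, so configuration
 * must happen before starting.
 */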
int
kptimer_get_period(unsigned int timerid, uint64_t *period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*period_abs = kptimer.g_timers[timerid].kt_period_abs;
	return 0;
}

int
kptimer_set_period(unsigned int timerid, uint64_t period_abs)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	bool pet = kptimer.g_pet_timerid == timerid;
	uint64_t min_period = kptimer_min_period_abs(pet);
	if (period_abs != 0 && period_abs < min_period) {
		period_abs = min_period;
	}
	if (pet && !kppet_get_lightweight_pet()) {
		kppet_config(kptimer.g_timers[timerid].kt_actionid);
	}

	kptimer.g_timers[timerid].kt_period_abs = period_abs;
	return 0;
}

int
kptimer_get_action(unsigned int timerid, unsigned int *actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	*actionid = kptimer.g_timers[timerid].kt_actionid;
	return 0;
}

int
kptimer_set_action(unsigned int timerid, unsigned int actionid)
{
	if (timerid >= kptimer.g_ntimers) {
		return EINVAL;
	}
	if (kptimer.g_started) {
		return EBUSY;
	}

	kptimer.g_timers[timerid].kt_actionid = actionid;
	if (kptimer.g_pet_timerid == timerid && !kppet_get_lightweight_pet()) {
		kppet_config(actionid);
	}
	return 0;
}

unsigned int
kptimer_get_count(void)
{
	return kptimer.g_ntimers;
}

int
kptimer_set_count(unsigned int count)
{
	kptimer_setup();
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (count > KPTIMER_MAX) {
		return EINVAL;
	}
	kptimer.g_ntimers = count;
	return 0;
}

uint64_t
kptimer_min_period_abs(bool pet)
{
	enum kptimer_period_limit limit = 0;
	if (ktrace_background_active()) {
		limit = pet ? KTPL_BG_PET : KTPL_BG;
	} else {
		limit = pet ? KTPL_FG_PET : KTPL_FG;
	}
	return kptimer_minperiods_mtu[limit];
}

uint32_t
kptimer_get_pet_timerid(void)
{
	return kptimer.g_pet_timerid;
}

int
kptimer_set_pet_timerid(uint32_t petid)
{
	if (kptimer.g_started) {
		return EBUSY;
	}
	if (petid >= kptimer.g_ntimers) {
		kppet_config(0);
	} else {
		kppet_config(kptimer.g_timers[petid].kt_actionid);
		uint64_t period_abs = MAX(kptimer_min_period_abs(true),
		    kptimer.g_timers[petid].kt_period_abs);
		kptimer.g_timers[petid].kt_period_abs = period_abs;
	}

	kptimer.g_pet_timerid = petid;

	return 0;
}