/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger: collects data from the various sampler modules
 * and writes it into the trace buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <kperf/action.h>
#include <kperf/ast.h>
#include <kperf/buffer.h>
#include <kperf/callstack.h>
#include <kperf/context.h>
#include <kperf/kdebug_trigger.h>
#include <kperf/kperf.h>
#include <kperf/kperf_kpc.h>
#include <kperf/kperf_timer.h>
#include <kperf/pet.h>
#include <kperf/sample.h>
#include <kperf/thread_samplers.h>

#define ACTION_MAX (32)

/* the configuration of a single action */
struct action {
    uint32_t sample;
    uint32_t ucallstack_depth;
    uint32_t kcallstack_depth;
    uint32_t userdata;
    int pid_filter;
};

/* the list of actions */
static unsigned int actionc = 0;
static struct action *actionv = NULL;

/* should emit tracepoint on context switch */
int kperf_kdebug_cswitch = 0;

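/*
 * Returns true when the action samples anything beyond the system-wide
 * memory counters.
 */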
bool
kperf_action_has_non_system(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    if (actionv[actionid - 1].sample & ~SAMPLER_SYS_MEM) {
        return true;
    } else {
        return false;
    }
}

bool
kperf_action_has_task(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return (actionv[actionid - 1].sample & SAMPLER_TASK_MASK);
}

bool
kperf_action_has_thread(unsigned int actionid)
{
    if (actionid > actionc) {
        return false;
    }

    return (actionv[actionid - 1].sample & SAMPLER_THREAD_MASK);
}

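/* emit the system-wide memory counters as trace data */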
static void
kperf_system_memory_log(void)
{
    BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count,
        (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count,
        (uintptr_t)(vm_page_active_count + vm_page_inactive_count +
        vm_page_speculative_count));
    BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count,
        (uintptr_t)vm_page_internal_count,
        (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions,
        (uintptr_t)VM_PAGE_COMPRESSOR_COUNT);
}

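/*
 * Run the samplers selected by sample_what against the given context,
 * honoring sample_flags, then log the results to the trace buffer.
 * actionid selects the per-action userdata and kernel callstack depth;
 * ucallstack_depth overrides the user callstack depth (0 means the
 * maximum).
 */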
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned sample_what, unsigned sample_flags,
    unsigned actionid, uint32_t ucallstack_depth)
{
    int pended_ucallstack = 0;
    int pended_th_dispatch = 0;
    bool on_idle_thread = false;
    uint32_t userdata = actionid;
    bool task_only = false;

    /* nothing was requested to be sampled, so there is no point continuing */
    if (sample_what == 0) {
        return SAMPLE_CONTINUE;
    }

    /* callstacks were explicitly requested to be omitted */
    if (sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK) {
        sample_what &= ~(SAMPLER_KSTACK | SAMPLER_USTACK);
    }

    if (sample_flags & SAMPLE_FLAG_ONLY_SYSTEM) {
        sample_what &= SAMPLER_SYS_MEM;
    }

    assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY))
        != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY));
    if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) {
        sample_what &= SAMPLER_THREAD_MASK;
    }
    if (sample_flags & SAMPLE_FLAG_TASK_ONLY) {
        task_only = true;
        sample_what &= SAMPLER_TASK_MASK;
    }

    if (!task_only) {
        context->cur_thread->kperf_pet_gen = kperf_pet_gen;
    }
    bool is_kernel = (context->cur_pid == 0);

    if (actionid && actionid <= actionc) {
        sbuf->kcallstack.nframes = actionv[actionid - 1].kcallstack_depth;
    } else {
        sbuf->kcallstack.nframes = MAX_CALLSTACK_FRAMES;
    }

    if (ucallstack_depth) {
        sbuf->ucallstack.nframes = ucallstack_depth;
    } else {
        sbuf->ucallstack.nframes = MAX_CALLSTACK_FRAMES;
    }

    sbuf->kcallstack.flags = CALLSTACK_VALID;
    sbuf->ucallstack.flags = CALLSTACK_VALID;

    /* an event occurred. Sample everything and dump it in a
     * buffer.
     */

    /* collect data from samplers */
    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_sample(&sbuf->th_info, context);

        /* See if we should drop idle thread samples */
        if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
            if (sbuf->th_info.kpthi_runmode & 0x40) {
                on_idle_thread = true;
                goto log_sample;
            }
        }
    }

    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_sample(&(sbuf->th_snapshot), context);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_sample(&(sbuf->th_scheduling), context);
    }
    if (sample_what & SAMPLER_KSTACK) {
        if (sample_flags & SAMPLE_FLAG_CONTINUATION) {
            kperf_continuation_sample(&(sbuf->kcallstack), context);
            /* outside of interrupt context, backtrace the current thread */
        } else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) {
            kperf_backtrace_sample(&(sbuf->kcallstack), context);
        } else {
            kperf_kcallstack_sample(&(sbuf->kcallstack), context);
        }
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_sample(context->cur_task, &(sbuf->tk_snapshot));
    }

    /* samplers that need access to the user task */
    if (!is_kernel) {
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_sample(context->cur_task, &(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (sample_what & SAMPLER_USTACK) {
                pended_ucallstack = kperf_ucallstack_pend(context, sbuf->ucallstack.nframes);
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                pended_th_dispatch = kperf_thread_dispatch_pend(context);
            }
        } else {
            if (sample_what & SAMPLER_USTACK) {
                kperf_ucallstack_sample(&(sbuf->ucallstack), context);
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                kperf_thread_dispatch_sample(&(sbuf->th_dispatch), context);
            }
        }
    }

    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
    }

log_sample:
    /* lookup the user tag, if any */
    if (actionid && (actionid <= actionc)) {
        userdata = actionv[actionid - 1].userdata;
    }

    /* avoid logging if this sample only pended samples */
    if (sample_flags & SAMPLE_FLAG_PEND_USER &&
        !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH)))
    {
        return SAMPLE_CONTINUE;
    }

    /* stash the data into the buffer
     * interrupts off to ensure we don't get split
     */
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
        actionid, userdata, sample_flags);

    if (sample_flags & SAMPLE_FLAG_SYSTEM) {
        if (sample_what & SAMPLER_SYS_MEM) {
            kperf_system_memory_log();
        }
    }
    if (on_idle_thread) {
        goto log_sample_end;
    }

    if (sample_what & SAMPLER_TH_INFO) {
        kperf_thread_info_log(&sbuf->th_info);
    }
    if (sample_what & SAMPLER_TH_SCHEDULING) {
        kperf_thread_scheduling_log(&(sbuf->th_scheduling));
    }
    if (sample_what & SAMPLER_TH_SNAPSHOT) {
        kperf_thread_snapshot_log(&(sbuf->th_snapshot));
    }
    if (sample_what & SAMPLER_KSTACK) {
        kperf_kcallstack_log(&sbuf->kcallstack);
    }
    if (sample_what & SAMPLER_TH_INSCYC) {
        kperf_thread_inscyc_log(context);
    }
    if (sample_what & SAMPLER_TK_SNAPSHOT) {
        kperf_task_snapshot_log(&(sbuf->tk_snapshot));
    }
    if (sample_what & SAMPLER_TK_INFO) {
        kperf_task_info_log(context);
    }

    /* dump user stuff */
    if (!is_kernel) {
        /* dump meminfo */
        if (sample_what & SAMPLER_MEMINFO) {
            kperf_meminfo_log(&(sbuf->meminfo));
        }

        if (sample_flags & SAMPLE_FLAG_PEND_USER) {
            if (pended_ucallstack) {
                BUF_INFO(PERF_CS_UPEND);
            }

            if (pended_th_dispatch) {
                BUF_INFO(PERF_TI_DISPPEND);
            }
        } else {
            if (sample_what & SAMPLER_USTACK) {
                kperf_ucallstack_log(&(sbuf->ucallstack));
            }

            if (sample_what & SAMPLER_TH_DISPATCH) {
                kperf_thread_dispatch_log(&(sbuf->th_dispatch));
            }
        }
    }

    if (sample_what & SAMPLER_PMC_THREAD) {
        kperf_kpc_thread_log(&(sbuf->kpcdata));
    } else if (sample_what & SAMPLER_PMC_CPU) {
        kperf_kpc_cpu_log(&(sbuf->kpcdata));
    }

log_sample_end:
    BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_END, sample_what, on_idle_thread ? 1 : 0);

    /* intrs back on */
    ml_set_interrupts_enabled(enabled);

    return SAMPLE_CONTINUE;
}

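/*
 * Illustrative call site (hypothetical; the real triggers, such as the timer
 * and kdebug handlers in this subsystem, build their contexts before calling
 * in):
 *
 *     struct kperf_context ctx = {
 *         .cur_thread = current_thread(),
 *         .cur_task = get_threadtask(current_thread()),
 *         .cur_pid = task_pid(get_threadtask(current_thread())),
 *     };
 *     (void)kperf_sample(sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);
 */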
/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
    struct kperf_context *context,
    unsigned actionid, unsigned sample_flags)
{
    /* work out what to sample, if anything */
    if ((actionid > actionc) || (actionid == 0)) {
        return SAMPLE_SHUTDOWN;
    }

    /* check the pid filter against the context's current pid.
     * filter pid == -1 means any pid
     */
    int pid_filter = actionv[actionid - 1].pid_filter;
    if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
        return SAMPLE_CONTINUE;
    }

    /* the samplers to run */
    unsigned int sample_what = actionv[actionid - 1].sample;

    /* do the actual sample operation */
    return kperf_sample_internal(sbuf, context, sample_what,
        sample_flags, actionid,
        actionv[actionid - 1].ucallstack_depth);
}

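/*
 * Handle a kdebug trigger: build a context for the current thread and take
 * the configured sample with interrupts disabled.
 */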
void
kperf_kdebug_handler(uint32_t debugid, uintptr_t *starting_fp)
{
    uint32_t sample_flags = SAMPLE_FLAG_PEND_USER;
    struct kperf_sample *sample = NULL;
    kern_return_t kr = KERN_SUCCESS;
    int s;

    if (!kperf_kdebug_should_trigger(debugid)) {
        return;
    }

    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_START, debugid);

    thread_t thread = current_thread();
    task_t task = get_threadtask(thread);
    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
        .trigger_type = TRIGGER_TYPE_KDEBUG,
        .trigger_id = 0,
    };

    s = ml_set_interrupts_enabled(0);

    sample = kperf_intr_sample_buffer();

    if (!ml_at_interrupt_context()) {
        sample_flags |= SAMPLE_FLAG_NON_INTERRUPT;
        ctx.starting_fp = starting_fp;
    }

    kr = kperf_sample(sample, &ctx, kperf_kdebug_get_action(), sample_flags);

    ml_set_interrupts_enabled(s);
    BUF_VERB(PERF_KDBG_HNDLR | DBG_FUNC_END, kr);
}

/*
 * This function allocates >2.3KB of the stack. Prevent the compiler from
 * inlining this function into ast_taken and ensure the stack memory is only
 * allocated for the kperf AST.
 */
__attribute__((noinline))
void
kperf_thread_ast_handler(thread_t thread)
{
    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_START, thread, kperf_get_thread_flags(thread));

    /* ~2KB of the stack for the sample since this is called from AST */
    struct kperf_sample sbuf;
    memset(&sbuf, 0, sizeof(struct kperf_sample));

    task_t task = get_threadtask(thread);

    if (task_did_exec(task) || task_is_exec_copy(task)) {
        BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, SAMPLE_CONTINUE);
        return;
    }

    /* make a context, take a sample */
    struct kperf_context ctx = {
        .cur_thread = thread,
        .cur_task = task,
        .cur_pid = task_pid(task),
    };

    /* decode the flags to determine what to sample */
    unsigned int sample_what = 0;
    uint32_t flags = kperf_get_thread_flags(thread);

    if (flags & T_KPERF_AST_DISPATCH) {
        sample_what |= SAMPLER_TH_DISPATCH;
    }
    if (flags & T_KPERF_AST_CALLSTACK) {
        sample_what |= SAMPLER_USTACK;
        sample_what |= SAMPLER_TH_INFO;
    }

    uint32_t ucallstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(flags);

    int r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0, ucallstack_depth);

    BUF_INFO(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/* pend kperf AST flags on the current thread; returns 1 if the AST was newly set */
int
kperf_ast_pend(thread_t thread, uint32_t set_flags)
{
    /* can only pend on the current thread */
    if (thread != current_thread()) {
        panic("pending to non-current thread");
    }

    /* get our current bits */
    uint32_t flags = kperf_get_thread_flags(thread);

    /* see if it's already been done or pended */
    if (!(flags & set_flags)) {
        /* set the bit on the thread */
        flags |= set_flags;
        kperf_set_thread_flags(thread, flags);

        /* set the actual AST */
        act_set_kperf(thread);
        return 1;
    }

    return 0;
}

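/*
 * Stash the requested user callstack depth in the thread's kperf AST flags
 * so the AST handler can pass it along to the sampler.
 */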
void
kperf_ast_set_callstack_depth(thread_t thread, uint32_t depth)
{
    uint32_t ast_flags = kperf_get_thread_flags(thread);
    uint32_t existing_callstack_depth = T_KPERF_GET_CALLSTACK_DEPTH(ast_flags);

    if (existing_callstack_depth != depth) {
        ast_flags &= ~T_KPERF_SET_CALLSTACK_DEPTH(depth);
        ast_flags |= T_KPERF_SET_CALLSTACK_DEPTH(depth);

        kperf_set_thread_flags(thread, ast_flags);
    }
}

int
kperf_kdbg_cswitch_get(void)
{
    return kperf_kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
    kperf_kdebug_cswitch = newval;
    kperf_on_cpu_update();

    return 0;
}

/*
 * Action configuration
 */
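
/*
 * Illustrative configuration sequence (hypothetical; in practice these
 * setters are driven from user space):
 *
 *     kperf_action_set_count(1);
 *     kperf_action_set_samplers(1, SAMPLER_TH_INFO | SAMPLER_KSTACK);
 *     kperf_action_set_filter(1, -1);    (-1 means any pid)
 */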
unsigned int
kperf_action_get_count(void)
{
    return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    /* disallow both CPU and thread counters to be sampled in the same
     * action */
    if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
        return EINVAL;
    }

    actionv[actionid - 1].sample = samplers;

    return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
    if (actionid > actionc) {
        return EINVAL;
    }

    if (actionid == 0) {
        *samplers_out = 0; /* "NULL" action */
    } else {
        *samplers_out = actionv[actionid - 1].sample;
    }

    return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].userdata = userdata;

    return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
    if (actionid > actionc) {
        return EINVAL;
    }

    if (actionid == 0) {
        *userdata_out = 0; /* "NULL" action */
    } else {
        *userdata_out = actionv[actionid - 1].userdata;
    }

    return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
    if ((actionid > actionc) || (actionid == 0)) {
        return EINVAL;
    }

    actionv[actionid - 1].pid_filter = pid;

    return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
    if (actionid > actionc) {
        return EINVAL;
    }

    if (actionid == 0) {
        *pid_out = -1; /* "NULL" action */
    } else {
        *pid_out = actionv[actionid - 1].pid_filter;
    }

    return 0;
}

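/* reset every action to its default configuration */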
void
kperf_action_reset(void)
{
    for (unsigned int i = 0; i < actionc; i++) {
        kperf_action_set_samplers(i + 1, 0);
        kperf_action_set_userdata(i + 1, 0);
        kperf_action_set_filter(i + 1, -1);
        kperf_action_set_ucallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
        kperf_action_set_kcallstack_depth(i + 1, MAX_CALLSTACK_FRAMES);
    }
}

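/*
 * Grow the action table to `count` entries; shrinking is not supported.
 * New entries start with no samplers, no pid filter, and the maximum
 * callstack depths.
 */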
int
kperf_action_set_count(unsigned count)
{
    struct action *new_actionv = NULL, *old_actionv = NULL;
    unsigned old_count;

    /* easy no-op */
    if (count == actionc) {
        return 0;
    }

    /* TODO: allow shrinking? */
    if (count < actionc) {
        return EINVAL;
    }

    /* cap it for good measure */
    if (count > ACTION_MAX) {
        return EINVAL;
    }

    /* creating the action array for the first time, so initialize the rest
     * of kperf, too
     */
    if (actionc == 0) {
        int r;
        if ((r = kperf_init())) {
            return r;
        }
    }

    /* create a new array */
    new_actionv = kalloc_tag(count * sizeof(*new_actionv), VM_KERN_MEMORY_DIAG);
    if (new_actionv == NULL) {
        return ENOMEM;
    }

    old_actionv = actionv;
    old_count = actionc;

    if (old_actionv != NULL) {
        memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
    }

    memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

    for (unsigned int i = old_count; i < count; i++) {
        new_actionv[i].pid_filter = -1;
        new_actionv[i].ucallstack_depth = MAX_CALLSTACK_FRAMES;
        new_actionv[i].kcallstack_depth = MAX_CALLSTACK_FRAMES;
    }

    actionv = new_actionv;
    actionc = count;

    if (old_actionv != NULL) {
        kfree(old_actionv, old_count * sizeof(*actionv));
    }

    return 0;
}

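/*
 * Per-action callstack depth limits; requested depths beyond
 * MAX_CALLSTACK_FRAMES are rejected.
 */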
int
kperf_action_set_ucallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_CALLSTACK_FRAMES) {
        return EINVAL;
    }

    actionv[action_id - 1].ucallstack_depth = depth;

    return 0;
}

int
kperf_action_set_kcallstack_depth(unsigned action_id, uint32_t depth)
{
    if ((action_id > actionc) || (action_id == 0)) {
        return EINVAL;
    }

    if (depth > MAX_CALLSTACK_FRAMES) {
        return EINVAL;
    }

    actionv[action_id - 1].kcallstack_depth = depth;

    return 0;
}

int
kperf_action_get_ucallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if (action_id > actionc) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_CALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].ucallstack_depth;
    }

    return 0;
}

int
kperf_action_get_kcallstack_depth(unsigned action_id, uint32_t *depth_out)
{
    if (action_id > actionc) {
        return EINVAL;
    }

    assert(depth_out);

    if (action_id == 0) {
        *depth_out = MAX_CALLSTACK_FRAMES;
    } else {
        *depth_out = actionv[action_id - 1].kcallstack_depth;
    }

    return 0;
}