/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Collect kernel callstacks */
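
/*
 * Overview: callstacks are gathered along three paths, depending on context.
 * At interrupt context, backtrace_interrupted() walks the interrupted kernel
 * stack directly.  Kernel stacks of other threads go through the legacy CHUD
 * backtracer at the bottom of this file.  User stacks are walked with
 * backtrace_thread_user(), or deferred to an AST via kperf_ucallstack_pend().
 * Each path appends a "fixup" value (a link register or a word read from the
 * stack) so symbolicators can repair stacks sampled mid-prologue.
 */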

#include <mach/mach_types.h>
#include <kern/thread.h>
#include <kern/backtrace.h>
#include <vm/vm_map.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/callstack.h>
#include <kperf/ast.h>
#include <sys/errno.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

static void
callstack_fixup_user(struct callstack *cs, thread_t thread)
{
	uint64_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

#if defined(__x86_64__)
	user_addr_t sp_user;
	bool user_64;
	x86_saved_state_t *state;

	state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		sp_user = saved_state64(state)->isf.rsp;
	} else {
		sp_user = saved_state32(state)->uesp;
	}

	if (thread == current_thread()) {
		(void)copyin(sp_user, (char *)&fixup_val,
		    user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	} else {
		(void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user,
		    &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t));
	}

#elif defined(__arm64__) || defined(__arm__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		goto out;
	}

	/* encode thumb mode into low bit of PC */
	if (get_saved_state_cpsr(state) & PSR_TF) {
		cs->frames[0] |= 1ULL;
	}

	fixup_val = get_saved_state_lr(state);

#else
#error "callstack_fixup_user: unsupported architecture"
#endif

out:
	cs->frames[cs->nframes++] = fixup_val;
}
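
/*
 * Illustrative example: if a user thread is sampled in a leaf function that
 * has not yet pushed a frame, the frame walk misses the caller, but the
 * caller's address is still in LR (arm) or at the top of the user stack
 * (x86).  The fixup value appended above preserves it for symbolication.
 */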

#if defined(__x86_64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_sp_value(uintptr_t *sp_val)
{
	x86_saved_state_t *state;
	uintptr_t sp;
	bool state_64;
	uint64_t cs;
	uintptr_t top, bottom;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		sp = saved_state64(state)->isf.rsp;
	} else {
		sp = saved_state32(state)->uesp;
	}

	/* make sure the stack pointer is pointing somewhere in this stack */
	bottom = current_thread()->kernel_stack;
	top = bottom + kernel_stack_size;
	if (sp < bottom || sp >= top) {
		return KERN_FAILURE;
	}

	*sp_val = *(uintptr_t *)sp;
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

__attribute__((used))
static kern_return_t
interrupted_kernel_lr(uintptr_t *lr)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;

	/* return early if interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*lr = get_saved_state_lr(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_{sp,lr}: unsupported architecture"
#endif /* !defined(__arm__) */


static void
callstack_fixup_interrupted(struct callstack *cs)
{
	uintptr_t fixup_val = 0;
	assert(cs->nframes < MAX_CALLSTACK_FRAMES);

	/*
	 * Only provide arbitrary data on development or debug kernels.
	 */
#if DEVELOPMENT || DEBUG
#if defined(__x86_64__)
	(void)interrupted_kernel_sp_value(&fixup_val);
#elif defined(__arm64__) || defined(__arm__)
	(void)interrupted_kernel_lr(&fixup_val);
#endif /* defined(__x86_64__) */
#endif /* DEVELOPMENT || DEBUG */

	assert(cs->flags & CALLSTACK_KERNEL);
	cs->frames[cs->nframes++] = fixup_val;
}

void
kperf_continuation_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);

	thread = context->cur_thread;
	assert(thread != NULL);
	assert(thread->continuation != NULL);

	cs->flags = CALLSTACK_CONTINUATION | CALLSTACK_VALID | CALLSTACK_KERNEL;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	cs->nframes = 1;
	cs->frames[0] = VM_KERNEL_UNSLIDE(thread->continuation);
}

void
kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context)
{
	assert(cs != NULL);
	assert(context != NULL);
	assert(context->cur_thread == current_thread());

	cs->flags = CALLSTACK_KERNEL | CALLSTACK_KERNEL_WORDS;
#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1);

	cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1,
	    context->starting_fp);
	if (cs->nframes > 0) {
		cs->flags |= CALLSTACK_VALID;
		/*
		 * Fake the value pointed to by the stack pointer or the link
		 * register for symbolicators.
		 */
		cs->frames[cs->nframes] = 0;
		cs->nframes += 1;
	}

	BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, cs->nframes);
}

kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread,
    uint64_t *callStack, mach_msg_type_number_t *count,
    boolean_t user_only);

void
kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->nframes);

	cs->flags = CALLSTACK_KERNEL;

#ifdef __LP64__
	cs->flags |= CALLSTACK_64BIT;
#endif

	if (ml_at_interrupt_context()) {
		assert(thread == current_thread());
		cs->flags |= CALLSTACK_KERNEL_WORDS;
		cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames,
		    cs->nframes - 1);
		if (cs->nframes != 0) {
			callstack_fixup_interrupted(cs);
		}
	} else {
		/*
		 * Rely on the legacy CHUD backtracer to backtrace kernel stacks
		 * on other threads.
		 */
		kern_return_t kr;
		kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames,
		    &cs->nframes, FALSE);
		if (kr == KERN_SUCCESS) {
			cs->flags |= CALLSTACK_VALID;
		} else if (kr == KERN_RESOURCE_SHORTAGE) {
			cs->flags |= CALLSTACK_VALID;
			cs->flags |= CALLSTACK_TRUNCATED;
		} else {
			cs->nframes = 0;
		}
	}

	if (cs->nframes == 0) {
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK);
	}

	BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->flags, cs->nframes);
}

void
kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context)
{
	thread_t thread;
	bool user_64 = false;
	int err;

	assert(cs != NULL);
	assert(context != NULL);
	assert(cs->nframes <= MAX_CALLSTACK_FRAMES);
	assert(ml_get_interrupts_enabled() == TRUE);

	thread = context->cur_thread;
	assert(thread != NULL);

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread),
	    cs->nframes);

	cs->flags = 0;

	err = backtrace_thread_user(thread, (uintptr_t *)cs->frames,
	    cs->nframes - 1, &cs->nframes, &user_64);
	cs->flags |= CALLSTACK_KERNEL_WORDS;
	if (user_64) {
		cs->flags |= CALLSTACK_64BIT;
	}

	if (!err || err == EFAULT) {
		callstack_fixup_user(cs, thread);
		cs->flags |= CALLSTACK_VALID;
	} else {
		cs->nframes = 0;
		BUF_INFO(PERF_CS_ERROR, ERR_GETSTACK, err);
	}

	BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread),
	    cs->flags, cs->nframes);
}

static inline uintptr_t
scrub_word(uintptr_t *bt, int n_frames, int frame, bool kern)
{
	if (frame < n_frames) {
		if (kern) {
			return VM_KERNEL_UNSLIDE(bt[frame]);
		} else {
			return bt[frame];
		}
	} else {
		return 0;
	}
}
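
/*
 * Illustrative: with n_frames = 10, requests for frames 10 and 11 in the
 * final batch of 4 read back as 0 rather than stale buffer contents, and
 * kernel words are unslid before they are logged.
 */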

static inline uintptr_t
scrub_frame(uint64_t *bt, int n_frames, int frame)
{
	if (frame < n_frames) {
		return (uintptr_t)(bt[frame]);
	} else {
		return 0;
	}
}

static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
	BUF_VERB(PERF_CS_LOG | DBG_FUNC_START, cs->flags, cs->nframes);

	/* framing information for the stack */
	BUF_DATA(hcode, cs->flags, cs->nframes);

	/* how many batches of 4 */
	unsigned int nframes = cs->nframes;
	unsigned int n = nframes / 4;
	unsigned int ovf = nframes % 4;
	if (ovf != 0) {
		n++;
	}
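
	/*
	 * For example, nframes = 10 gives n = 3: two full batches, then a
	 * final batch carrying frames 8 and 9 plus two zeroes of padding
	 * from scrub_word()/scrub_frame() below.
	 */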

	bool kern = cs->flags & CALLSTACK_KERNEL;

	if (cs->flags & CALLSTACK_KERNEL_WORDS) {
		uintptr_t *frames = (uintptr_t *)cs->frames;
		for (unsigned int i = 0; i < n; i++) {
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_word(frames, nframes, j + 0, kern),
			    scrub_word(frames, nframes, j + 1, kern),
			    scrub_word(frames, nframes, j + 2, kern),
			    scrub_word(frames, nframes, j + 3, kern));
		}
	} else {
		for (unsigned int i = 0; i < n; i++) {
			uint64_t *frames = cs->frames;
			unsigned int j = i * 4;
			BUF_DATA(dcode,
			    scrub_frame(frames, nframes, j + 0),
			    scrub_frame(frames, nframes, j + 1),
			    scrub_frame(frames, nframes, j + 2),
			    scrub_frame(frames, nframes, j + 3));
		}
	}

	BUF_VERB(PERF_CS_LOG | DBG_FUNC_END, cs->flags, cs->nframes);
}

void
kperf_kcallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_KHDR, PERF_CS_KDATA);
}

void
kperf_ucallstack_log(struct callstack *cs)
{
	callstack_log(cs, PERF_CS_UHDR, PERF_CS_UDATA);
}

int
kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth)
{
	int did_pend = kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK);
	kperf_ast_set_callstack_depth(context->cur_thread, depth);

	return did_pend;
}

static kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	return ((ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ?
	    KERN_SUCCESS : KERN_FAILURE);
}
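
/*
 * A nofault copy never takes a page fault, so chudxnu_kern_read() is safe to
 * call with addresses that may not be mapped.  Illustrative use, reading one
 * word from a possibly-unmapped kernel address:
 *
 *	uint64_t word;
 *	if (chudxnu_kern_read(&word, (vm_offset_t)addr, sizeof(word)) ==
 *	    KERN_SUCCESS) {
 *		// word holds the value at addr; otherwise nothing was read
 *	}
 */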

static kern_return_t
chudxnu_task_read(
	task_t task,
	void *kernaddr,
	uint64_t usraddr,
	vm_size_t size)
{
	// ppc version ported to arm
	kern_return_t ret = KERN_SUCCESS;

	if (ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't look at tasks on interrupt stack
	}

	if (current_task() == task) {
		thread_t cur_thr = current_thread();
		vm_offset_t recover_handler = cur_thr->recover;

		if (copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
		cur_thr->recover = recover_handler;
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	return ret;
}

static inline uint64_t
chudxnu_vm_unslide(uint64_t ptr, int kaddr)
{
	if (!kaddr)
		return ptr;

	return VM_KERNEL_UNSLIDE(ptr);
}
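
/*
 * Kernel addresses are unslid so tools can match them against symbol
 * addresses from the on-disk kernel; e.g. with a hypothetical KASLR slide
 * of 0x10000000, a sampled PC of 0xffffff8012345678 would be logged as
 * 0xffffff8002345678.  User addresses pass through unchanged.
 */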

#if __arm__
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)
#define CS_FLAG_EXTRASP 1 // capture extra sp register
static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint32_t kernStackMin = thread->kernel_stack;
	uint32_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	uint32_t frame[2];
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t supervisor = FALSE;
	struct arm_saved_state *state = NULL;
	uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);

	bufferMaxIndex = *count;
	// get thread state
	if (user_only)
		state = find_user_regs(thread);
	else
		state = find_kern_regs(thread);

	if (!state) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* make sure it is safe to dereference before you do it */
	supervisor = ARM_SUPERVISOR_MODE(state->cpsr);

	/* can't take a kernel callstack if we've got a user frame */
	if (!user_only && !supervisor)
		return KERN_FAILURE;

	/*
	 * Reserve space for saving LR (and sometimes SP) at the end of the
	 * backtrace.
	 */
	if (flags & CS_FLAG_EXTRASP) {
		bufferMaxIndex -= 2;
	} else {
		bufferMaxIndex -= 1;
	}

	if (bufferMaxIndex < 2) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	currPC = (uint64_t)state->pc; /* r15 */
	if (state->cpsr & PSR_TF)
		currPC |= 1ULL; /* encode thumb mode into low bit of PC */

	currLR = (uint64_t)state->lr; /* r14 */
	currSP = (uint64_t)state->sp; /* r13 */

	fp = (uint32_t *)state->r[7]; /* frame pointer */
	topfp = fp;

	bufferIndex = 0; // start with a stack of size zero
	buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, supervisor); // save PC in position 0.

	// Now, fill buffer with stack backtraces.
	while (bufferIndex < bufferMaxIndex) {
		pc = 0ULL;
		/*
		 * Below the frame pointer, the following values are saved:
		 * -> FP
		 */

		/*
		 * Note that we read the pc even for the first stack frame
		 * (which, in theory, is always empty because the callee fills
		 * it in just before it lowers the stack).  However, if we
		 * catch the program in between filling in the return address
		 * and lowering the stack, we want to still have a valid
		 * backtrace.  FixupStack correctly disregards this value if
		 * necessary.
		 */

		if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
			/* frame pointer is invalid - stop backtracing */
			pc = 0ULL;
			break;
		}

		if (supervisor) {
			if (((uint32_t)fp > kernStackMax) ||
			    ((uint32_t)fp < kernStackMin)) {
				kr = KERN_FAILURE;
			} else {
				kr = chudxnu_kern_read(&frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame[1];
					nextFramePointer = (uint32_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}
		} else {
			kr = chudxnu_task_read(task,
			    &frame,
			    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
			    sizeof(frame));
			if (kr == KERN_SUCCESS) {
				pc = (uint64_t)frame[1];
				nextFramePointer = (uint32_t *)(frame[0]);
			} else {
				pc = 0ULL;
				nextFramePointer = 0ULL;
				kr = KERN_FAILURE;
			}
		}

		if (kr != KERN_SUCCESS) {
			pc = 0ULL;
			break;
		}

		if (nextFramePointer) {
			buffer[bufferIndex++] = chudxnu_vm_unslide(pc, supervisor);
			prevPC = pc;
		}

		if (nextFramePointer < fp)
			break;
		else
			fp = nextFramePointer;
	}

	if (bufferIndex >= bufferMaxIndex) {
		bufferIndex = bufferMaxIndex;
		kr = KERN_RESOURCE_SHORTAGE;
	} else {
		kr = KERN_SUCCESS;
	}

	// Save link register and R13 (sp) at bottom of stack (used for later fixup).
	buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, supervisor);
	if (flags & CS_FLAG_EXTRASP)
		buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, supervisor);

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal(thread, callStack, count, user_only, 0);
}
#elif __arm64__


// chudxnu_thread_get_callstack gathers a raw callstack along with any
// information needed to fix it up later (in case we stopped the program as it
// was saving values into the previous stack frame, etc.) after sampling has
// finished.
//
// For an N-entry callstack:
//
//   [0]      current pc
//   [1..N-3] stack frames (including current one)
//   [N-2]    current LR (return value if we're in a leaf function)
//   [N-1]    current r0 (in case we've saved LR in r0) (optional)
//
//
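// Illustrative 6-entry result for a thread sampled in a leaf function
// (addresses are made up):
//
//   [0] 0x00000001000014a8   pc
//   [1] 0x0000000100001720   frame 0
//   [2] 0x0000000100002934   frame 1
//   [3] 0x000000010000061c   frame 2
//   [4] 0x00000001000016f4   lr - the leaf function's return address
//   [5] 0x000000016fdff4a0   sp (only with CS_FLAG_EXTRASP)
//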
#define ARM_SUPERVISOR_MODE(cpsr) ((((cpsr) & PSR_MODE_MASK) != PSR_USER_MODE) ? TRUE : FALSE)

#define CS_FLAG_EXTRASP 1 // capture extra sp register

static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	int flags)
{
	kern_return_t kr = KERN_SUCCESS;
	task_t task;
	uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	uint64_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = 0;
	boolean_t kernel = FALSE;
	struct arm_saved_state *sstate = NULL;
	uint64_t pc = 0ULL;

	task = get_threadtask(thread);
	bufferMaxIndex = *count;
	// get thread state
	if (user_only)
		sstate = find_user_regs(thread);
	else
		sstate = find_kern_regs(thread);

	if (!sstate) {
		*count = 0;
		return KERN_FAILURE;
	}

	if (is_saved_state64(sstate)) {
		struct arm_saved_state64 *state = NULL;
		uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;
		uint64_t frame[2];

		state = saved_state64(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = PSR64_IS_KERNEL(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel)
			return KERN_FAILURE;

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = state->pc;
		currLR = state->lr;
		currSP = state->sp;

		fp = (uint64_t *)state->fp; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 0);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint64_t)fp > kernStackMax) ||
				    ((uint64_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = frame[1];
						nextFramePointer = (uint64_t *)frame[0];
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame,
				    (vm_offset_t)fp,
				    (vm_size_t)sizeof(frame));
				if (kr == KERN_SUCCESS) {
					pc = frame[1];
					nextFramePointer = (uint64_t *)(frame[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp)
				break;
			else
				fp = nextFramePointer;
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			kr = KERN_SUCCESS;
		}

		// Save link register and SP at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP)
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
	} else {
		struct arm_saved_state32 *state = NULL;
		uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL;

		/* 64-bit kernel stacks, 32-bit user stacks */
		uint64_t frame[2];
		uint32_t frame32[2];

		state = saved_state32(sstate);

		/* make sure it is safe to dereference before you do it */
		kernel = ARM_SUPERVISOR_MODE(state->cpsr);

		/* can't take a kernel callstack if we've got a user frame */
		if (!user_only && !kernel)
			return KERN_FAILURE;

		/*
		 * Reserve space for saving LR (and sometimes SP) at the end of the
		 * backtrace.
		 */
		if (flags & CS_FLAG_EXTRASP) {
			bufferMaxIndex -= 2;
		} else {
			bufferMaxIndex -= 1;
		}

		if (bufferMaxIndex < 2) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}

		currPC = (uint64_t)state->pc; /* r15 */
		if (state->cpsr & PSR_TF)
			currPC |= 1ULL; /* encode thumb mode into low bit of PC */

		currLR = (uint64_t)state->lr; /* r14 */
		currSP = (uint64_t)state->sp; /* r13 */

		fp = (uint32_t *)(uintptr_t)state->r[7]; /* frame pointer */
		topfp = fp;

		bufferIndex = 0; // start with a stack of size zero
		buffer[bufferIndex++] = chudxnu_vm_unslide(currPC, kernel); // save PC in position 0.

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, kernel, 1);

		// Now, fill buffer with stack backtraces.
		while (bufferIndex < bufferMaxIndex) {
			pc = 0ULL;
			/*
			 * Below the frame pointer, the following values are saved:
			 * -> FP
			 */

			/*
			 * Note that we read the pc even for the first stack frame
			 * (which, in theory, is always empty because the callee fills
			 * it in just before it lowers the stack).  However, if we
			 * catch the program in between filling in the return address
			 * and lowering the stack, we want to still have a valid
			 * backtrace.  FixupStack correctly disregards this value if
			 * necessary.
			 */

			if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) {
				/* frame pointer is invalid - stop backtracing */
				pc = 0ULL;
				break;
			}

			if (kernel) {
				if (((uint32_t)fp > kernStackMax) ||
				    ((uint32_t)fp < kernStackMin)) {
					kr = KERN_FAILURE;
				} else {
					kr = chudxnu_kern_read(&frame,
					    (vm_offset_t)fp,
					    (vm_size_t)sizeof(frame));
					if (kr == KERN_SUCCESS) {
						pc = (uint64_t)frame[1];
						nextFramePointer = (uint32_t *)(frame[0]);
					} else {
						pc = 0ULL;
						nextFramePointer = 0ULL;
						kr = KERN_FAILURE;
					}
				}
			} else {
				kr = chudxnu_task_read(task,
				    &frame32,
				    (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL),
				    sizeof(frame32));
				if (kr == KERN_SUCCESS) {
					pc = (uint64_t)frame32[1];
					nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]);
				} else {
					pc = 0ULL;
					nextFramePointer = 0ULL;
					kr = KERN_FAILURE;
				}
			}

			if (kr != KERN_SUCCESS) {
				pc = 0ULL;
				break;
			}

			if (nextFramePointer) {
				buffer[bufferIndex++] = chudxnu_vm_unslide(pc, kernel);
				prevPC = pc;
			}

			if (nextFramePointer < fp)
				break;
			else
				fp = nextFramePointer;
		}

		BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex);

		/* clamp callstack size to max */
		if (bufferIndex >= bufferMaxIndex) {
			bufferIndex = bufferMaxIndex;
			kr = KERN_RESOURCE_SHORTAGE;
		} else {
			/* ignore all other failures */
			kr = KERN_SUCCESS;
		}

		// Save link register and R13 (sp) at bottom of stack (used for later fixup).
		buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel);
		if (flags & CS_FLAG_EXTRASP)
			buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel);
	}

	*count = bufferIndex;
	return kr;
}

kern_return_t
chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	return chudxnu_thread_get_callstack64_internal(thread, callStack, count, user_only, 0);
}
#elif __x86_64__

#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : TRUE)
// don't try to read in the hole
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	(supervisor ? ((uint64_t)addr >= minKernAddr && (uint64_t)addr <= maxKernAddr) : \
	((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL)))
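
/*
 * "The hole" is the non-canonical region of the x86-64 address space:
 * accesses between 0x0000800000000000 and 0xFFFF7FFFFFFFFFFF fault, so user
 * frame pointers are only followed when they fall in the canonical low half
 * (up to 0x00007FFFFFFFFFFF) or high half (from 0xFFFF800000000000 up).
 */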

typedef struct _cframe64_t {
	uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel
	uint64_t caller;
	uint64_t args[0];
} cframe64_t;


typedef struct _cframe_t {
	uint32_t prev; // this is really a user32-space pointer to the previous frame
	uint32_t caller;
	uint32_t args[0];
} cframe_t;
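
/*
 * Illustrative frame layout for a 32-bit user stack (grows down):
 *
 *	higher addresses
 *	|  ...             |
 *	|  return address  |  <- fp->caller (fp + 4)
 *	|  saved %ebp      |  <- fp, fp->prev points at the caller's frame
 *	|  locals ...      |
 *	lower addresses
 *
 * do_backtrace32() records the return address at fp->caller, then follows
 * fp->prev toward higher addresses until the chain leaves the valid stack
 * range or stops increasing.
 */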

extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);

static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;
	if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if (*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	if (!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);

	// build a backtrace of this kernel state
#if __LP64__
	while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if (!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if (kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1);
			prevPC = currPC;
		}
		if (prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}


static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 32 bit state.
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uintptr_t) currFP;

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		currPC = (uint64_t) tmpWord; // promote 32 bit address

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		prevFP = (uint64_t) tmpWord; // promote 32 bit address

		if (prevFP) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if (*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE; // no frames traced

	frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);

	// build a backtrace of this 64 bit state.
	while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		// this is the address where caller lives in the user thread
		uint64_t caller = currFP + sizeof(uint64_t);

		if (!currFP) {
			currPC = 0;
			break;
		}

		if (ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		if (supervisor) {
			kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}

		if (kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t));
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}

		if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = chudxnu_vm_unslide(currPC, supervisor);
			prevPC = currPC;
		}
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

static kern_return_t
chudxnu_thread_get_callstack64_internal(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only,
	boolean_t kern_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL; // kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if (ml_at_interrupt_context()) {

		if (user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if (is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if (!ml_at_interrupt_context() && kernel_task == task) {

		if (!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if (!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);
		if (is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if (supervisor) {
		// the caller only wants a user callstack.
		if (user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if (!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */

	if (kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */

		if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
	} else if (regs64) {
		currPC = regs64->isf.rip;
	} else if (regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if (u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if (u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if (!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if (kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}

		if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if (regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if (regs32) {
		uint32_t esp = 0UL;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, TRUE);

		if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if (u_regs64 && !kern_only) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if (u_regs32 && !kern_only) {
		uint32_t esp = 0UL;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
		    bufferMaxIndex - 1, FALSE);

		if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
		    bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64_kperf(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t is_user)
{
	return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user);
}
#else /* !__arm__ && !__arm64__ && !__x86_64__ */
#error kperf: unsupported architecture
#endif /* !__arm__ && !__arm64__ && !__x86_64__ */