/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif
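/*
 * Return a backtrace of the current thread's kernel stack in bt, up to
 * max_frames return addresses.  Returns the number of frames captured.
 * A typical call (illustrative only):
 *
 *	uintptr_t frames[16];
 *	uint32_t nframes = backtrace(frames, 16);
 *
 * noinline guarantees this function has a stack frame of its own, so the
 * trace reliably begins at our caller.
 */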
uint32_t __attribute__((noinline))
backtrace(uintptr_t *bt, uint32_t max_frames)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0));
}

/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It's fast because it does no checking to make sure there
 * isn't bad data.  Since it's only called from threads that we're going to
 * keep executing, if there's bad data we were going to die eventually.  If
 * this function is inlined, it doesn't record the frame of the function it's
 * inside (because there's no stack frame).
 */
uint32_t __attribute__((noinline,not_tail_called))
backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	uint32_t frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(max_frames > 0);

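	/* bound the walk to frame pointers within this thread's kernel stack */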
	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

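	/* walk the chain of saved frame pointers, recording one return address per frame */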
	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds check, as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

		/* the return address is stored one word above the frame pointer */
		bt[frame_index++] = *(fp + 1);

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}

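/*
 * Fetch the PC and frame pointer of the kernel code that was interrupted on
 * the current CPU, from the CPU's saved interrupt state.  Fails if no
 * interrupt state has been saved or if the interrupt arrived while running
 * in user space.
 */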
#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if a thread in user space was interrupted */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;
	bool state_64;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}
	state_64 = is_saved_state64(state);

	/* return early if a thread in user space was interrupted */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if a thread in user space was interrupted */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */

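/*
 * Capture a backtrace of the kernel stack that was interrupted on this CPU.
 * Must be called from interrupt context.  The interrupted PC is recorded as
 * the first entry in bt and the remaining entries are walked from the
 * interrupted frame pointer.  Returns 0 if the interrupt arrived from user
 * space or no saved state is available.
 */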
uint32_t
backtrace_interrupted(uintptr_t *bt, uint32_t max_frames)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp) + 1;
}

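/*
 * Capture a backtrace of the current thread's user stack.  A thin wrapper
 * around backtrace_thread_user(); see the contract documented there.
 */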
int
backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out,
	bool *user_64_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames, frames_out,
		user_64_out);
}

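/*
 * Capture a backtrace of a thread's user stack into bt, returning 0 on
 * success or an errno value on failure.  On return, *frames_out holds the
 * number of frames captured and *user_64_out is set if the thread is running
 * a 64-bit process (so each of its frames is two 64-bit words).  A sketch of
 * a caller (illustrative only, error handling elided):
 *
 *	uintptr_t frames[32];
 *	uint32_t nframes;
 *	bool user_64;
 *	int error = backtrace_user(frames, 32, &nframes, &user_64);
 *
 * When thread is not the current thread, its task's map is switched in
 * temporarily so that copyin() reads from the correct address space.
 */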
int
backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames,
	uint32_t *frames_out, bool *user_64_out)
{
	bool user_64;
	uintptr_t pc, fp, next_fp;
	vm_map_t map = NULL, old_map = NULL;
	uint32_t frame_index = 0;
	int err = 0;
	size_t frame_size;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(frames_out != NULL);
	assert(user_64_out != NULL);

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);

	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	/*
	 * ARM stack frames are normally aligned to 16 bytes, but only reject
	 * frame pointers that aren't 4-byte aligned here.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#elif defined(__arm__)

	/*
	 * ARM stack frames are normally aligned to 16 bytes, but only reject
	 * frame pointers that aren't 4-byte aligned here.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	if (max_frames == 0) {
		goto out;
	}

	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	/* copyin() can fault, so don't attempt it with interrupts disabled */
	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		return EINVAL;
	}

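	/*
	 * A user stack frame is a pair of words: the saved frame pointer,
	 * then the return address.  The word size depends on whether the
	 * process is 64-bit, so declare both layouts and pick the frame
	 * size accordingly below.
	 */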
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? sizeof(uint64_t) : sizeof(uint32_t));

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			return EINVAL;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

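	/* copy in one frame at a time, recording its return address */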
	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
		bt[frame_index++] = ret_addr;

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	*user_64_out = user_64;
	*frames_out = frame_index;
	return err;
#undef INVALID_USER_FP
}