// Copyright (c) 2016-2021 Apple Inc. All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <machine/machine_routines.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif // defined(__arm64__)

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif // defined(HAS_APPLE_PAC)

#if __x86_64__
static void
_backtrace_packed_out_of_reach(void)
{
	/*
	 * This symbol is used to replace frames that have been "JIT-ed"
	 * or dynamically inserted in the kernel by some kext in a regular
	 * VM mapping that might be outside of the filesets.
	 *
	 * This is an Intel-only issue.
	 */
}
#endif

// Pack an address according to a particular packing format.
static size_t
_backtrace_pack_addr(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    uintptr_t addr)
{
	switch (packing) {
	case BTP_NONE:
		if (dst_size >= sizeof(addr)) {
			memcpy(dst, &addr, sizeof(addr));
		}
		return sizeof(addr);
	case BTP_KERN_OFFSET_32:;
		uintptr_t addr_delta = addr - vm_kernel_stext;
		int32_t addr_packed = (int32_t)addr_delta;
#if __x86_64__
		if ((uintptr_t)(int32_t)addr_delta != addr_delta) {
			addr = (vm_offset_t)&_backtrace_packed_out_of_reach;
			addr_delta = addr - vm_kernel_stext;
			addr_packed = (int32_t)addr_delta;
		}
#else
		assert((uintptr_t)(int32_t)addr_delta == addr_delta);
#endif
		if (dst_size >= sizeof(addr_packed)) {
			memcpy(dst, &addr_packed, sizeof(addr_packed));
		}
		return sizeof(addr_packed);
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
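
/*
 * Illustration of BTP_KERN_OFFSET_32 (a sketch with made-up addresses): with
 * vm_kernel_stext at 0xfffffff007004000, the return address
 * 0xfffffff007008120 packs to the 32-bit offset 0x4120, so each frame takes
 * 4 bytes instead of 8.
 */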

// Since this function is only called from threads that we are going to keep
// executing, bad frame data will eventually crash the system anyway.  If this
// function were inlined, it would not record the frame of the function it was
// inlined into (there would be no separate stack frame), so prevent that.
static size_t __attribute__((noinline, not_tail_called))
backtrace_internal(backtrace_pack_t packing, uint8_t *bt,
    size_t btsize, void *start_frame, int64_t addr_offset,
    backtrace_info_t *info_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	size_t size_used = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;
	assert(bt != NULL);
	assert(btsize > 0);

	fp = start_frame;
#if defined(HAS_APPLE_PAC)
	fp = ptrauth_strip(fp, ptrauth_key_frame_pointer);
#endif
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp) || ml_addr_in_non_xnu_stack((uintptr_t)fp);

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && size_used < btsize) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
#if defined(HAS_APPLE_PAC)
		next_fp = ptrauth_strip(next_fp, ptrauth_key_frame_pointer);
#endif
		// Return address is one word higher than frame pointer.
		uintptr_t ret_addr = *(fp + 1);

		// If the frame pointer is 0, backtracing has reached the top of
		// the stack and there is no return address.  Some stacks might
		// not have set this up, so bounds-check the next frame pointer
		// as well.
		in_valid_stack = IN_STK_BOUNDS(next_fp) ||
		    ml_addr_in_non_xnu_stack((uintptr_t)next_fp);

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		// Return addresses are signed under the arm64e ABI, so strip
		// the signature.
		uintptr_t pc = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else // defined(HAS_APPLE_PAC)
		uintptr_t pc = ret_addr;
#endif // !defined(HAS_APPLE_PAC)
		pc += addr_offset;
		size_used += _backtrace_pack_addr(packing, bt + size_used,
		    btsize - size_used, pc);

		// Stacks grow down; backtracing should always be moving to higher
		// addresses except when a frame is stitching between two different
		// stacks.
		if (next_fp <= fp) {
			// This checks whether the thread is switching between the
			// kernel stack and a non-XNU stack (or between one non-XNU
			// stack and another, as there can be more than one).  Stack
			// switching is the only legitimate reason for the next FP
			// to be lower than the current FP, so otherwise stop the
			// backtrace.
			if (!ml_addr_in_non_xnu_stack((uintptr_t)fp) &&
			    !ml_addr_in_non_xnu_stack((uintptr_t)next_fp)) {
				break;
			}
		}
		fp = next_fp;
	}

	if (info_out) {
		backtrace_info_t info = BTI_NONE;
#if __LP64__
		info |= BTI_64_BIT;
#endif
		if (fp != NULL && size_used >= btsize) {
			info |= BTI_TRUNCATED;
		}
		*info_out = info;
	}

	return size_used;
#undef IN_STK_BOUNDS
}
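
/*
 * Frame record layout assumed by the walk above: both the AArch64 AAPCS and
 * the x86-64 System V ABI save the caller's frame pointer and the return
 * address adjacently:
 *
 *	higher addresses
 *	+----------------+
 *	| return address |  <- fp + sizeof(uintptr_t)
 *	+----------------+
 *	| caller's fp    |  <- fp
 *	+----------------+
 *	lower addresses
 */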

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
#if defined(__x86_64__)
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	// Return early if the interrupted thread was in user space.
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	// Return early if the interrupted thread was in user space.
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = ml_get_backtrace_pc(state);
	*fp = get_saved_state_fp(state);

#else // !defined(__arm64__) && !defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	return KERN_SUCCESS;
}

__attribute__((always_inline))
static uintptr_t
_backtrace_preamble(struct backtrace_control *ctl, uintptr_t *start_frame_out)
{
	backtrace_flags_t flags = ctl ? ctl->btc_flags : 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = 0;
	if (flags & BTF_KERN_INTERRUPTED) {
		assert(ml_at_interrupt_context() == TRUE);

		uintptr_t fp;
		kern_return_t kr = interrupted_kernel_pc_fp(&pc, &fp);
		if (kr != KERN_SUCCESS) {
			return 0;
		}
		*start_frame_out = start_frame ?: fp;
	} else if (start_frame == 0) {
		*start_frame_out = (uintptr_t)__builtin_frame_address(0);
	} else {
		*start_frame_out = start_frame;
	}
	return pc;
}
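
/*
 * Sketch of requesting an interrupted-kernel backtrace (assumes the caller
 * is running in interrupt context, as the assert above demands):
 *
 *	struct backtrace_control ctl = {
 *		.btc_flags = BTF_KERN_INTERRUPTED,
 *	};
 *	uintptr_t frames[32];
 *	unsigned int len = backtrace(frames, 32, &ctl, NULL);
 */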

unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames,
    struct backtrace_control *ctl, backtrace_info_t *info_out)
{
	unsigned int len_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		bt[0] = pc;
		if (max_frames == 1) {
			return 1;
		}
		bt += 1;
		max_frames -= 1;
		len_adj += 1;
	}

	size_t size = backtrace_internal(BTP_NONE, (uint8_t *)bt,
	    max_frames * sizeof(uintptr_t), (void *)start_frame,
	    ctl ? ctl->btc_addr_offset : 0, info_out);
	// NULL-terminate the list, if space is available.
	unsigned int len = size / sizeof(uintptr_t);
	if (len != max_frames) {
		bt[len] = 0;
	}

	return len + len_adj;
}
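
/*
 * Example usage (a minimal sketch):
 *
 *	uintptr_t frames[32] = { 0 };
 *	backtrace_info_t info = BTI_NONE;
 *	unsigned int len = backtrace(frames, 32, NULL, &info);
 *	for (unsigned int i = 0; i < len; i++) {
 *		printf("%u: 0x%lx\n", i, frames[i]);
 *	}
 *	if (info & BTI_TRUNCATED) {
 *		printf("(truncated)\n");
 *	}
 */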

// Backtrace the current thread's kernel stack as a packed representation.
size_t
backtrace_packed(backtrace_pack_t packing, uint8_t *bt, size_t btsize,
    struct backtrace_control *ctl,
    backtrace_info_t *info_out)
{
	unsigned int size_adj = 0;
	uintptr_t start_frame = ctl ? ctl->btc_frame_addr : 0;
	uintptr_t pc = _backtrace_preamble(ctl, &start_frame);
	if (pc) {
		size_adj = _backtrace_pack_addr(packing, bt, btsize, pc);
		if (size_adj >= btsize) {
			return size_adj;
		}
		btsize -= size_adj;
	}

	size_t written_size = backtrace_internal(packing, bt + size_adj, btsize,
	    (void *)start_frame, ctl ? ctl->btc_addr_offset : 0, info_out);
	return written_size + size_adj;
}

// Convert an array of addresses to a packed representation.
size_t
backtrace_pack(backtrace_pack_t packing, uint8_t *dst, size_t dst_size,
    const uintptr_t *src, unsigned int src_len)
{
	size_t dst_offset = 0;
	for (unsigned int i = 0; i < src_len; i++) {
		size_t pack_size = _backtrace_pack_addr(packing, dst + dst_offset,
		    dst_size - dst_offset, src[i]);
		if (dst_offset + pack_size >= dst_size) {
			return dst_offset;
		}
		dst_offset += pack_size;
	}
	return dst_offset;
}

// Convert a packed backtrace to an array of addresses.
unsigned int
backtrace_unpack(backtrace_pack_t packing, uintptr_t *dst, unsigned int dst_len,
    const uint8_t *src, size_t src_size)
{
	switch (packing) {
	case BTP_NONE:;
		size_t unpack_size = MIN(dst_len * sizeof(uintptr_t), src_size);
		memmove(dst, src, unpack_size);
		return (unsigned int)(unpack_size / sizeof(uintptr_t));
	case BTP_KERN_OFFSET_32:;
		unsigned int src_len = src_size / sizeof(int32_t);
		unsigned int unpack_len = MIN(src_len, dst_len);
		for (unsigned int i = 0; i < unpack_len; i++) {
			int32_t addr = 0;
			memcpy(&addr, src + i * sizeof(int32_t), sizeof(int32_t));
			dst[i] = vm_kernel_stext + (uintptr_t)addr;
		}
		return unpack_len;
	default:
		panic("backtrace: unknown packing format %d", packing);
	}
}
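
/*
 * Round-trip sketch (assumes addrs[] holds addrs_len valid kernel text
 * addresses):
 *
 *	uint8_t packed[128];
 *	size_t packed_size = backtrace_pack(BTP_KERN_OFFSET_32, packed,
 *	    sizeof(packed), addrs, addrs_len);
 *	uintptr_t unpacked[32];
 *	unsigned int len = backtrace_unpack(BTP_KERN_OFFSET_32, unpacked, 32,
 *	    packed, packed_size);
 */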

static errno_t
_backtrace_copyin(void * __unused ctx, void *dst, user_addr_t src, size_t size)
{
	return copyin((user_addr_t)src, dst, size);
}

errno_t
backtrace_user_copy_error(void *ctx, void *dst, user_addr_t src, size_t size)
{
#pragma unused(ctx, dst, src, size)
	return EFAULT;
}

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    const struct backtrace_control *ctl_in,
    struct backtrace_user_info *info_out)
{
	static const struct backtrace_control ctl_default = {
		.btc_user_copy = _backtrace_copyin,
	};
	const struct backtrace_control *ctl = ctl_in ?: &ctl_default;
	uintptr_t pc = 0, next_fp = 0;
	uintptr_t fp = ctl->btc_frame_addr;
	bool custom_fp = fp != 0;
	int64_t addr_offset = ctl ? ctl->btc_addr_offset : 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int error = 0;
	size_t frame_size = 0;
	bool truncated = false;
	bool user_64 = false;
	bool allow_async = true;
	bool has_async = false;
	uintptr_t async_frame_addr = 0;
	unsigned int async_index = 0;

	backtrace_user_copy_fn copy = ctl->btc_user_copy ?: _backtrace_copyin;
	bool custom_copy = copy != _backtrace_copyin;
	void *ctx = ctl->btc_user_copy_context;

	void *thread = ctl->btc_user_thread;
	void *cur_thread = NULL;
	if (thread == NULL) {
		cur_thread = current_thread();
		thread = cur_thread;
	}
	task_t task = get_threadtask(thread);

	assert(task != NULL);
	assert(bt != NULL);
	assert(max_frames > 0);

	if (!custom_copy) {
		assert(ml_get_interrupts_enabled() == TRUE);
		if (!ml_get_interrupts_enabled()) {
			error = EDEADLK;
		}

		if (cur_thread == NULL) {
			cur_thread = current_thread();
		}
		if (thread != cur_thread) {
			map = get_task_map_reference(task);
			if (map == NULL) {
				error = ENOMEM;
				goto out;
			}
			old_map = vm_map_switch(map);
		}
	}

#define SWIFT_ASYNC_FP_BIT (0x1ULL << 60)
#define SWIFT_ASYNC_FP(FP) (((FP) & SWIFT_ASYNC_FP_BIT) != 0)
#define SWIFT_ASYNC_FP_CLEAR(FP) ((FP) & ~SWIFT_ASYNC_FP_BIT)
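
/*
 * Illustration (a sketch with a made-up pointer): Swift concurrency sets
 * bit 60 of a saved frame pointer to mark a frame with an associated async
 * context, so a saved value of 0x100000016fdff200 reads as async, and
 * SWIFT_ASYNC_FP_CLEAR() recovers the real pointer 0x000000016fdff200.
 */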

#if defined(__x86_64__)

// Don't allow a malformed user stack to copy arbitrary kernel data.
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = fp != 0 ? fp : saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = fp != 0 ? fp : saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		error = EINVAL;
		goto out;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = fp != 0 ? fp : get_saved_state_fp(state);

	// ARM expects stack frames to be aligned to 16 bytes; this check only
	// rejects pointers missing the 4-byte alignment that even 32-bit
	// frames must have.
#define INVALID_USER_FP(FP) (((FP) & 0x3UL) != 0UL)

#else // defined(__arm64__) || defined(__x86_64__)
#error "unsupported architecture"
#endif // !defined(__arm64__) && !defined(__x86_64__)

	// Only capture the saved-state PC when there is no custom frame
	// pointer to walk from.
	if (!ctl || ctl->btc_frame_addr == 0) {
		bt[frame_index++] = pc + addr_offset;
	}

	if (frame_index >= max_frames) {
		goto out;
	}

	if (fp == 0) {
		// If the FP is zeroed, then there's no stack to walk, by design. This
		// happens for workq threads that are being sent back to user space or
		// during boot-strapping operations on other kinds of threads.
		goto out;
	} else if (INVALID_USER_FP(fp)) {
		// Still capture the PC in this case, but mark the stack as truncated
		// and "faulting." (Using the frame pointer on a call stack would cause
		// an exception.)
		error = EFAULT;
		truncated = true;
		goto out;
	}

	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	while (fp != 0 && frame_index < max_frames) {
		error = copy(ctx, (char *)&frame, fp, frame_size);
		if (error) {
			truncated = true;
			goto out;
		}

		// Capture this return address before tripping over any errors finding
		// the next frame to follow.
		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		// Return addresses are signed under the arm64e ABI, so strip
		// off the auth bits.
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address) + addr_offset;
#else // defined(HAS_APPLE_PAC)
		bt[frame_index++] = ret_addr + addr_offset;
#endif // !defined(HAS_APPLE_PAC)

		// Find the next frame to follow.
		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;
		bool async_frame = allow_async && SWIFT_ASYNC_FP(next_fp);
		// There is no 32-bit ABI for Swift async call stacks.
		if (user_64 && async_frame) {
			async_index = frame_index - 1;
			// The async context pointer is just below the stack frame.
			user_addr_t async_ctx_ptr = fp - 8;
			user_addr_t async_ctx = 0;
			error = copy(ctx, (char *)&async_ctx, async_ctx_ptr,
			    sizeof(async_ctx));
			if (error) {
				goto out;
			}
#if defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)ptrauth_strip((void *)async_ctx,
			    ptrauth_key_process_dependent_data);
#else // defined(HAS_APPLE_PAC)
			async_frame_addr = (uintptr_t)async_ctx;
#endif // !defined(HAS_APPLE_PAC)
			has_async = true;
			allow_async = false;
		}
		next_fp = SWIFT_ASYNC_FP_CLEAR(next_fp);
#if defined(HAS_APPLE_PAC)
		next_fp = (uintptr_t)ptrauth_strip((void *)next_fp,
		    ptrauth_key_frame_pointer);
#endif // defined(HAS_APPLE_PAC)
		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		// Stacks grow down, so backtracing should move to higher
		// addresses, unless a custom frame pointer was provided, in
		// which case this may be walking an async stack, whose frames
		// are allocated on the heap in any order.
		if ((next_fp == fp) || (!custom_fp && next_fp < fp)) {
			break;
		}
		fp = next_fp;
	}

out:
	if (old_map != NULL) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	// NULL-terminate the list, if space is available.
	if (frame_index < max_frames) {
		bt[frame_index] = 0;
	}

	if (info_out) {
		info_out->btui_error = error;
		backtrace_info_t info = user_64 ? BTI_64_BIT : BTI_NONE;
		bool out_of_space = !INVALID_USER_FP(fp) && frame_index == max_frames;
		if (truncated || out_of_space) {
			info |= BTI_TRUNCATED;
		}
		if (out_of_space && error == 0) {
			info_out->btui_next_frame_addr = fp;
		}
		info_out->btui_info = info;
		info_out->btui_async_start_index = async_index;
		info_out->btui_async_frame_addr = async_frame_addr;
	}

	return frame_index;
}
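
/*
 * Example usage (a sketch; real callers often supply a backtrace_control
 * with a custom copy routine or start frame):
 *
 *	uintptr_t frames[64];
 *	struct backtrace_user_info info = { 0 };
 *	unsigned int len = backtrace_user(frames, 64, NULL, &info);
 *	if (info.btui_error == 0) {
 *		// frames[0 .. len) holds the user call stack, ordered from
 *		// callee to caller.
 *	}
 */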