/*
 * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// NOTE: This file is only c++ so I can get static initialisers going
#include <libkern/OSDebug.h>
#include <IOKit/IOLib.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/locks.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>
#include <kern/thread.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

extern int etext;
__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kern/printf.c
extern boolean_t doprnt_hide_pointers;

// From osfmk/kmod.c
extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);

extern addr64_t kvtophys(vm_offset_t va);
#if __arm__
extern int copyinframe(vm_address_t fp, char *frame);
#elif defined(__arm64__)
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
#endif

__END_DECLS

extern lck_grp_t * IOLockGroup;

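// Serialises the report output below so backtraces from concurrent callers do not interleave.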
static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);

/* Report a message with a 7 entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
	char buf[128];
	void *bt[9] = {};
	const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
	va_list listp;

	// Ignore our own and our caller's stack frames, skipping frames 0 & 1
	(void) OSBacktrace(bt, cnt);

	va_start(listp, str);
	vsnprintf(buf, sizeof(buf), str, listp);
	va_end(listp);

	lck_mtx_lock(sOSReportLock);
	{
		boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
		doprnt_hide_pointers = FALSE;
		printf("%s\nBacktrace 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", buf,
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[2]), (unsigned long) VM_KERNEL_UNSLIDE(bt[3]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[4]), (unsigned long) VM_KERNEL_UNSLIDE(bt[5]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[6]), (unsigned long) VM_KERNEL_UNSLIDE(bt[7]),
		    (unsigned long) VM_KERNEL_UNSLIDE(bt[8]));
		kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2, TRUE);
		doprnt_hide_pointers = old_doprnt_hide_pointers;
	}
	lck_mtx_unlock(sOSReportLock);
}
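
/*
 * Typical use (illustrative sketch; the driver name and variable are made up):
 *
 *     OSReportWithBacktrace("AppleSampleDriver: unexpected state %d", state);
 *
 * logs the formatted message plus the unslid return addresses of the calling
 * frames via printf(), and kmod_dump_log() reports which loaded kexts, if any,
 * contain those addresses.
 */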

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();

#if __x86_64__
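/*
 * Frame layout assumed by the x86_64 walker below: %rbp points at the saved
 * caller %rbp, with the return address 8 bytes above it (x86_64_RETURN_OFFSET).
 * Candidate frame pointers are validated before they are dereferenced.
 */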
#define x86_64_RETURN_OFFSET 8
static unsigned int
x86_64_validate_raddr(vm_offset_t raddr)
{
	return (raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
	       (raddr < VM_MAX_KERNEL_ADDRESS);
}
static unsigned int
x86_64_validate_stackptr(vm_offset_t stackptr)
{
	/* Existence and alignment check
	 */
	if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) {
		return 0;
	}

	/* Is a virtual->physical translation present?
	 */
	if (!kvtophys(stackptr)) {
		return 0;
	}

	/* Check if the return address lies on the same page;
	 * If not, verify that a translation exists.
	 */
	if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
	    !kvtophys(stackptr + x86_64_RETURN_OFFSET)) {
		return 0;
	}
	return 1;
}
#endif

void
OSPrintBacktrace(void)
{
	void * btbuf[20];
	int tmp = OSBacktrace(btbuf, 20);
	int i;
	for (i = 0; i < tmp; i++) {
		kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
	}
}

unsigned
OSBacktrace(void **bt, unsigned maxAddrs)
{
	unsigned frame;
	if (!current_thread()) {
		return 0;
	}

#if __x86_64__
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
	vm_offset_t stackptr, stackptr_prev, raddr;
	unsigned frame_index = 0;
	/* Obtain current frame pointer */

	__asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr));

	if (!x86_64_validate_stackptr(stackptr)) {
		goto pad;
	}

	raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

	if (!x86_64_validate_raddr(raddr)) {
		goto pad;
	}

	bt[frame_index++] = (void *) raddr;

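	/*
	 * Walk the chain of saved frame pointers: each frame holds the caller's
	 * %rbp at offset 0 and its return address at x86_64_RETURN_OFFSET.  Stop
	 * at the first frame that fails validation, moves backwards, or jumps an
	 * implausibly large distance.
	 */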
	for (; frame_index < maxAddrs; frame_index++) {
		stackptr_prev = stackptr;
		stackptr = *((vm_offset_t *) stackptr_prev);

		if (!x86_64_validate_stackptr(stackptr)) {
			break;
		}
		/* Stack grows downwards */
		if (stackptr < stackptr_prev) {
			break;
		}

		if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) {
			break;
		}

		raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

		if (!x86_64_validate_raddr(raddr)) {
			break;
		}

		bt[frame_index] = (void *) raddr;
	}
pad:
	frame = frame_index;

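	/* NULL-fill any remaining slots in the caller's buffer. */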
	for (; frame_index < maxAddrs; frame_index++) {
		bt[frame_index] = (void *) NULL;
	}
#elif __arm__ || __arm64__
	uint32_t i = 0;
	uintptr_t frameb[2];
	uintptr_t fp = 0;

	// get the current frame pointer for this thread
#if defined(__arm__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0x3) == 0)
	__asm__ volatile ("mov %0,r7" : "=r" (fp));
#elif defined(__arm64__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0xf) == 0)
	__asm__ volatile ("mov %0, fp" : "=r" (fp));
#else
#error Unknown architecture.
#endif

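	// On arm/arm64 the frame pointer addresses a two-word frame record:
	// word 0 is the caller's frame pointer and word 1 is the saved link
	// register (the return address); copyinframe() copies that pair into frameb[].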
	// now crawl up the stack recording the link value of each frame
	do {
		// check bounds
		if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
			break;
		}
		// safely read frame
#ifdef __arm64__
		if (copyinframe(fp, (char*)frameb, TRUE) != 0) {
#else
		if (copyinframe(fp, (char*)frameb) != 0) {
#endif
			break;
		}

		// No need to use copyin as this is always a kernel address, see check above
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		bt[i] = ptrauth_strip((void*)frameb[1], ptrauth_key_return_address); // link register
#else
		bt[i] = (void*)frameb[1]; // link register
#endif
		fp = frameb[0];
#if defined(HAS_APPLE_PAC)
		fp = (uintptr_t)ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
	} while (++i < maxAddrs);
	frame = i;
#else
#error arch
#endif
	return frame;
}