/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>
#include <arm/exception.h>
#include <arm/pmap.h>
#include <arm64/proc_reg.h>
#include <arm/thread.h>
#include <arm/trap_internal.h>
#include <arm/cpu_data_internal.h>
#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>
#include <arm/misc_protos.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif

void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern bool machine_trace_thread_validate_kva(vm_offset_t addr);

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
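/*
 * Build a KDP_EXCEPTION request in place over 'pkt', carrying a single
 * exception record, and flag that an ack is now expected from the remote
 * debugger. The aligned local copy avoids unaligned access to the packet
 * buffer.
 */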
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t  exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}

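/*
 * Validate an incoming KDP_EXCEPTION reply. If its sequence number matches
 * the outstanding exception, clear the pending-ack state and advance the
 * sequence. Returns TRUE for any well-formed ack, FALSE otherwise.
 */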
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
	kdp_exception_ack_t aligned_pkt;
	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

	if ((unsigned)len < sizeof(*rq)) {
		return FALSE;
	}

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));

	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
		return FALSE;
	}

	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

	if (rq->hdr.seq == kdp.exception_seq) {
		kdp.exception_ack_needed = FALSE;
		kdp.exception_seq++;
	}
	return TRUE;
}

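/*
 * Marshal the debugger-stopped CPU's saved integer state into an
 * ARM_THREAD_STATE64 structure for the remote debugger.
 */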
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

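/*
 * KDP register-read handler. Only the 64-bit integer state is backed by
 * real data; VFP reads return a zeroed arm_vfp_state.
 */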
kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
	switch (flavor) {
#if defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_readregs THREAD_STATE64\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE64_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_readregs THREAD_FPSTATE\n"));
		bzero((char *) data, sizeof(struct arm_vfp_state));
		*size = ARM_VFP_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_readregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

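/*
 * Inverse of kdp_getintegerstate(): apply ARM_THREAD_STATE64 contents
 * supplied by the remote debugger to the saved exception state.
 */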
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	/*
	 * thread_state64_to_saved_state() expects the target thread to be in EL0
	 * state and ignores attempts to change many CPSR bits.
	 * kdp_setintegerstate() is rarely used and is gated behind significant
	 * security boundaries. So rather than creating a variant of
	 * thread_state64_to_saved_state() just for kdp_setintegerstate(), it's
	 * simpler to reset CPSR.M before converting, then adjust CPSR after
	 * conversion.
	 */
	uint32_t cpsr = get_saved_state_cpsr(saved_state);
	cpsr &= ~(PSR64_MODE_EL_MASK);
	cpsr |= PSR64_MODE_EL0;
	set_saved_state_cpsr(saved_state, cpsr);
	thread_state64_to_saved_state(&thread_state64, saved_state);
	set_saved_state_cpsr(saved_state, thread_state64.cpsr);
#else
#error Unknown architecture.
#endif
}

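/*
 * KDP register-write handler. VFP writes are accepted but silently
 * discarded.
 */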
kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
	switch (flavor) {
#if defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_writeregs THREAD_STATE64\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_writeregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

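/*
 * Report host information to the debugger: a single-CPU mask plus the boot
 * processor's CPU type and subtype.
 */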
void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
	hostinfo->cpus_mask = 1;
	hostinfo->cpu_type = slot_type(0);
	hostinfo->cpu_subtype = slot_subtype(0);
}

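/*
 * Panic from within KDP: print the message with a "kdp panic:" prefix,
 * then spin forever rather than attempting to unwind out of the debugger
 * context.
 */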
__attribute__((noreturn))
void
kdp_panic(const char * fmt, ...)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
	char kdp_fmt[256];
	va_list args;

	va_start(args, fmt);
	(void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);
	vprintf(kdp_fmt, args);
	va_end(args);

	while (1) {
	}
#pragma clang diagnostic pop
}

int
kdp_intr_disbl(void)
{
	return splhigh();
}

void
kdp_intr_enbl(int s)
{
	splx(s);
}

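/*
 * Busy-wait on behalf of the debugger. Note that the requested microsecond
 * count is scaled down by a factor of 100 before being handed to delay().
 */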
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}

void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}

int
kdp_getc(void)
{
	return console_try_read_char();
}

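/*
 * Report the 4-byte breakpoint opcode (the first GDB trap encoding) for
 * the debugger to plant when setting breakpoints.
 */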
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
	*size = sizeof(uint32_t);
}

void
kdp_sync_cache(void)
{
}

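/*
 * The I/O-port and MSR request handlers below exist to satisfy the KDP
 * protocol; the underlying operations are x86 concepts, so they are no-op
 * stubs on ARM.
 */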
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

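/*
 * Debugger trap entry. After handle_debugger_trap() returns, advance the
 * saved PC past the trapping instruction if it was one of the two GDB trap
 * encodings. With pointer authentication, the saved thread state is
 * signed, so the PC update must go through MANIPULATE_SIGNED_THREAD_STATE()
 * to keep the signature valid.
 */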
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm64__)
	assert(is_saved_state64(saved_state));

#if HAS_APPLE_PAC
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
	    "ldr w6, [x1] \n"
	    "mov w7, %[GDB_TRAP_INSTR1_L] \n"
	    "movk w7, %[GDB_TRAP_INSTR1_H], lsl #16 \n"
	    "cmp w6, w7 \n"
	    "b.eq 1f \n"
	    "mov w7, %[GDB_TRAP_INSTR2_L] \n"
	    "movk w7, %[GDB_TRAP_INSTR2_H], lsl #16 \n"
	    "cmp w6, w7 \n"
	    "b.ne 0f \n"
	    "1: \n"
	    "add x1, x1, #4 \n"
	    "str x1, [x0, %[SS64_PC]] \n",
	    [GDB_TRAP_INSTR1_L] "i" (GDB_TRAP_INSTR1 & 0xFFFF),
	    [GDB_TRAP_INSTR1_H] "i" (GDB_TRAP_INSTR1 >> 16),
	    [GDB_TRAP_INSTR2_L] "i" (GDB_TRAP_INSTR2 & 0xFFFF),
	    [GDB_TRAP_INSTR2_H] "i" (GDB_TRAP_INSTR2 >> 16)
	    );
#else
	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handle
	 * traps to the debugger, we should identify both variants and
	 * advance the PC past either of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		saved_state64(saved_state)->pc += 4;
	}
#endif

#else
#error Unknown architecture.
#endif
}

#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));

/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}

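/*
 * Walk the kernel frame-pointer chain of 'thread', storing one unslid
 * (and, on arm64e, PAC-stripped) return address per frame into 'tracepos'.
 * Stops after 'nframes' frames, at 'tracebound', or at the first frame
 * that fails validation. Returns the number of bytes written.
 */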
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    uint32_t * thread_trace_flags)
{
#if defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t pc = 0;
	vm_offset_t fp = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	vm_offset_t kern_virt_addr = 0;

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	struct arm_saved_state *state = thread->machine.kpcb;
	if (state != NULL) {
		fp = state->ss_64.fp;

		prevlr = state->ss_64.lr;
		pc = state->ss_64.pc;
		sp = state->ss_64.sp;
	} else {
		/* kstackptr may not always be there, so recompute it */
		arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

		fp = kstate->fp;
		prevlr = kstate->lr;
		pc = kstate->pc_was_in_userspace ? (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer) : 0;
		sp = kstate->sp;
	}

	stacklimit = VM_MAX_KERNEL_ADDRESS;
	stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;

	if (!prevlr && !fp && !sp && !pc) {
		return 0;
	}

	prevlr = VM_KERNEL_UNSLIDE(prevlr);

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			bool switched_stacks = false;

			/*
			 * As a special case, sometimes we are backtracing out of an interrupt
			 * handler, and the stack jumps downward because of the memory allocation
			 * pattern during early boot due to KASLR.
			 */
			int cpu;
			int max_cpu = ml_get_max_cpu_number();

			for (cpu = 0; cpu <= max_cpu; cpu++) {
				cpu_data_t *target_cpu_datap;

				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
				if (target_cpu_datap == (cpu_data_t *)NULL) {
					continue;
				}

				if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
					switched_stacks = true;
					break;
				}
#if defined(__arm64__)
				if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
					switched_stacks = true;
					break;
				}
#endif
			}

			/**
			 * The stack could be "growing upwards" because this frame is
			 * stitching two different stacks together. There can be more than
			 * one non-XNU stack so if both frames are in non-XNU stacks but it
			 * looks like the stack is growing upward, then assume that we've
			 * switched from one non-XNU stack to another.
			 */
			if ((ml_addr_in_non_xnu_stack(prevfp) != ml_addr_in_non_xnu_stack(fp)) ||
			    (ml_addr_in_non_xnu_stack(prevfp) && ml_addr_in_non_xnu_stack(fp))) {
				switched_stacks = true;
			}

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = fp + ARM64_LR_OFFSET;
		bool ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}

			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
		prevlr = VM_KERNEL_UNSLIDE(prevlr);

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = fp;
		ok = machine_trace_thread_validate_kva(kern_virt_addr);
		if (!ok) {
			if (thread_trace_flags != NULL) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			fp = 0;
			break;
		}

		fp = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* frame pointers on stack signed by arm64e ABI */
		fp = (uint64_t) ptrauth_strip((void *)fp, ptrauth_key_frame_pointer);
#endif
	}
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}

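/*
 * Drop into the debugger by executing the arm32 GDB trap instruction
 * in-line; kdp_trap() above recognizes this encoding and advances the PC
 * past it on resume.
 */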
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}