1 | /* |
2 | * Copyright (c) 2017 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <arm/cpu_data_internal.h> |
30 | #include <arm/dbgwrap.h> |
31 | #include <arm64/proc_reg.h> |
32 | #include <machine/atomic.h> |
33 | #include <pexpert/arm64/board_config.h> |
34 | |
/* Per-CPU "UTT" DBGWRAP register: writing DBGHALT requests an external-debug
 * halt of the core; DBGACK is the core's acknowledgement that it has entered
 * debug state. */
#define DBGWRAP_REG_OFFSET 0
#define DBGWRAP_DBGHALT (1ULL << 31)
#define DBGWRAP_DBGACK (1ULL << 28)

/* Offsets and bits within the ARMv8 external-debug (ED) register block,
 * per the Arm ARM External Debug chapter. */
#define EDDTRRX_REG_OFFSET 0x80
#define EDITR_REG_OFFSET 0x84
#define EDSCR_REG_OFFSET 0x88
#define EDSCR_TXFULL (1ULL << 29) /* DBGDTR holds data written by the target */
#define EDSCR_ITE (1ULL << 24)    /* EDITR ready to accept an instruction */
#define EDSCR_MA (1ULL << 20)     /* memory-access mode */
#define EDSCR_ERR (1ULL << 6)     /* cumulative (sticky) error flag */
#define EDDTRTX_REG_OFFSET 0x8C
#define EDRCR_REG_OFFSET 0x90
#define EDRCR_CSE (1ULL << 2)     /* clear sticky errors (resets EDSCR.ERR) */
#define EDPRSR_REG_OFFSET 0x314
#define EDPRSR_OSLK (1ULL << 5)   /* OS lock set; debug registers inaccessible */

/* Upper bound on re-issuing an instruction through EDITR after an error. */
#define MAX_EDITR_RETRIES 16

/* Older SoCs require 32-bit accesses for DBGWRAP;
 * newer ones require 64-bit accesses. */
#ifdef HAS_32BIT_DBGWRAP
typedef uint32_t dbgwrap_reg_t;
#else
typedef uint64_t dbgwrap_reg_t;
#endif

#if DEVELOPMENT || DEBUG
/* Ring buffer recording the most recently stuffed EDITR instructions,
 * to aid post-mortem debugging of the halt path itself. */
#define MAX_STUFFED_INSTRS 64
uint32_t stuffed_instrs[MAX_STUFFED_INSTRS];
volatile uint32_t stuffed_instr_count = 0;
#endif

/* CPU number of the sole CPU permitted to initiate halt sequences;
 * (uint32_t)-1 means no CPU has claimed ownership yet. */
static volatile uint32_t halt_from_cpu = (uint32_t)-1;
69 | |
70 | boolean_t |
71 | ml_dbgwrap_cpu_is_halted(int cpu_index) |
72 | { |
73 | cpu_data_t *cdp = cpu_datap(cpu_index); |
74 | if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) |
75 | return FALSE; |
76 | |
77 | return ((*(volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET) & DBGWRAP_DBGACK) != 0); |
78 | } |
79 | |
80 | dbgwrap_status_t |
81 | ml_dbgwrap_wait_cpu_halted(int cpu_index, uint64_t timeout_ns) |
82 | { |
83 | cpu_data_t *cdp = cpu_datap(cpu_index); |
84 | if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) |
85 | return DBGWRAP_ERR_UNSUPPORTED; |
86 | |
87 | volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET); |
88 | |
89 | uint64_t interval; |
90 | nanoseconds_to_absolutetime(timeout_ns, &interval); |
91 | uint64_t deadline = mach_absolute_time() + interval; |
92 | while (!(*dbgWrapReg & DBGWRAP_DBGACK)) { |
93 | if (mach_absolute_time() > deadline) |
94 | return DBGWRAP_ERR_HALT_TIMEOUT; |
95 | } |
96 | |
97 | return DBGWRAP_SUCCESS; |
98 | } |
99 | |
/*
 * Request an external-debug halt of a remote CPU via its DBGWRAP register.
 * If timeout_ns is non-zero, additionally wait up to that long for the
 * target to acknowledge; otherwise return immediately after posting the
 * halt request.
 *
 * Returns DBGWRAP_SUCCESS, DBGWRAP_WARN_ALREADY_HALTED if the target was
 * already in debug state, or an error: unsupported core, attempt to halt
 * the calling CPU, another CPU owns the halt sequence, or ack timeout.
 */
dbgwrap_status_t
ml_dbgwrap_halt_cpu(int cpu_index, uint64_t timeout_ns)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0))
		return DBGWRAP_ERR_UNSUPPORTED;

	/* Only one cpu is allowed to initiate the halt sequence, to prevent cpus from cross-halting
	 * each other. The first cpu to request a halt may then halt any and all other cpus besides itself. */
	int curcpu = cpu_number();
	if (cpu_index == curcpu)
		return DBGWRAP_ERR_SELF_HALT;

	/* Atomically claim ownership of the halt sequence (transition from -1
	 * to curcpu).  Ownership is deliberately sticky: the CAS may fail
	 * because this CPU already owns it, which is allowed through. */
	if (!hw_compare_and_store((uint32_t)-1, (unsigned int)curcpu, &halt_from_cpu) &&
	    (halt_from_cpu != (uint32_t)curcpu))
		return DBGWRAP_ERR_INPROGRESS;

	volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET);

	if (ml_dbgwrap_cpu_is_halted(cpu_index))
		return DBGWRAP_WARN_ALREADY_HALTED;

	/* Clear all other writable bits besides dbgHalt; none of the power-down or reset bits must be set. */
	*dbgWrapReg = DBGWRAP_DBGHALT;

	if (timeout_ns != 0) {
		dbgwrap_status_t stat = ml_dbgwrap_wait_cpu_halted(cpu_index, timeout_ns);
		return stat;
	}
	else
		return DBGWRAP_SUCCESS;
}
132 | |
/*
 * Feed one A64 instruction to a halted target core through its EDITR
 * register and wait for the core to consume it (EDSCR.ITE).  On an
 * EDSCR.ERR condition, clear the sticky error via EDRCR.CSE and re-issue
 * the instruction, up to MAX_EDITR_RETRIES times.
 *
 * Error pass-through: does nothing if *status already holds an error
 * (negative value); on failure sets *status to DBGWRAP_ERR_INSTR_TIMEOUT
 * or DBGWRAP_ERR_INSTR_ERROR.
 */
static void
ml_dbgwrap_stuff_instr(cpu_data_t *cdp, uint32_t instr, uint64_t timeout_ns, dbgwrap_status_t *status)
{
	/* Propagate a prior failure; callers chain several stuffs and check once. */
	if (*status < 0)
		return;

	volatile uint32_t *editr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDITR_REG_OFFSET);
	volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET);
	volatile uint32_t *edrcr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDRCR_REG_OFFSET);

	int retries = 0;

	uint64_t interval;
	nanoseconds_to_absolutetime(timeout_ns, &interval);
	uint64_t deadline = mach_absolute_time() + interval;

#if DEVELOPMENT || DEBUG
	/* Log the instruction into the global ring buffer for post-mortem analysis. */
	uint32_t stuffed_instr_index = hw_atomic_add(&stuffed_instr_count, 1);
	stuffed_instrs[(stuffed_instr_index - 1) % MAX_STUFFED_INSTRS] = instr;
#endif

	do {
		*editr = instr;
		volatile uint32_t edscr_val;
		/* Poll for EDSCR.ITE (target consumed the instruction), exiting
		 * early on a latched error or on deadline expiry. */
		while (!((edscr_val = *edscr) & EDSCR_ITE)) {
			if (mach_absolute_time() > deadline) {
				*status = DBGWRAP_ERR_INSTR_TIMEOUT;
				return;
			}
			if (edscr_val & EDSCR_ERR)
				break;
		}
		if (edscr_val & EDSCR_ERR) {
			/* If memory access mode was enabled by a debugger, clear it.
			 * This will cause ERR to be set on any attempt to use EDITR. */
			if (edscr_val & EDSCR_MA)
				*edscr = edscr_val & ~EDSCR_MA;
			/* Clear the sticky error so the retry can make progress. */
			*edrcr = EDRCR_CSE;
			++retries;
		} else
			break;
	} while (retries < MAX_EDITR_RETRIES);

	if (retries >= MAX_EDITR_RETRIES) {
		*status = DBGWRAP_ERR_INSTR_ERROR;
		return;
	}
}
181 | |
/*
 * Wait for the halted target core to publish a 64-bit value through its
 * DBGDTR_EL0 transfer register (signalled by EDSCR.TXFull), then read and
 * recombine the two 32-bit halves from EDDTRRX/EDDTRTX.
 *
 * Error pass-through: returns 0 immediately if *status already holds an
 * error; on failure sets *status to DBGWRAP_ERR_INSTR_ERROR or
 * DBGWRAP_ERR_INSTR_TIMEOUT and returns 0.
 */
static uint64_t
ml_dbgwrap_read_dtr(cpu_data_t *cdp, uint64_t timeout_ns, dbgwrap_status_t *status)
{
	if (*status < 0)
		return 0;

	uint64_t interval;
	nanoseconds_to_absolutetime(timeout_ns, &interval);
	uint64_t deadline = mach_absolute_time() + interval;

	/* Per armv8 debug spec, writes to DBGDTR_EL0 on target cpu will set EDSCR.TXFull,
	 * with bits 63:32 available in EDDTRRX and bits 31:0 available in EDDTRTX. */
	volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET);

	while (!(*edscr & EDSCR_TXFULL)) {
		/* A latched error means the instruction that was supposed to
		 * fill the DTR never executed; report it rather than spin. */
		if (*edscr & EDSCR_ERR) {
			*status = DBGWRAP_ERR_INSTR_ERROR;
			return 0;
		}
		if (mach_absolute_time() > deadline) {
			*status = DBGWRAP_ERR_INSTR_TIMEOUT;
			return 0;
		}
	}

	uint32_t dtrrx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRRX_REG_OFFSET));
	uint32_t dtrtx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRTX_REG_OFFSET));

	return (((uint64_t)dtrrx << 32) | dtrtx);
}
212 | |
/*
 * Halt the given CPU and extract its register state (x0-x28, fp, lr, sp,
 * pc, cpsr) into *state by stuffing MSR/MRS instructions through EDITR and
 * reading the results back via the DBGDTR transfer registers.
 *
 * Returns the accumulated dbgwrap status: DBGWRAP_SUCCESS, a warning
 * (already halted, or core offline — in which case *state is zeroed), or
 * the first instruction-stuffing/DTR-read error encountered.
 */
dbgwrap_status_t
ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_thread_state_t *state)
{
	cpu_data_t *cdp = cpu_datap(cpu_index);
	if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_ED] == 0))
		return DBGWRAP_ERR_UNSUPPORTED;

	/* Ensure memory-mapped coresight registers can be written */
	*((volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR)) = ARM_DBG_LOCK_ACCESS_KEY;

	dbgwrap_status_t status = ml_dbgwrap_halt_cpu(cpu_index, timeout_ns);

	/* A core that is not fully powered (e.g. idling in wfi) can still be halted; the dbgwrap
	 * register and certain coresight registers such EDPRSR are in the always-on domain.
	 * However, EDSCR/EDITR are not in the always-on domain and will generate a parity abort
	 * on read. EDPRSR can be safely read in all cases, and the OS lock defaults to being set
	 * but we clear it first thing, so use that to detect the offline state. */
	if (*((volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDPRSR_REG_OFFSET)) & EDPRSR_OSLK) {
		bzero(state, sizeof(*state));
		return DBGWRAP_WARN_CPU_OFFLINE;
	}

	uint32_t instr;

	/* General-purpose registers: have the target write each x<i> to
	 * DBGDTR_EL0, then read the 64-bit value back through the DTR. */
	for (unsigned int i = 0; i < (sizeof(state->x) / sizeof(state->x[0])); ++i) {
		instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | i; // msr DBGDTR0, x<i>
		ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
		state->x[i] = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);
	}

	/* Frame pointer (x29) and link register (x30), same encoding as above. */
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 29; // msr DBGDTR0, fp
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->fp = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 30; // msr DBGDTR0, lr
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->lr = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* Stack pointer (x31) can't be used as a register operand for msr; register 31 is treated as xzr
	 * rather than sp when used as the transfer operand there. Instead, load sp into a GPR
	 * we've already saved off and then store that register in the DTR. I've chosen x18
	 * as the temporary GPR since it's reserved by the arm64 ABI and unused by xnu, so overwriting
	 * it poses the least risk of causing trouble for external debuggers. */

	instr = (0x91U << 24) | (31 << 5) | 18; // mov x18, sp
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->sp = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* reading PC (e.g. through adr) is undefined in debug state. Instead use DLR_EL0,
	 * which contains PC at time of entry into debug state.*/

	instr = (0xD53U << 20) | (1 << 19) | (3 << 16) | (4 << 12) | (5 << 8) | (1 << 5) | 18; // mrs x18, DLR_EL0
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->pc = ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	/* reading CPSR is undefined in debug state. Instead use DSPSR_EL0,
	 * which contains CPSR at time of entry into debug state.*/
	instr = (0xD53U << 20) | (1 << 19) | (3 << 16) | (4 << 12) | (5 << 8) | 18; // mrs x18, DSPSR_EL0
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	instr = (0xD51U << 20) | (2 << 19) | (3 << 16) | (4 << 8) | 18; // msr DBGDTR0, x18
	ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status);
	state->cpsr = (uint32_t)ml_dbgwrap_read_dtr(cdp, timeout_ns, &status);

	return status;
}
282 | |
283 | |
284 | |