1 | /* |
2 | * Copyright (c) 2012-2016 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <arm/caches_internal.h> |
30 | #include <arm/cpu_data.h> |
31 | #include <arm/cpu_data_internal.h> |
32 | #include <arm/misc_protos.h> |
33 | #include <arm/thread.h> |
34 | #include <arm/rtclock.h> |
35 | #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */ |
36 | #include <arm64/proc_reg.h> |
37 | #include <arm64/machine_machdep.h> |
38 | #include <arm64/monotonic.h> |
39 | |
40 | #include <kern/debug.h> |
41 | #include <kern/thread.h> |
42 | #include <mach/exception.h> |
43 | #include <mach/vm_types.h> |
44 | #include <mach/machine/thread_status.h> |
45 | |
46 | #include <machine/atomic.h> |
47 | #include <machine/machlimits.h> |
48 | |
49 | #include <pexpert/arm/protos.h> |
50 | |
51 | #include <vm/vm_page.h> |
52 | #include <vm/pmap.h> |
53 | #include <vm/vm_fault.h> |
54 | #include <vm/vm_kern.h> |
55 | |
56 | #include <sys/kdebug.h> |
57 | #include <kperf/kperf.h> |
58 | |
59 | #include <kern/policy_internal.h> |
60 | #if CONFIG_TELEMETRY |
61 | #include <kern/telemetry.h> |
62 | #endif |
63 | |
64 | #include <prng/random.h> |
65 | |
66 | #ifndef __arm64__ |
67 | #error Should only be compiling for arm64. |
68 | #endif |
69 | |
70 | #define TEST_CONTEXT32_SANITY(context) \ |
71 | (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \ |
72 | context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT) |
73 | |
74 | #define TEST_CONTEXT64_SANITY(context) \ |
75 | (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \ |
76 | context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT) |
77 | |
78 | #define ASSERT_CONTEXT_SANITY(context) \ |
79 | assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context)) |
80 | |
81 | |
82 | #define COPYIN(src, dst, size) \ |
83 | (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \ |
84 | copyin_kern(src, dst, size) \ |
85 | : \ |
86 | copyin(src, dst, size) |
87 | |
88 | #define COPYOUT(src, dst, size) \ |
89 | (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \ |
90 | copyout_kern(src, dst, size) \ |
91 | : \ |
92 | copyout(src, dst, size) |
93 | |
94 | // Below is for concatenating a string param to a string literal |
95 | #define STR1(x) #x |
96 | #define STR(x) STR1(x) |
97 | |
98 | void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss); |
99 | |
100 | void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t); |
101 | void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t); |
102 | void sleh_irq(arm_saved_state_t *); |
103 | void sleh_fiq(arm_saved_state_t *); |
104 | void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far); |
105 | void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far); |
106 | |
107 | static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type); |
108 | static void sleh_interrupt_handler_epilogue(void); |
109 | |
110 | static void handle_svc(arm_saved_state_t *); |
111 | static void handle_mach_absolute_time_trap(arm_saved_state_t *); |
112 | static void handle_mach_continuous_time_trap(arm_saved_state_t *); |
113 | |
114 | static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss); |
115 | |
116 | extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean_t); |
117 | |
118 | static void handle_uncategorized(arm_saved_state_t *, boolean_t); |
119 | static void handle_breakpoint(arm_saved_state_t *); |
120 | |
121 | typedef void(*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *); |
122 | static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *); |
123 | static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *); |
124 | |
125 | static int is_vm_fault(fault_status_t); |
126 | static int is_translation_fault(fault_status_t); |
127 | static int is_alignment_fault(fault_status_t); |
128 | |
129 | typedef void(*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); |
130 | static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); |
131 | static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); |
132 | |
133 | static void handle_pc_align(arm_saved_state_t *ss); |
134 | static void handle_sp_align(arm_saved_state_t *ss); |
135 | static void handle_sw_step_debug(arm_saved_state_t *ss); |
136 | static void handle_wf_trap(arm_saved_state_t *ss); |
137 | |
138 | static void handle_watchpoint(vm_offset_t fault_addr); |
139 | |
140 | static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t); |
141 | |
142 | static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr); |
143 | |
144 | static void handle_simd_trap(arm_saved_state_t *, uint32_t esr); |
145 | |
146 | extern void mach_kauth_cred_uthread_update(void); |
147 | void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number); |
148 | |
149 | struct uthread; |
150 | struct proc; |
151 | |
152 | extern void |
153 | unix_syscall(struct arm_saved_state * regs, thread_t thread_act, |
154 | struct uthread * uthread, struct proc * proc); |
155 | |
156 | extern void |
157 | mach_syscall(struct arm_saved_state*); |
158 | |
159 | #if CONFIG_DTRACE |
160 | extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs); |
161 | extern boolean_t dtrace_tally_fault(user_addr_t); |
162 | |
163 | /* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions |
164 | over from that file. Need to keep these in sync! */ |
165 | #define FASTTRAP_ARM32_INSTR 0xe7ffdefc |
166 | #define FASTTRAP_THUMB32_INSTR 0xdefc |
167 | #define FASTTRAP_ARM64_INSTR 0xe7eeee7e |
168 | |
169 | #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb |
170 | #define FASTTRAP_THUMB32_RET_INSTR 0xdefb |
171 | #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d |
172 | |
173 | /* See <rdar://problem/4613924> */ |
174 | perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */ |
175 | #endif |
176 | |
177 | #if CONFIG_PGTRACE |
178 | extern boolean_t pgtrace_enabled; |
179 | #endif |
180 | |
181 | #if __ARM_PAN_AVAILABLE__ |
182 | #ifdef CONFIG_XNUPOST |
183 | extern vm_offset_t pan_test_addr; |
184 | extern vm_offset_t pan_ro_addr; |
185 | extern volatile int pan_exception_level; |
186 | extern volatile char pan_fault_value; |
187 | #endif |
188 | #endif |
189 | |
190 | #if defined(APPLECYCLONE) |
191 | #define CPU_NAME "Cyclone" |
192 | #elif defined(APPLETYPHOON) |
193 | #define CPU_NAME "Typhoon" |
194 | #elif defined(APPLETWISTER) |
195 | #define CPU_NAME "Twister" |
196 | #elif defined(APPLEHURRICANE) |
197 | #define CPU_NAME "Hurricane" |
198 | #else |
199 | #define CPU_NAME "Unknown" |
200 | #endif |
201 | |
202 | #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT)) |
203 | #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400) |
204 | #define ESR_WT_REASON(esr) ((esr) & 0xff) |
205 | |
206 | #define WT_REASON_NONE 0 |
207 | #define WT_REASON_INTEGRITY_FAIL 1 |
208 | #define WT_REASON_BAD_SYSCALL 2 |
209 | #define WT_REASON_NOT_LOCKED 3 |
210 | #define WT_REASON_ALREADY_LOCKED 4 |
211 | #define WT_REASON_SW_REQ 5 |
212 | #define WT_REASON_PT_INVALID 6 |
213 | #define WT_REASON_PT_VIOLATION 7 |
214 | #define WT_REASON_REG_VIOLATION 8 |
215 | #endif |
216 | |
217 | |
218 | extern vm_offset_t static_memory_end; |
219 | |
/*
 * Rotate 'value' right by 'shift' bits.
 *
 * The shift amount is reduced modulo the bit width of 'unsigned', and a
 * (reduced) shift of zero returns the value unchanged.  The previous
 * formulation computed "value << (width - shift)", which shifts by the
 * full width of the type when shift == 0 -- undefined behavior in C.
 */
static inline unsigned
__ror(unsigned value, unsigned shift)
{
	const unsigned bits = (unsigned)(sizeof(unsigned) * CHAR_BIT);

	shift &= bits - 1;	/* avoid UB: never shift by >= type width */
	if (shift == 0) {
		return value;
	}
	return ((value >> shift) | (value << (bits - shift)));
}
226 | |
/*
 * Panic with the CPU's implementation-defined error-status registers.
 *
 * Called when no platform error handler is registered for an SError-class
 * event.  Reads the Apple implementation-specific MMU/LSU/FED (and L2/LLC)
 * error status registers and panics with their contents.  Which register
 * bank is read depends on the SoC generation:
 *   - NO_ECORE:   single cluster; L2C_* registers.
 *   - HAS_MIGSTS: also dumps MIGSTS_EL1 and whether we're on a p-core.
 *   - otherwise:  asymmetric cores; e-cores use the E_* register bank.
 * Does not return.
 */
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
	uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n" ,
		state, esr, (void *)far,
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1" );
	migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n" ,
		state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
	uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1" );

	/* p-cores and e-cores expose distinct error-status register banks. */
	if (mpidr & MPIDR_PNE) {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
	} else {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
	}

	llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p p-core?%d\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n" ,
		state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n" );
#endif
}
301 | |
302 | #if CONFIG_KERNEL_INTEGRITY |
303 | #pragma clang diagnostic push |
304 | #pragma clang diagnostic ignored "-Wunused-parameter" |
/*
 * Decode and report KERNEL_INTEGRITY_WT SError events.
 *
 * On DEVELOPMENT/DEBUG builds the WT reason code is decoded into a
 * specific panic message; on release builds any WT SError panics with the
 * raw ESR/FAR only.  Every case ends in panic_plain(), which does not
 * return, so the switch cases intentionally have no break statements.
 * When KERNEL_INTEGRITY_WT is not configured this function is a no-op
 * (hence the -Wunused-parameter pragma around it).
 */
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) {
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx." , far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall." );
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked." );
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked." );
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request." );
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
				"walking 0x%016lx." , far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx." ,
				far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d." ,
				(unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x)." , esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx." , esr, far);
	}
#endif
#endif
}
341 | #pragma clang diagnostic pop |
342 | #endif |
343 | |
344 | static void |
345 | arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far) |
346 | { |
347 | cpu_data_t *cdp = getCpuDatap(); |
348 | |
349 | #if CONFIG_KERNEL_INTEGRITY |
350 | kernel_integrity_error_handler(esr, far); |
351 | #endif |
352 | |
353 | if (cdp->platform_error_handler != (platform_error_handler_t) NULL) |
354 | (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, far); |
355 | else |
356 | arm64_implementation_specific_error(state, esr, far); |
357 | } |
358 | |
/*
 * Panic, appending a full dump of the saved 64-bit register state.
 *
 * @param msg	panic reason string, printed first.
 * @param ss	saved state to dump; interpreted as a 64-bit save area.
 *
 * Note: saved_state64() is applied unconditionally; is_saved_state64()
 * only controls whether the dump is tagged " INVALID" when the state is
 * not actually a 64-bit save area.  Does not return.
 */
void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	panic_plain("%s (saved state: %p%s)\n"
		"\t  x0: 0x%016llx  x1:  0x%016llx  x2:  0x%016llx  x3:  0x%016llx\n"
		"\t  x4: 0x%016llx  x5:  0x%016llx  x6:  0x%016llx  x7:  0x%016llx\n"
		"\t  x8: 0x%016llx  x9:  0x%016llx  x10: 0x%016llx  x11: 0x%016llx\n"
		"\t  x12: 0x%016llx x13: 0x%016llx  x14: 0x%016llx  x15: 0x%016llx\n"
		"\t  x16: 0x%016llx x17: 0x%016llx  x18: 0x%016llx  x19: 0x%016llx\n"
		"\t  x20: 0x%016llx x21: 0x%016llx  x22: 0x%016llx  x23: 0x%016llx\n"
		"\t  x24: 0x%016llx x25: 0x%016llx  x26: 0x%016llx  x27: 0x%016llx\n"
		"\t  x28: 0x%016llx fp:  0x%016llx  lr:  0x%016llx  sp:  0x%016llx\n"
		"\t  pc:  0x%016llx cpsr: 0x%08x         esr: 0x%08x          far: 0x%016llx\n" ,
		msg, ss, (ss_valid ? "" : " INVALID" ),
		state->x[0], state->x[1], state->x[2], state->x[3],
		state->x[4], state->x[5], state->x[6], state->x[7],
		state->x[8], state->x[9], state->x[10], state->x[11],
		state->x[12], state->x[13], state->x[14], state->x[15],
		state->x[16], state->x[17], state->x[18], state->x[19],
		state->x[20], state->x[21], state->x[22], state->x[23],
		state->x[24], state->x[25], state->x[26], state->x[27],
		state->x[28], state->fp, state->lr, state->sp,
		state->pc, state->cpsr, state->esr, state->far);
}
388 | |
389 | |
390 | void |
391 | sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused) |
392 | { |
393 | esr_exception_class_t class = ESR_EC(esr); |
394 | arm_saved_state_t *state = &context->ss; |
395 | |
396 | switch (class) { |
397 | case ESR_EC_UNCATEGORIZED: |
398 | { |
399 | uint32_t instr = *((uint32_t*)get_saved_state_pc(state)); |
400 | if (IS_ARM_GDB_TRAP(instr)) |
401 | DebuggerCall(EXC_BREAKPOINT, state); |
402 | // Intentionally fall through to panic if we return from the debugger |
403 | } |
404 | default: |
405 | panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected" , state); |
406 | } |
407 | } |
408 | |
/*
 * Second-level handler for synchronous exceptions taken on SP0.
 *
 * Dispatches on the ESR exception class to the appropriate handler.
 * Many handlers deliver a Mach exception via exception_triage() and do
 * not return (marked "Unreachable" below); a few cases return here so the
 * first-level handler can resume the interrupted context.
 *
 * @param context	saved integer + NEON state for the faulting context.
 * @param esr		ESR_EL1 syndrome value.
 * @param far		FAR_EL1 fault address (meaningful for aborts only).
 */
void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t *state = &context->ss;
	vm_offset_t recover = 0;
	thread_t thread = current_thread();

	ASSERT_CONTEXT_SANITY(context);

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state)))
		ml_set_interrupts_enabled(TRUE);

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
			panic("Invalid SVC_64 context" );
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, ESR_ISS(esr));
		break;

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_IABORT_EL1:
		/* Kernel should never fault on instruction fetch. */
		panic_with_thread_kernel_state("Kernel instruction fetch abort" , state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_DABORT_EL1:
		/* Kernel data aborts may be recoverable (e.g. copyin/copyout). */
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

		handle_uncategorized(&context->ss, ESR_INSTR_IS_2BYTES(esr));
		/* TODO: Uncomment this after stackshot uses a brk instruction
		 * rather than an undefined instruction, as stackshot is the
		 * only case where we want to return to the first-level handler.
		 */
		//assert(0); /* Unreachable */
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			/* A kernel brk hangs rather than risking a nested exception. */
			kprintf("Breakpoint instruction exception from kernel.  Hanging here (by design).\n" );
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		} else {
			/* handle_breakpoint() does not return; the missing break
			 * after this else-branch is therefore benign. */
			handle_breakpoint(state);
			assert(0); /* Unreachable */
		}

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_REG_MATCH_EL1:
		if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Hardware Breakpoint Debug exception from kernel.  Hanging here (by design).\n" );
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL1:
		if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Software Step Debug exception from kernel.  Hanging here (by design).\n" );
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
		 * abort.  Turn off watchpoints and keep going; we'll turn them back on in return_from_exception..
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code.  state=%p class=%u esr=%u far=%p" ,
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		assert(0);
		break;

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
			ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x" ,
				state, class, esr, (void *)far, get_saved_state_cpsr(state));
			assert(0);
		}
		// must clear this fault in PSR to re-run
		set_saved_state_cpsr(state, get_saved_state_cpsr(state) & (~PSR64_IL));
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		assert(0);
		break;

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		assert(0); /* Unreachable */
		break;

	default:
		panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p" ,
			state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;
	}

	/* Restore the recover handler we stashed on entry. */
	if (recover)
		thread->recover = recover;
}
615 | |
616 | /* |
617 | * Uncategorized exceptions are a catch-all for general execution errors. |
618 | * ARM64_TODO: For now, we assume this is for undefined instruction exceptions. |
619 | */ |
/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 *
 * Reads the faulting instruction (2 or 4 bytes depending on instrLen2),
 * gives DTrace fasttrap probes and the kernel debugger a chance to claim
 * it, and otherwise delivers EXC_BAD_INSTRUCTION / EXC_BREAKPOINT to the
 * task.  Returns only when the debugger or DTrace consumed the event.
 */
static void
handle_uncategorized(arm_saved_state_t *state, boolean_t instrLen2)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	/* COPYIN selects copyin_kern vs copyin based on the saved CPSR;
	 * failures leave instr as 0. */
	if (instrLen2) {
		uint16_t instr16 = 0;
		COPYIN(get_saved_state_pc(state), (char *)&instr16, sizeof(instr16));

		instr = instr16;
	} else {
		COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
		return;
	}

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS)
				return;
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
				((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
				(instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			vm_offset_t kstackptr;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
			if (kstackptr) {
				((thread_kernel_state_t) kstackptr)->machine.ss = *state;
			}

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request.
			 */
			DebuggerCall(exception, state);

			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x\n" , (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.  For a 4-byte fetch we
	 * also look for a Thumb GDB trap in either halfword.
	 */
	if (instrLen2) {
		if (IS_THUMB_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else {
			codes[1] = instr;
		}
	} else {
		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else if (IS_THUMB_GDB_TRAP((instr & 0xFFFF))) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr & 0xFFFF;
		} else if (IS_THUMB_GDB_TRAP((instr >> 16))) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr >> 16;
		} else {
			codes[1] = instr;
		}
	}

	exception_triage(exception, codes, numcodes);
	assert(0); /* NOTREACHED */
}
737 | |
738 | static void |
739 | handle_breakpoint(arm_saved_state_t *state) |
740 | { |
741 | exception_type_t exception = EXC_BREAKPOINT; |
742 | mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT}; |
743 | mach_msg_type_number_t numcodes = 2; |
744 | |
745 | codes[1] = get_saved_state_pc(state); |
746 | exception_triage(exception, codes, numcodes); |
747 | assert(0); /* NOTREACHED */ |
748 | } |
749 | |
750 | static void |
751 | handle_watchpoint(vm_offset_t fault_addr) |
752 | { |
753 | exception_type_t exception = EXC_BREAKPOINT; |
754 | mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG}; |
755 | mach_msg_type_number_t numcodes = 2; |
756 | |
757 | codes[1] = fault_addr; |
758 | exception_triage(exception, codes, numcodes); |
759 | assert(0); /* NOTREACHED */ |
760 | } |
761 | |
762 | static void |
763 | handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover, |
764 | abort_inspector_t inspect_abort, abort_handler_t handler) |
765 | { |
766 | fault_status_t fault_code; |
767 | vm_prot_t fault_type; |
768 | |
769 | inspect_abort(ESR_ISS(esr), &fault_code, &fault_type); |
770 | handler(state, esr, fault_addr, fault_code, fault_type, recover); |
771 | } |
772 | |
773 | static void |
774 | inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type) |
775 | { |
776 | getCpuDatap()->cpu_stat.instr_ex_cnt++; |
777 | *fault_code = ISS_IA_FSC(iss); |
778 | *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE); |
779 | } |
780 | |
781 | static void |
782 | inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type) |
783 | { |
784 | getCpuDatap()->cpu_stat.data_ex_cnt++; |
785 | *fault_code = ISS_DA_FSC(iss); |
786 | |
787 | /* Cache operations report faults as write access. Change these to read access. */ |
788 | if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) { |
789 | *fault_type = (VM_PROT_READ | VM_PROT_WRITE); |
790 | } else { |
791 | *fault_type = (VM_PROT_READ); |
792 | } |
793 | } |
794 | |
795 | static void |
796 | handle_pc_align(arm_saved_state_t *ss) |
797 | { |
798 | exception_type_t exc; |
799 | mach_exception_data_type_t codes[2]; |
800 | mach_msg_type_number_t numcodes = 2; |
801 | |
802 | if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) { |
803 | panic_with_thread_kernel_state("PC alignment exception from kernel." , ss); |
804 | } |
805 | |
806 | exc = EXC_BAD_ACCESS; |
807 | codes[0] = EXC_ARM_DA_ALIGN; |
808 | codes[1] = get_saved_state_pc(ss); |
809 | |
810 | exception_triage(exc, codes, numcodes); |
811 | assert(0); /* NOTREACHED */ |
812 | } |
813 | |
814 | static void |
815 | handle_sp_align(arm_saved_state_t *ss) |
816 | { |
817 | exception_type_t exc; |
818 | mach_exception_data_type_t codes[2]; |
819 | mach_msg_type_number_t numcodes = 2; |
820 | |
821 | if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) { |
822 | panic_with_thread_kernel_state("SP alignment exception from kernel." , ss); |
823 | } |
824 | |
825 | exc = EXC_BAD_ACCESS; |
826 | codes[0] = EXC_ARM_SP_ALIGN; |
827 | codes[1] = get_saved_state_sp(ss); |
828 | |
829 | exception_triage(exc, codes, numcodes); |
830 | assert(0); /* NOTREACHED */ |
831 | } |
832 | |
833 | static void |
834 | handle_wf_trap(arm_saved_state_t *ss) |
835 | { |
836 | exception_type_t exc; |
837 | mach_exception_data_type_t codes[2]; |
838 | mach_msg_type_number_t numcodes = 2; |
839 | |
840 | exc = EXC_BAD_INSTRUCTION; |
841 | codes[0] = EXC_ARM_UNDEFINED; |
842 | codes[1] = get_saved_state_sp(ss); |
843 | |
844 | exception_triage(exc, codes, numcodes); |
845 | assert(0); /* NOTREACHED */ |
846 | } |
847 | |
848 | |
/*
 * Handle a software-step debug exception (hardware single-step) taken from
 * userspace.  Clears the single-step enable bit in the thread's debug
 * state, clears PSTATE.SS and unmasks IRQ/FIQ in the saved user state, and
 * delivers EXC_BREAKPOINT with the ARM gdb single-step code encoding.
 * Does not return.
 */
static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	/* Single-step debug exceptions are only expected from EL0. */
	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel." , state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		/* Clear MDSCR_EL1.SS (bit 0) in the thread's saved debug state. */
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		/* A step exception without debug state should be impossible. */
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL." , state);
	}

	set_saved_state_cpsr((thread->machine.upcb),
	    get_saved_state_cpsr((thread->machine.upcb)) & ~(PSR64_SS | DAIF_IRQF | DAIF_FIQF));

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
879 | |
880 | static int |
881 | is_vm_fault(fault_status_t status) |
882 | { |
883 | switch (status) { |
884 | case FSC_TRANSLATION_FAULT_L0: |
885 | case FSC_TRANSLATION_FAULT_L1: |
886 | case FSC_TRANSLATION_FAULT_L2: |
887 | case FSC_TRANSLATION_FAULT_L3: |
888 | case FSC_ACCESS_FLAG_FAULT_L1: |
889 | case FSC_ACCESS_FLAG_FAULT_L2: |
890 | case FSC_ACCESS_FLAG_FAULT_L3: |
891 | case FSC_PERMISSION_FAULT_L1: |
892 | case FSC_PERMISSION_FAULT_L2: |
893 | case FSC_PERMISSION_FAULT_L3: |
894 | return TRUE; |
895 | default: |
896 | return FALSE; |
897 | } |
898 | } |
899 | |
900 | static int |
901 | is_translation_fault(fault_status_t status) |
902 | { |
903 | switch (status) { |
904 | case FSC_TRANSLATION_FAULT_L0: |
905 | case FSC_TRANSLATION_FAULT_L1: |
906 | case FSC_TRANSLATION_FAULT_L2: |
907 | case FSC_TRANSLATION_FAULT_L3: |
908 | return TRUE; |
909 | default: |
910 | return FALSE; |
911 | } |
912 | } |
913 | |
#if __ARM_PAN_AVAILABLE__
/*
 * Return TRUE if the fault status code denotes a permission fault at any
 * page-table level.  Only needed for PAN fault classification.
 */
static int
is_permission_fault(fault_status_t status)
{
	if (status == FSC_PERMISSION_FAULT_L1 ||
	    status == FSC_PERMISSION_FAULT_L2 ||
	    status == FSC_PERMISSION_FAULT_L3) {
		return TRUE;
	}

	return FALSE;
}
#endif
928 | |
929 | static int |
930 | is_alignment_fault(fault_status_t status) |
931 | { |
932 | return (status == FSC_ALIGNMENT_FAULT); |
933 | } |
934 | |
935 | static int |
936 | is_parity_error(fault_status_t status) |
937 | { |
938 | switch (status) { |
939 | case FSC_SYNC_PARITY: |
940 | case FSC_ASYNC_PARITY: |
941 | case FSC_SYNC_PARITY_TT_L1: |
942 | case FSC_SYNC_PARITY_TT_L2: |
943 | case FSC_SYNC_PARITY_TT_L3: |
944 | return TRUE; |
945 | default: |
946 | return FALSE; |
947 | } |
948 | } |
949 | |
950 | static void |
951 | handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, |
952 | fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) |
953 | { |
954 | exception_type_t exc = EXC_BAD_ACCESS; |
955 | mach_exception_data_type_t codes[2]; |
956 | mach_msg_type_number_t numcodes = 2; |
957 | thread_t thread = current_thread(); |
958 | |
959 | (void)esr; |
960 | (void)state; |
961 | |
962 | if (ml_at_interrupt_context()) |
963 | panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n" , state); |
964 | |
965 | thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */ |
966 | |
967 | if (is_vm_fault(fault_code)) { |
968 | kern_return_t result = KERN_FAILURE; |
969 | vm_map_t map = thread->map; |
970 | vm_offset_t vm_fault_addr = fault_addr; |
971 | |
972 | assert(map != kernel_map); |
973 | |
974 | if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) |
975 | vm_fault_addr = tbi_clear(fault_addr); |
976 | |
977 | #if CONFIG_DTRACE |
978 | if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ |
979 | if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */ |
980 | if (recover) { |
981 | set_saved_state_pc(state, recover); |
982 | } else { |
983 | boolean_t intr = ml_set_interrupts_enabled(FALSE); |
984 | panic_with_thread_kernel_state("copyin/out has no recovery point" , state); |
985 | (void) ml_set_interrupts_enabled(intr); |
986 | } |
987 | return; |
988 | } else { |
989 | boolean_t intr = ml_set_interrupts_enabled(FALSE); |
990 | panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe" , state); |
991 | (void) ml_set_interrupts_enabled(intr); |
992 | return; |
993 | } |
994 | } |
995 | #else |
996 | (void)recover; |
997 | #endif |
998 | |
999 | #if CONFIG_PGTRACE |
1000 | if (pgtrace_enabled) { |
1001 | /* Check to see if trace bit is set */ |
1002 | result = pmap_pgtrace_fault(map->pmap, fault_addr, state); |
1003 | if (result == KERN_SUCCESS) return; |
1004 | } |
1005 | #endif |
1006 | |
1007 | /* check to see if it is just a pmap ref/modify fault */ |
1008 | |
1009 | if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) { |
1010 | result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, TRUE); |
1011 | } |
1012 | if (result != KERN_SUCCESS) { |
1013 | |
1014 | { |
1015 | /* We have to fault the page in */ |
1016 | result = vm_fault(map, vm_fault_addr, fault_type, |
1017 | /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE, |
1018 | /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); |
1019 | } |
1020 | } |
1021 | if (result == KERN_SUCCESS || result == KERN_ABORTED) { |
1022 | thread_exception_return(); |
1023 | /* NOTREACHED */ |
1024 | } |
1025 | |
1026 | codes[0] = result; |
1027 | } else if (is_alignment_fault(fault_code)) { |
1028 | codes[0] = EXC_ARM_DA_ALIGN; |
1029 | } else if (is_parity_error(fault_code)) { |
1030 | #if defined(APPLE_ARM64_ARCH_FAMILY) |
1031 | if (fault_code == FSC_SYNC_PARITY) { |
1032 | arm64_platform_error(state, esr, fault_addr); |
1033 | thread_exception_return(); |
1034 | /* NOTREACHED */ |
1035 | } |
1036 | #else |
1037 | panic("User parity error." ); |
1038 | #endif |
1039 | } else { |
1040 | codes[0] = KERN_FAILURE; |
1041 | } |
1042 | |
1043 | codes[1] = fault_addr; |
1044 | exception_triage(exc, codes, numcodes); |
1045 | assert(0); /* NOTREACHED */ |
1046 | } |
1047 | |
#if __ARM_PAN_AVAILABLE__
/*
 * Determine whether a kernel permission fault was caused by PAN
 * (Privileged Access Never).  A fault is classified as a PAN fault when:
 *   1. it is a permission fault,
 *   2. PAN was enabled in the saved state, and
 *   3. an address translation of the faulting address — on which PAN has
 *      no effect — succeeds.
 */
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	vm_offset_t phys;

	if (!is_permission_fault(fault_code)) {
		return FALSE;
	}
	if ((get_saved_state_cpsr(state) & PSR64_PAN) == 0) {
		return FALSE;
	}

	/* Use the write-preflight translation when the fault was a write. */
	if (esr & ISS_DA_WNR) {
		phys = mmu_kvtop_wpreflight(fault_addr);
	} else {
		phys = mmu_kvtop(fault_addr);
	}

	return (phys != 0) ? TRUE : FALSE;
}
#endif
1075 | |
/*
 * Handle an instruction or data abort taken from EL1 (kernel mode).
 *
 *   state      saved register state at the time of the abort
 *   esr        raw exception syndrome value
 *   fault_addr faulting virtual address (from FAR)
 *   fault_code decoded fault status code (FSC) from the ESR
 *   fault_type VM protections implied by the access
 *   recover    copyio recovery PC, or 0 if none
 *
 * Serviceable VM faults are resolved and this function returns so the
 * faulting instruction can be retried; unserviceable aborts panic.
 */
static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	thread_t thread = current_thread();
	(void)esr;

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				set_saved_state_pc(state, recover);
			} else {
				/* All AArch64 instructions are 4 bytes wide. */
				set_saved_state_pc(state, get_saved_state_pc(state) + 4);
			}
			return;
		} else {
			boolean_t intr = ml_set_interrupts_enabled(FALSE);
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe" , state);
			(void) ml_set_interrupts_enabled(intr);
			return;
		}
	}
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack." , state);
#endif

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t map;
		int interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read only region
		 * when running with KTRR.
		 */


#if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST)
		if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) &&
		    (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) {
			++pan_exception_level;
			// On an exception taken from a PAN-disabled context, verify
			// that PAN is re-enabled for the exception handler and that
			// accessing the test address produces a PAN fault.
			pan_fault_value = *(char *)pan_test_addr;
			// Skip the faulting instruction (4 bytes) and resume.
			set_saved_state_pc(state, get_saved_state_pc(state) + 4);
			return;
		}
#endif

		/* Faults on the kernel's static (wired) mapping are never valid. */
		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n" ,state);
		}

		/* Kernel addresses fault against kernel_map; otherwise the
		 * current thread's user map (e.g. copyio). */
		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;
			interruptible = THREAD_ABORTSAFE;
		}

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) return;
		}

		if (ml_at_interrupt_context())
			panic_with_thread_kernel_state("Unexpected abort while on interrupt stack." , state);
#endif

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
			if (result == KERN_SUCCESS) return;
		}

		if (result != KERN_PROTECTION_FAILURE)
		{
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) return;

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			set_saved_state_pc(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
#ifdef CONFIG_XNUPOST
			if ((pan_test_addr != 0) && (fault_addr == pan_test_addr))
			{
				++pan_exception_level;
				// read the user-accessible value to make sure
				// pan is enabled and produces a 2nd fault from
				// the exception handler
				if (pan_exception_level == 1)
					pan_fault_value = *(char *)pan_test_addr;
				// this fault address is used for PAN test
				// disable PAN and rerun
				set_saved_state_cpsr(state,
				    get_saved_state_cpsr(state) & (~PSR64_PAN));
				return;
			}
#endif
			panic_with_thread_kernel_state("Privileged access never abort." , state);
		}
#endif

#if CONFIG_PGTRACE
	} else if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack." , state);
#endif
	} else if (is_alignment_fault(fault_code)) {
		panic_with_thread_kernel_state("Unaligned kernel data abort." , state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error." , state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n" , fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort." , state);
}
1226 | |
1227 | extern void syscall_trace(struct arm_saved_state * regs); |
1228 | |
/*
 * Dispatch an SVC (syscall) taken from userspace.
 *
 * Negative trap numbers are Mach traps (with -3/-4 handled inline as fast
 * time traps); non-negative numbers are BSD (unix) syscalls.  The platform
 * syscall trap number gets its own dispatcher.  The syscall handlers
 * return to userspace themselves; only the fast time traps return here.
 */
static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?" );
	}

	/* Sync the BSD credential into the uthread before dispatch. */
	mach_kauth_cred_uthread_update();

	if (trap_no < 0) {
		/* Fast traps: mach_absolute_time / mach_continuous_time. */
		if (trap_no == -3) {
			handle_mach_absolute_time_trap(state);
			return;
		} else if (trap_no == -4) {
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
	}
}
1274 | |
1275 | static void |
1276 | handle_mach_absolute_time_trap(arm_saved_state_t *state) |
1277 | { |
1278 | uint64_t now = mach_absolute_time(); |
1279 | saved_state64(state)->x[0] = now; |
1280 | } |
1281 | |
1282 | static void |
1283 | handle_mach_continuous_time_trap(arm_saved_state_t *state) |
1284 | { |
1285 | uint64_t now = mach_continuous_time(); |
1286 | saved_state64(state)->x[0] = now; |
1287 | } |
1288 | |
1289 | static void |
1290 | handle_msr_trap(arm_saved_state_t *state, uint32_t iss) |
1291 | { |
1292 | exception_type_t exception = EXC_BAD_INSTRUCTION; |
1293 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; |
1294 | mach_msg_type_number_t numcodes = 2; |
1295 | uint32_t instr = 0; |
1296 | |
1297 | (void)iss; |
1298 | |
1299 | if (!is_saved_state64(state)) { |
1300 | panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n" , ESR_EC_MSR_TRAP); |
1301 | } |
1302 | |
1303 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { |
1304 | panic("MSR/MRS trap (EC 0x%x) from kernel\n" , ESR_EC_MSR_TRAP); |
1305 | } |
1306 | |
1307 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); |
1308 | codes[1] = instr; |
1309 | |
1310 | exception_triage(exception, codes, numcodes); |
1311 | } |
1312 | |
1313 | static void |
1314 | handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr) |
1315 | { |
1316 | exception_type_t exception = EXC_BAD_INSTRUCTION; |
1317 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; |
1318 | mach_msg_type_number_t numcodes = 2; |
1319 | uint32_t instr = 0; |
1320 | |
1321 | if (is_saved_state64(state)) { |
1322 | panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit." , esr); |
1323 | } |
1324 | |
1325 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { |
1326 | panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?" , esr); |
1327 | } |
1328 | |
1329 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); |
1330 | codes[1] = instr; |
1331 | |
1332 | exception_triage(exception, codes, numcodes); |
1333 | } |
1334 | |
1335 | static void |
1336 | handle_simd_trap(arm_saved_state_t *state, uint32_t esr) |
1337 | { |
1338 | exception_type_t exception = EXC_BAD_INSTRUCTION; |
1339 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; |
1340 | mach_msg_type_number_t numcodes = 2; |
1341 | uint32_t instr = 0; |
1342 | |
1343 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { |
1344 | panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?" , esr); |
1345 | } |
1346 | |
1347 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); |
1348 | codes[1] = instr; |
1349 | |
1350 | exception_triage(exception, codes, numcodes); |
1351 | } |
1352 | |
/*
 * Second-level IRQ handler: run the platform-registered interrupt handler
 * and harvest interrupt timing as an entropy source.
 */
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t timestamp = 0;
	uint32_t old_entropy_data = 0;
	uint32_t * old_entropy_data_ptr = NULL;
	uint32_t * new_entropy_data_ptr = NULL;
	cpu_data_t * cdp = getCpuDatap();
#if DEVELOPMENT || DEBUG
	/* Snapshot preemption level to catch unbalanced handlers below. */
	int preemption_level = get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer. As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_data_ptr = EntropyData.index_ptr;
	new_entropy_data_ptr = old_entropy_data_ptr + 1;

	/* Wrap the index pointer at the end of the circular buffer. */
	if (new_entropy_data_ptr >= &EntropyData.buffer[ENTROPY_BUFFER_SIZE]) {
		new_entropy_data_ptr = EntropyData.buffer;
	}

	EntropyData.index_ptr = new_entropy_data_ptr;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *old_entropy_data_ptr;
	*old_entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);

	sleh_interrupt_handler_epilogue();
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("irq handler %p changed preemption level from %d to %d" , cdp->interrupt_handler, preemption_level, get_preemption_level());
#endif
}
1402 | |
/*
 * Second-level FIQ handler: classify the FIQ (PMI when MONOTONIC, timer
 * otherwise) and dispatch to the matching handler.
 */
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if DEVELOPMENT || DEBUG
	/* Snapshot preemption level to catch unbalanced handlers below. */
	int preemption_level = get_preemption_level();
#endif
#if MONOTONIC
	uint64_t pmsr = 0, upmsr = 0;
#endif /* MONOTONIC */

	/* Classify: a pending PMI wins over a pending timer. */
#if MONOTONIC
	if (mt_pmi_pending(&pmsr, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	} else
#endif /* MONOTONIC */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

#if MONOTONIC
	if (type == DBG_INTR_TYPE_PMI) {
		mt_fiq(getCpuDatap(), pmsr, upmsr);
	} else
#endif /* MONOTONIC */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("fiq type %u changed preemption level from %d to %d" , type, preemption_level, get_preemption_level());
#endif
}
1454 | |
/*
 * Second-level SError (asynchronous external abort) handler: forward the
 * syndrome and fault address to the platform error handler.
 */
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	arm_saved_state_t *state = &context->ss;
#if DEVELOPMENT || DEBUG
	/* Snapshot preemption level to catch unbalanced handlers below. */
	int preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("serror changed preemption level from %d to %d" , preemption_level, get_preemption_level());
#endif
}
1470 | |
/*
 * Emit the DBG_FUNC_END kdebug tracepoint for a Mach syscall, recording
 * its return value against the given (positive) call number.
 */
void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}
1480 | |
/*
 * Return from a Mach syscall: store the kern_return_t in the saved x0,
 * emit the syscall-exit tracepoint, and resume userspace via
 * thread_exception_return().  Never returns to the caller.
 */
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	/* Mach syscalls are only dispatched from 64-bit saved state here. */
	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if DEBUG || DEVELOPMENT
	/* Verify no kernel allocation name was left set across the syscall. */
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared" , kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}
1507 | |
/*
 * Debug hook invoked at SVC entry (see TRACE_SYSCALL in handle_svc).
 * Intentionally a no-op; re-enable the kprintf below to log syscalls.
 */
void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}
1514 | |
1515 | static void |
1516 | sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type) |
1517 | { |
1518 | uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); |
1519 | |
1520 | uint64_t pc = is_user ? get_saved_state_pc(state) : |
1521 | VM_KERNEL_UNSLIDE(get_saved_state_pc(state)); |
1522 | |
1523 | KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, |
1524 | 0, pc, is_user, type); |
1525 | |
1526 | #if CONFIG_TELEMETRY |
1527 | if (telemetry_needs_record) { |
1528 | telemetry_mark_curthread((boolean_t)is_user, FALSE); |
1529 | } |
1530 | #endif /* CONFIG_TELEMETRY */ |
1531 | } |
1532 | |
/*
 * Common exit work for IRQ/FIQ handling: sample for kperf and emit the
 * interrupt-end kdebug tracepoint.
 */
static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
1541 | |
1542 | void |
1543 | sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused) |
1544 | { |
1545 | thread_t thread = current_thread(); |
1546 | vm_offset_t kernel_stack_bottom, sp; |
1547 | |
1548 | sp = get_saved_state_sp(&context->ss); |
1549 | kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE; |
1550 | |
1551 | if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) { |
1552 | panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow)." , &context->ss); |
1553 | } |
1554 | |
1555 | panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption)." , &context->ss); |
1556 | } |
1557 | |
1558 | |