/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ux_exception.h>

#include <arm/signal.h>
#include <sys/signalvar.h>
#include <sys/kdebug.h>
#include <sys/sdt.h>
#include <sys/wait.h>
#include <kern/thread.h>
#include <mach/arm/thread_status.h>
#include <arm64/proc_reg.h>

#include <kern/assert.h>
#include <kern/ast.h>
#include <pexpert/pexpert.h>
#include <sys/random.h>

extern struct arm_saved_state *get_user_regs(thread_t);
extern user_addr_t thread_get_cthread_self(void);
extern kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count, thread_set_status_flags_t);
extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count, thread_set_status_flags_t);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor,
    thread_state_t tstate, mach_msg_type_number_t count,
    thread_state_t old_tstate, mach_msg_type_number_t old_count,
    thread_set_status_flags_t flags);
extern task_t current_task(void);
extern bool task_needs_user_signed_thread_state(task_t);

/* XXX Put these someplace smarter... */
typedef struct mcontext32 mcontext32_t;
typedef struct mcontext64 mcontext64_t;

/* Signal handler flavors supported */
/* These definitions should match the libplatform implementation */
#define UC_TRAD                 1
#define UC_FLAVOR               30
#define UC_SET_ALT_STACK        0x40000000
#define UC_RESET_ALT_STACK      0x80000000

/* The following are valid mcontext sizes */
#define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int))
#define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int))

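/*
 * Note: when carving the signal frame out of the interrupted stack, arm64
 * additionally skips a 128-byte red zone below the stack pointer and keeps
 * the resulting frame 16-byte aligned, as the arm64 ABI requires for sp.
 */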
#if __arm64__
#define C_64_REDZONE_LEN        128
#endif

#define TRUNC_TO_16_BYTES(addr) (addr & ~0xf)

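/*
 * Gather the 32-bit thread, exception and VFP state of the interrupted
 * thread into the mcontext that will be copied out to user space, with the
 * general-purpose thread state converted to its user-visible form first.
 */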
static int
sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(!proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE_COUNT;
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate,
        &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->fs;
    state_count = ARM_VFP_STATE_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

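/*
 * Boot-arg tunable: when true (the default), the saved arm64 thread state is
 * signed with a per-thread user diversifier so that sigreturn() only accepts
 * the matching, unmodified state (see TSSF_THREAD_USER_DIV and
 * TSSF_ALLOW_ONLY_MATCHING_TOKEN below).
 */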
static TUNABLE(bool, pac_sigreturn_token, "pac_sigreturn_token", true);

#if defined(__arm64__)
struct user_sigframe64 {
    /*
     * Unlike the 32-bit frame, no puctx/token fields are stored here:
     * for ARM64 those last two handler arguments are passed in registers.
     */
    user64_siginfo_t sinfo;
    struct user_ucontext64 uctx;
    mcontext64_t mctx;
};

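/*
 * Gather the 64-bit thread, exception and NEON state into the outgoing
 * mcontext.  While converting the thread state to its user representation,
 * the machine layer is also asked to stash a sigreturn token
 * (TSSF_STASH_SIGRETURN_TOKEN) and, when enabled, to sign the state with the
 * user diversifier so that sigreturn() can detect tampering.
 */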
static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
    void *tstate;
    mach_msg_type_number_t state_count;

    assert(proc_is64bit_data(current_proc()));

    tstate = (void *) ts;
    state_count = ARM_THREAD_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    mcp->ss = *ts;
    tstate = (void *) &mcp->ss;
    state_count = ARM_THREAD_STATE64_COUNT;
    thread_set_status_flags_t flags = TSSF_STASH_SIGRETURN_TOKEN;
    if (pac_sigreturn_token || task_needs_user_signed_thread_state(current_task())) {
        flags |= TSSF_THREAD_USER_DIV;
    }
    if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate,
        &state_count, flags) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->es;
    state_count = ARM_EXCEPTION_STATE64_COUNT;
    if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) {
        return EINVAL;
    }

    tstate = (void *) &mcp->ns;
    state_count = ARM_NEON_STATE64_COUNT;
    if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

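/*
 * Populate the 64-bit user ucontext: alternate-stack disposition, signal
 * mask, and the address/size of the mcontext that follows it in the user
 * signal frame.
 */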
static void
sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr_t sp, user64_size_t stack_size, user64_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = sp;
    uctx->uc_stack.ss_size = stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user64_addr_t)0;
    uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64;
    uctx->uc_mcontext64 = (user64_addr_t) p_mctx;
}

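/*
 * Point the 64-bit thread at the user signal trampoline: the handler,
 * flavor, signal number, siginfo pointer, ucontext pointer and sigreturn
 * token are passed in x0-x5, pc is set to the trampoline and sp to the
 * freshly built signal frame.
 */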
static kern_return_t
sendsig_set_thread_state64(arm_thread_state64_t *regs,
    user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo,
    user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act)
{
    assert(proc_is64bit_data(current_proc()));

    regs->x[0] = catcher;
    regs->x[1] = infostyle;
    regs->x[2] = sig;
    regs->x[3] = p_sinfo;
    regs->x[4] = p_uctx;
    regs->x[5] = token;
    regs->pc = trampact;
    regs->cpsr = PSR64_USER64_DEFAULT;
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE64, (void *)regs, ARM_THREAD_STATE64_COUNT);
}
#endif /* defined(__arm64__) */

static void
sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t sp, user_size_t stack_size, user_addr_t p_mctx)
{
    bzero(uctx, sizeof(*uctx));
    uctx->uc_onstack = oonstack;
    uctx->uc_sigmask = mask;
    uctx->uc_stack.ss_sp = (user32_addr_t) sp;
    uctx->uc_stack.ss_size = (user32_size_t) stack_size;
    if (oonstack) {
        uctx->uc_stack.ss_flags |= SS_ONSTACK;
    }
    uctx->uc_link = (user32_addr_t)0;
    uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32;
    uctx->uc_mcontext = (user32_addr_t) p_mctx;
}

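/*
 * Point a 32-bit thread at the user signal trampoline.  Only r0-r3 carry
 * arguments (the ucontext pointer and token live in the frame itself), and
 * bit 0 of the trampoline address selects Thumb mode, in which case the
 * Thumb bit is set in cpsr and the address is realigned.
 */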
static kern_return_t
sendsig_set_thread_state32(arm_thread_state_t *regs,
    user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo,
    user32_addr_t trampact, user32_addr_t sp, thread_t th_act)
{
    assert(!proc_is64bit_data(current_proc()));

    regs->r[0] = catcher;
    regs->r[1] = infostyle;
    regs->r[2] = sig;
    regs->r[3] = p_sinfo;
    if (trampact & 1) {
        regs->pc = trampact & ~1;
#if defined(__arm64__)
        regs->cpsr = PSR64_USER32_DEFAULT | PSR64_MODE_USER32_THUMB;
#else
#error Unknown architecture.
#endif
    } else {
        regs->pc = trampact;
        regs->cpsr = PSR_USERDFLT;
    }
    regs->sp = sp;

    return thread_setstatus(th_act, ARM_THREAD_STATE, (void *)regs, ARM_THREAD_STATE_COUNT);
}

#if CONFIG_DTRACE
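/*
 * Mirror the siginfo into the uthread's DTrace-visible copy, fire the
 * proc:::fault probe for hardware-generated signals, and then the
 * proc:::signal-handle probe for the delivered signal.
 */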
static void
sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher)
{
    bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo));

    ut->t_dtrace_siginfo.si_signo = sinfo->si_signo;
    ut->t_dtrace_siginfo.si_code = sinfo->si_code;
    ut->t_dtrace_siginfo.si_pid = sinfo->si_pid;
    ut->t_dtrace_siginfo.si_uid = sinfo->si_uid;
    ut->t_dtrace_siginfo.si_status = sinfo->si_status;
    /* XXX truncates faulting address to void * */
    ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr);

    /* Fire DTrace proc:::fault probe when signal is generated by hardware. */
    switch (sig) {
    case SIGILL: case SIGBUS: case SIGSEGV: case SIGFPE: case SIGTRAP:
        DTRACE_PROC2(fault, int, (int)(ut->uu_code), siginfo_t *, &(ut->t_dtrace_siginfo));
        break;
    default:
        break;
    }

    /* XXX truncates faulting address to uintptr_t */
    DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo),
        void (*)(void), CAST_DOWN(uintptr_t, catcher));
}
#endif

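/*
 * 32-bit signal frame layout on the user stack.  Unlike the arm64 frame,
 * the ucontext pointer and sigreturn token cannot ride in spare argument
 * registers, so they are stored at the base of the frame where the
 * trampoline can find them.
 */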
struct user_sigframe32 {
    user32_addr_t puctx;
    user32_addr_t token;
    user32_siginfo_t sinfo;
    struct user_ucontext32 uctx;
    mcontext32_t mctx;
};

/*
 * Send an interrupt (signal) to a process: build a signal frame on the
 * appropriate user stack and redirect the thread to the signal trampoline.
 */
void
sendsig(
    struct proc * p,
    user_addr_t catcher,
    int sig,
    int mask,
    __unused uint32_t code,
    sigset_t siginfo
    )
{
    union {
        struct ts32 {
            arm_thread_state_t ss;
        } ts32;
#if defined(__arm64__)
        struct ts64 {
            arm_thread_state64_t ss;
        } ts64;
#endif
    } ts;
    union {
        struct user_sigframe32 uf32;
#if defined(__arm64__)
        struct user_sigframe64 uf64;
#endif
    } user_frame;

    user_siginfo_t sinfo;
    user_addr_t sp = 0, trampact;
    struct sigacts *ps = &p->p_sigacts;
    int oonstack, infostyle;
    thread_t th_act;
    struct uthread *ut;
    user_size_t stack_size = 0;
    user_addr_t p_uctx, token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = get_bsdthread_info(th_act);

    bzero(&ts, sizeof(ts));
    bzero(&user_frame, sizeof(user_frame));

    if (siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    } else {
        infostyle = UC_TRAD;
    }

    trampact = SIGTRAMP(p, sig);
    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

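    /*
     * Lazily (re)generate the per-thread sigreturn secrets: a random token
     * that is XORed with the user ucontext address to form the value checked
     * by sigreturn(), and a non-zero diversifier used when signing the saved
     * thread state.  They are only refreshed once every previously delivered
     * signal has returned.
     */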
    if (ut->uu_pending_sigreturn == 0) {
        /* Generate random token value used to validate sigreturn arguments */
        read_random(&ut->uu_sigreturn_token, sizeof(ut->uu_sigreturn_token));

        do {
            read_random(&ut->uu_sigreturn_diversifier, sizeof(ut->uu_sigreturn_diversifier));
            ut->uu_sigreturn_diversifier &=
                __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
        } while (ut->uu_sigreturn_diversifier == 0);
    }
    ut->uu_pending_sigreturn++;

    /*
     * Get sundry thread state.
     */
    if (proc_is64bit_data(p)) {
#ifdef __arm64__
        int ret = 0;
        if ((ret = sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_get_state64 failed with ret %d, expected 0", p->p_comm, proc_getpid(p), ret);
#endif
            goto bad2;
        }
#else
#error Unsupported architecture
#endif
    } else {
        int ret = 0;
        if ((ret = sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx)) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_get_state32 failed with ret %d, expected 0", p->p_comm, proc_getpid(p), ret);
#endif
            goto bad2;
        }
    }

    /*
     * Figure out where our new stack lives.
     */
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
        (ps->ps_sigonstack & sigmask(sig))) {
        sp = ut->uu_sigstk.ss_sp;
        stack_size = ut->uu_sigstk.ss_size;

        sp += stack_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        /*
         * Get stack pointer, and allocate enough space
         * for signal handler data.
         */
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
#else
#error Unsupported architecture
#endif
        } else {
            sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
        }
    }

    /* Make sure to move stack pointer down for room for metadata */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN);
        sp = TRUNC_TO_16_BYTES(sp);
#else
#error Unsupported architecture
#endif
    } else {
        sp -= sizeof(user_frame.uf32);
    }

    proc_unlock(p);

    /*
     * Fill in ucontext (points to mcontext, i.e. thread states).
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
            (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
#error Unsupported architecture
#endif
    } else {
        sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
            (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
    }

    /*
     * Setup siginfo.
     */
    bzero((caddr_t) &sinfo, sizeof(sinfo));
    sinfo.si_signo = sig;

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        sinfo.si_addr = ts.ts64.ss.pc;
        sinfo.pad[0] = ts.ts64.ss.sp;
#else
#error Unsupported architecture
#endif
    } else {
        sinfo.si_addr = ts.ts32.ss.pc;
        sinfo.pad[0] = ts.ts32.ss.sp;
    }

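    /*
     * Fill in the signal-specific siginfo fields: si_code, and for fault
     * signals the faulting address taken from the saved exception state.
     */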
    switch (sig) {
    case SIGILL:
#ifdef BER_XXX
        if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) {
            sinfo.si_code = ILL_ILLOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) {
            sinfo.si_code = ILL_PRVOPC;
        } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) {
            sinfo.si_code = ILL_ILLTRP;
        } else {
            sinfo.si_code = ILL_NOOP;
        }
#else
        sinfo.si_code = ILL_ILLTRP;
#endif
        break;

    case SIGFPE:
        switch (ut->uu_code) {
        case EXC_ARM_FP_UF:
            sinfo.si_code = FPE_FLTUND;
            break;
        case EXC_ARM_FP_OF:
            sinfo.si_code = FPE_FLTOVF;
            break;
        case EXC_ARM_FP_IO:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_DZ:
            sinfo.si_code = FPE_FLTDIV;
            break;
        case EXC_ARM_FP_ID:
            sinfo.si_code = FPE_FLTINV;
            break;
        case EXC_ARM_FP_IX:
            sinfo.si_code = FPE_FLTRES;
            break;
        default:
            sinfo.si_code = FPE_NOOP;
            break;
        }

        break;

    case SIGBUS:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
#error Unsupported architecture
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

        sinfo.si_code = BUS_ADRALN;
        break;

    case SIGSEGV:
        if (proc_is64bit_data(p)) {
#if defined(__arm64__)
            sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
#error Unsupported architecture
#endif
        } else {
            sinfo.si_addr = user_frame.uf32.mctx.es.far;
        }

#ifdef BER_XXX
        /* First check in srr1 and then in dsisr */
        if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) {
            sinfo.si_code = SEGV_ACCERR;
        } else {
            sinfo.si_code = SEGV_MAPERR;
        }
#else
        sinfo.si_code = SEGV_ACCERR;
#endif
        break;

    default:
    {
        int status_and_exitcode;

        /*
         * All other signals need to fill out a minimum set of
         * information for the siginfo structure passed into
         * the signal handler, if SA_SIGINFO was specified.
         *
         * p->si_status actually contains both the status and
         * the exit code; we save it off in its own variable
         * for later breakdown.
         */
        proc_lock(p);
        sinfo.si_pid = p->si_pid;
        p->si_pid = 0;
        status_and_exitcode = p->si_status;
        p->si_status = 0;
        sinfo.si_uid = p->si_uid;
        p->si_uid = 0;
        sinfo.si_code = p->si_code;
        p->si_code = 0;
        proc_unlock(p);
        if (sinfo.si_code == CLD_EXITED) {
            if (WIFEXITED(status_and_exitcode)) {
                sinfo.si_code = CLD_EXITED;
            } else if (WIFSIGNALED(status_and_exitcode)) {
                if (WCOREDUMP(status_and_exitcode)) {
                    sinfo.si_code = CLD_DUMPED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                } else {
                    sinfo.si_code = CLD_KILLED;
                    status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                }
            }
        }
        /*
         * The recorded status contains the exit code and the
         * signal information, but the information to be passed
         * in the siginfo to the handler is supposed to only
         * contain the status, so we have to shift it out.
         */
        sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
        p->p_xhighbits = 0;
        break;
    }
    }

#if CONFIG_DTRACE
    sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

    /*
     * Copy signal-handling frame out to user space, set thread state.
     */
    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;

        /*
         * mctx filled in when we get state.  uctx filled in by
         * sendsig_fill_uctx64().  We fill in the sinfo now.
         */
        siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;

        int ret = 0;
        if ((ret = copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64))) != 0) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] copyout of user_frame to (sp, size) = (0x%llx, %zu) failed with ret %d, expected 0\n", p->p_comm, proc_getpid(p), sp, sizeof(user_frame.uf64), ret);
#endif
            goto bad;
        }

        if ((kr = sendsig_set_thread_state64(&ts.ts64.ss,
            catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
            (user64_addr_t)p_uctx, token, trampact, sp, th_act)) != KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
            printf("process [%s][%d] sendsig_set_thread_state64 failed with kr %d, expected 0", p->p_comm, proc_getpid(p), kr);
#endif
            goto bad;
        }

#else
#error Unsupported architecture
#endif
    } else {
        user32_addr_t token;

        /*
         * mctx filled in when we get state.  uctx filled in by
         * sendsig_fill_uctx32().  We fill in the sinfo, *pointer*
         * to uctx and token now.
         */
        siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

        p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
        /*
         * Generate the validation token for sigreturn
         */
        token_uctx = (user_addr_t)p_uctx;
        kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
        assert(kr == KERN_SUCCESS);
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ut->uu_sigreturn_token;

        user_frame.uf32.puctx = (user32_addr_t)p_uctx;
        user_frame.uf32.token = token;

        if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
            goto bad;
        }

        if (sendsig_set_thread_state32(&ts.ts32.ss,
            CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
            CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) {
            goto bad;
        }
    }

    proc_lock(p);
    return;

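    /*
     * Failure path: the signal frame could not be built or published, so
     * undo the pending-sigreturn accounting, reset SIGILL to its default
     * action and deliver it instead.
     */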
bad:
    proc_lock(p);
bad2:
    assert(ut->uu_pending_sigreturn > 0);
    ut->uu_pending_sigreturn--;
    proc_set_sigact(p, SIGILL, SIG_DFL);
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /* sendsig is called with signal lock held */
    proc_unlock(p);
    psignal_locked(p, SIGILL);
    proc_lock(p);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous context left by sendsig.
 * Check carefully to make sure that the user has not
 * modified the psr to gain improper privileges.
 */

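/*
 * Copy in the 32-bit ucontext named by the sigreturn argument, then the
 * mcontext it points to, rejecting any context whose mcontext size is not
 * the single flavor written out by sendsig().
 */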
static int
sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(!proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE32:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

static int
sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx)
{
    assert(!proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
#if defined(__arm64__)
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER32_DEFAULT;
#else
#error Unknown architecture.
#endif

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss,
        ARM_THREAD_STATE_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs,
        ARM_VFP_STATE_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}

#if defined(__arm64__)
static int
sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_addr_t uctx_addr)
{
    int error;

    assert(proc_is64bit_data(current_proc()));

    error = copyin(uctx_addr, uctx, sizeof(*uctx));
    if (error) {
        return error;
    }

    /* validate the machine context size */
    switch (uctx->uc_mcsize) {
    case UC_FLAVOR_SIZE64:
        break;
    default:
        return EINVAL;
    }

    assert(uctx->uc_mcsize == sizeof(*mctx));
    error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize);
    if (error) {
        return error;
    }

    return 0;
}

static int
sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx, thread_set_status_flags_t tssf_flags)
{
    assert(proc_is64bit_data(current_proc()));

    /* validate the thread state, set/reset appropriate mode bits in cpsr */
    mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT;

    if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss,
        ARM_THREAD_STATE64_COUNT, NULL, 0, tssf_flags) != KERN_SUCCESS) {
        return EINVAL;
    }
    if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns,
        ARM_NEON_STATE64_COUNT, NULL, 0, TSSF_FLAGS_NONE) != KERN_SUCCESS) {
        return EINVAL;
    }

    return 0;
}
#endif /* defined(__arm64__) */

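/*
 * sigreturn: besides restoring the saved machine state, re-derive and check
 * the validation token computed by sendsig (user ucontext address XOR the
 * per-thread secret) and, on arm64, let the machine layer verify the signed
 * thread state.  A mismatch fails with EINVAL unless sigreturn validation
 * has been disabled for the process.
 */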
/* ARGSUSED */
int
sigreturn(
    struct proc * p,
    struct sigreturn_args * uap,
    __unused int *retval)
{
    union {
        user_ucontext32_t uc32;
#if defined(__arm64__)
        user_ucontext64_t uc64;
#endif
    } uctx;

    union {
        mcontext32_t mc32;
#if defined(__arm64__)
        mcontext64_t mc64;
#endif
    } mctx;

    struct sigacts *ps = &p->p_sigacts;
    int error, sigmask = 0, onstack = 0;
    thread_t th_act;
    struct uthread *ut;
    uint32_t sigreturn_validation;
    user_addr_t token_uctx;
    kern_return_t kr;

    th_act = current_thread();
    ut = (struct uthread *) get_bsdthread_info(th_act);

    /* see osfmk/kern/restartable.c */
    act_set_ast_reset_pcs(TASK_NULL, th_act);

    /*
     * If we are being asked to change the altstack flag on the thread, we
     * just set/reset it and return (the uap->uctx is not used).
     */
    if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) {
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
        return 0;
    } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) {
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
        return 0;
    }

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc64.uc_onstack;
        sigmask = uctx.uc64.uc_sigmask;
#else
#error Unsupported architecture
#endif
    } else {
        error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
        if (error != 0) {
            return error;
        }

        onstack = uctx.uc32.uc_onstack;
        sigmask = uctx.uc32.uc_sigmask;
    }

    if ((onstack & 01)) {
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
    }

    ut->uu_sigmask = sigmask & ~sigcantmask;
    if (ut->uu_siglist & ~ut->uu_sigmask) {
        signal_setast(current_thread());
    }

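    /*
     * Recompute the expected sigreturn token from the (user-converted)
     * ucontext address and the per-thread secret and compare it against the
     * value passed by the trampoline; mismatches are only tolerated when
     * sigreturn validation is disabled.
     */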
    sigreturn_validation = atomic_load_explicit(
        &ps->ps_sigreturn_validation, memory_order_relaxed);
    token_uctx = uap->uctx;
    kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
    assert(kr == KERN_SUCCESS);

    if (proc_is64bit_data(p)) {
#if defined(__arm64__)
        user64_addr_t token;
        token = (user64_addr_t)token_uctx ^ (user64_addr_t)ut->uu_sigreturn_token;
        thread_set_status_flags_t tssf_flags = TSSF_FLAGS_NONE;

        if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
                p->p_comm, proc_getpid(p), (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }

        if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
            tssf_flags |= TSSF_CHECK_SIGRETURN_TOKEN;

            if (pac_sigreturn_token || task_needs_user_signed_thread_state(current_task())) {
                tssf_flags |= TSSF_ALLOW_ONLY_MATCHING_TOKEN | TSSF_THREAD_USER_DIV;
            }
        }
        error = sigreturn_set_state64(th_act, &mctx.mc64, tssf_flags);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn set_state64 error %d\n",
                p->p_comm, proc_getpid(p), error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
#else
#error Unsupported architecture
#endif
    } else {
        user32_addr_t token;
        token = (user32_addr_t)token_uctx ^ (user32_addr_t)ut->uu_sigreturn_token;
        if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
                p->p_comm, proc_getpid(p), (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
            if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
                return EINVAL;
            }
        }
        error = sigreturn_set_state32(th_act, &mctx.mc32);
        if (error != 0) {
#if DEVELOPMENT || DEBUG
            printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
                p->p_comm, proc_getpid(p), error);
#endif /* DEVELOPMENT || DEBUG */
            return error;
        }
    }

    /* Decrement the pending sigreturn count */
    if (ut->uu_pending_sigreturn > 0) {
        ut->uu_pending_sigreturn--;
    }

    return EJUSTRETURN;
}

/*
 * machine_exception() performs machine-dependent translation
 * of a mach exception to a unix signal.
 */
int
machine_exception(int exception,
    __unused mach_exception_code_t code,
    __unused mach_exception_subcode_t subcode)
{
    switch (exception) {
    case EXC_BAD_INSTRUCTION:
        return SIGILL;

    case EXC_ARITHMETIC:
        return SIGFPE;
    }

    return 0;
}