/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 */

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
#include <mach/thread_act.h>
#include <machine/machine_routines.h>
#include <arm/thread.h>
#include <arm/proc_reg.h>
#include <pexpert/pexpert.h>

#include <sys/kernel.h>
#include <sys/vm.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>

#include <security/audit/audit.h>

#if CONFIG_DTRACE
extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
#endif /* CONFIG_DTRACE */

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, struct sysent *);
static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, struct sysent *);
static void arm_prepare_u32_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
static void arm_prepare_syscall_return(struct sysent *, struct arm_saved_state *, uthread_t, int);
static int arm_get_syscall_number(struct arm_saved_state *);
static void arm_trace_unix_syscall(int, struct arm_saved_state *);
static void arm_clear_syscall_error(struct arm_saved_state *);
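
/*
 * Convenience accessors: save_rN expands to entry N of the r[] array in
 * arm_saved_state32_t, so 32-bit registers can be referenced by name.
 */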
#define save_r0  r[0]
#define save_r1  r[1]
#define save_r2  r[2]
#define save_r3  r[3]
#define save_r4  r[4]
#define save_r5  r[5]
#define save_r6  r[6]
#define save_r7  r[7]
#define save_r8  r[8]
#define save_r9  r[9]
#define save_r10 r[10]
#define save_r11 r[11]
#define save_r12 r[12]
#define save_r13 r[13]

#if COUNT_SYSCALLS
__XNU_PRIVATE_EXTERN int do_count_syscalls = 1;
__XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL];
#endif

#define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \
                                    ((code) == SYS_kdebug_trace64) || \
                                    ((code) == SYS_kdebug_trace_string))

/*
 * Function: unix_syscall
 *
 * Inputs: regs - pointer to Process Control Block
 *
 * Outputs: none
 */
#ifdef __arm__
__attribute__((noreturn))
#endif
void
unix_syscall(
    struct arm_saved_state * state,
    __unused thread_t thread_act,
    struct uthread * uthread,
    struct proc * proc)
{
    struct sysent *callp;
    int error;
    unsigned short code;
    pid_t pid;

#if defined(__arm__)
    assert(is_saved_state32(state));
#endif

    uthread_reset_proc_refcount(uthread);

    code = arm_get_syscall_number(state);

#define unix_syscall_kprintf(x...) /* kprintf("unix_syscall: " x) */

#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
    if (kdebug_enable && !code_is_kdebug_trace(code)) {
        arm_trace_unix_syscall(code, state);
    }
#endif

    if ((uthread->uu_flag & UT_VFORK))
        proc = current_proc();

    callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

    /*
     * sy_narg is inaccurate on ARM if a 64 bit parameter is specified. Since user_addr_t
     * is currently a 32 bit type, this is really a long word count. See rdar://problem/6104668.
     */
    if (callp->sy_narg != 0) {
        if (arm_get_syscall_args(uthread, state, callp) != 0) {
            /* Too many arguments, or something failed */
            unix_syscall_kprintf("arm_get_syscall_args failed.\n");
            callp = &sysent[SYS_invalid];
        }
    }

    uthread->uu_flag |= UT_NOTCANCELPT;
    uthread->syscall_code = code;

    uthread->uu_rval[0] = 0;

    /*
     * r4 is volatile; if we set it to regs->save_r4 here, the child
     * would have the parent's r4 after execve.
     */
    uthread->uu_rval[1] = 0;

    error = 0;

    /*
     * The ARM runtime will call cerror if the carry bit is set after a
     * system call, so clear it here for the common case of success.
     */
    arm_clear_syscall_error(state);

#if COUNT_SYSCALLS
    if (do_count_syscalls > 0) {
        syscalls_log[code]++;
    }
#endif
    pid = proc_pid(proc);

#ifdef JOE_DEBUG
    uthread->uu_iocount = 0;
    uthread->uu_vpindex = 0;
#endif
    unix_syscall_kprintf("code %d (pid %d - %s, tid %lld)\n", code,
        pid, proc->p_comm, thread_tid(current_thread()));

    AUDIT_SYSCALL_ENTER(code, proc, uthread);
    error = (*(callp->sy_call)) (proc, &uthread->uu_arg[0], &(uthread->uu_rval[0]));
    AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

    unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error,
        uthread->uu_rval[0], uthread->uu_rval[1],
        pid, get_bsdtask_info(current_task()) ? proc->p_comm : "unknown", thread_tid(current_thread()));

#ifdef JOE_DEBUG
    if (uthread->uu_iocount) {
        printf("system call returned with uu_iocount != 0");
    }
#endif
#if CONFIG_DTRACE
    uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_set_allocation_name(NULL);
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    arm_prepare_syscall_return(callp, state, uthread, error);

    uthread->uu_flag &= ~UT_NOTCANCELPT;

    if (uthread->uu_lowpri_window) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        throttle_lowpri_io(1);
    }
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
    if (kdebug_enable && !code_is_kdebug_trace(code)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
            error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0);
    }
#endif

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

#ifdef __arm__
    thread_exception_return();
#endif
}

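/*
 * Function: unix_syscall_return
 *
 * Inputs: error - BSD error code for the system call completing on the
 *                 current thread
 *
 * Outputs: none; returns to user space via thread_exception_return()
 */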
void
unix_syscall_return(int error)
{
    thread_t thread_act;
    struct uthread *uthread;
    struct proc *proc;
    struct arm_saved_state *regs;
    unsigned short code;
    struct sysent *callp;

#define unix_syscall_return_kprintf(x...) /* kprintf("unix_syscall_return: " x) */

    thread_act = current_thread();
    proc = current_proc();
    uthread = get_bsdthread_info(thread_act);

    regs = find_user_regs(thread_act);
    code = uthread->syscall_code;
    callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

#if CONFIG_DTRACE
    if (callp->sy_call == dtrace_systrace_syscall)
        dtrace_systrace_syscall_return(code, error, uthread->uu_rval);
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_set_allocation_name(NULL);
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

    /*
     * Prepare the register state for the return to user space.
     */
    arm_prepare_syscall_return(callp, regs, uthread, error);

    uthread->uu_flag &= ~UT_NOTCANCELPT;

    if (uthread->uu_lowpri_window) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        throttle_lowpri_io(1);
    }
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
    if (kdebug_enable && !code_is_kdebug_trace(code)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
            error, uthread->uu_rval[0], uthread->uu_rval[1], proc->p_pid, 0);
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}

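/*
 * Write a 32-bit syscall's result back into the saved register state:
 * back up the PC for ERESTART, set r0/r1 and the carry flag on error
 * (so the userspace cerror stub runs), copy uu_rval into r0/r1 on
 * success, and leave the state untouched for EJUSTRETURN.
 */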
static void
arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
    assert(is_saved_state32(regs));

    arm_saved_state32_t *ss32 = saved_state32(regs);

    if (error == ERESTART) {
        ss32->pc -= 4;
    } else if (error != EJUSTRETURN) {
        if (error) {
            ss32->save_r0 = error;
            ss32->save_r1 = 0;
            /* set the carry bit to execute cerror routine */
            ss32->cpsr |= PSR_CF;
            unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
        } else { /* (not error) */
            switch (callp->sy_return_type) {
            case _SYSCALL_RET_INT_T:
            case _SYSCALL_RET_UINT_T:
            case _SYSCALL_RET_OFF_T:
            case _SYSCALL_RET_ADDR_T:
            case _SYSCALL_RET_SIZE_T:
            case _SYSCALL_RET_SSIZE_T:
            case _SYSCALL_RET_UINT64_T:
                ss32->save_r0 = uthread->uu_rval[0];
                ss32->save_r1 = uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_NONE:
                ss32->save_r0 = 0;
                ss32->save_r1 = 0;
                break;
            default:
                panic("unix_syscall: unknown return type");
                break;
            }
        }
    }
    /* else (error == EJUSTRETURN) { nothing } */
}

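/*
 * Emit the DBG_FUNC_START kdebug tracepoint for a 32-bit syscall,
 * logging the first four arguments. An indirect call (r12 == 0) passes
 * its arguments starting at r1, a direct call at r0.
 */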
static void
arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs)
{
    boolean_t indirect = (regs->save_r12 == 0);
    if (indirect)
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            regs->save_r1, regs->save_r2, regs->save_r3, regs->save_r4, 0);
    else
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            regs->save_r0, regs->save_r1, regs->save_r2, regs->save_r3, 0);
}

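/* Clear the carry flag so the userspace stub does not branch to cerror. */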
static void
arm_clear_u32_syscall_error(arm_saved_state32_t *regs)
{
    regs->cpsr &= ~PSR_CF;
}

#if defined(__arm__)

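/*
 * Marshal syscall arguments from the saved state into uu_arg. On an
 * AArch32 kernel the saved state is always 32-bit.
 */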
static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
{
    assert(is_saved_state32(state));
    return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
}

#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
/*
 * For armv7k, the alignment constraints of the ABI mean we don't know how the userspace
 * arguments are arranged without knowing the prototype of the syscall. So we use mungers
 * to marshal the userspace data into the uu_arg. This also means we need the same convention
 * as mach syscalls. That means we use r8 to pass arguments in the BSD case as well.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
    sy_munge_t *munger;

    /* This check is probably not very useful since these both come from build-time */
    if (callp->sy_arg_bytes > sizeof(uthread->uu_arg))
        return -1;

    /* get the munger and use it to marshal in the data from userspace */
    munger = callp->sy_arg_munge32;
    if (munger == NULL || (callp->sy_arg_bytes == 0))
        return 0;

    return munger(regs, uthread->uu_arg);
}
#else
/*
 * For an AArch32 kernel, where we know that we have only AArch32 userland,
 * we do not do any munging (which is a little confusing, as it is a contrast
 * to the i386 kernel, where, like the x86_64 kernel, we always munge
 * arguments from a 32-bit userland out to 64-bit).
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
    int regparams;
    int flavor = (regs->save_r12 == 0 ? 1 : 0);

    regparams = (7 - flavor); /* Indirect value consumes a register */

    assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));

    if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
        /*
         * Seven arguments or less are passed in registers.
         */
        memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
    } else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
        /*
         * In this case, we composite - take the first args from registers,
         * the remainder from the stack (offset by the 7 regs therein).
         */
        unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
        memcpy(&uthread->uu_arg[0], &regs->r[flavor], regparams * sizeof(int));
        if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
            (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
            return -1;
        }
    } else {
        return -1;
    }

    return 0;
}
#endif

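/*
 * Return the syscall number for a 32-bit caller: r12 for a direct
 * syscall, or r0 when r12 is zero (indirect syscall).
 */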
static int
arm_get_syscall_number(struct arm_saved_state *regs)
{
    if (regs->save_r12 != 0) {
        return regs->save_r12;
    } else {
        return regs->save_r0;
    }
}

static void
arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
    assert(is_saved_state32(state));
    arm_prepare_u32_syscall_return(callp, state, uthread, error);
}

static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
    assert(is_saved_state32(state));
    arm_trace_u32_unix_syscall(code, saved_state32(state));
}

static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
    assert(is_saved_state32(state));
    arm_clear_u32_syscall_error(saved_state32(state));
}

#elif defined(__arm64__)
static void arm_prepare_u64_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, struct sysent *);

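/*
 * Marshal syscall arguments into uu_arg, dispatching on whether the
 * calling thread's saved state is 32-bit or 64-bit.
 */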
static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
{
    if (is_saved_state32(state)) {
        return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
    } else {
        return arm_get_u64_syscall_args(uthread, saved_state64(state), callp);
    }
}

/*
 * 64-bit: all arguments in registers. We're willing to use x9, a temporary
 * register per the ABI, to pass an argument to the kernel for one case,
 * an indirect syscall with 8 arguments. No munging required, as all arguments
 * are in 64-bit wide registers already.
 */
static int
arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sysent *callp)
{
    int indirect_offset, regparams;

#if CONFIG_REQUIRES_U32_MUNGING
    sy_munge_t *mungerp;
#endif

    indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0;
    regparams = 9 - indirect_offset;

    /*
     * Everything should fit in registers for now.
     */
    assert(callp->sy_narg <= 8);
    if (callp->sy_narg > regparams) {
        return -1;
    }

    memcpy(&uthread->uu_arg[0], &regs->x[indirect_offset], callp->sy_narg * sizeof(uint64_t));

#if CONFIG_REQUIRES_U32_MUNGING
    /*
     * The indirect system call interface is vararg based. For armv7k, arm64_32,
     * and arm64, this means we simply lay the values down on the stack, padded to
     * a width multiple (4 bytes for armv7k and arm64_32, 8 bytes for arm64).
     * The arm64(_32) stub for syscall will load this data into the registers and
     * then trap. This gives us register state that corresponds to what we would
     * expect from a armv7 task, so in this particular case we need to munge the
     * arguments.
     *
     * TODO: Is there a cleaner way to do this check? What we're actually
     * interested in is whether the task is arm64_32. We don't appear to guarantee
     * that uu_proc is populated here, which is why this currently uses the
     * thread_t.
     */
    mungerp = callp->sy_arg_munge32;
    assert(uthread->uu_thread);

    if (indirect_offset && !ml_thread_is64bit(uthread->uu_thread)) {
        (*mungerp)(&uthread->uu_arg[0]);
    }
#endif

    return 0;
}

/*
 * When the kernel is running AArch64, munge arguments from 32-bit
 * userland out to 64-bit.
 *
 * flavor == 1 indicates an indirect syscall.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
    int regparams;
#if CONFIG_REQUIRES_U32_MUNGING
    sy_munge_t *mungerp;
#else
#error U32 syscalls on ARM64 kernel requires munging
#endif
    int flavor = (regs->save_r12 == 0 ? 1 : 0);

    regparams = (7 - flavor); /* Indirect value consumes a register */

    assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));

    if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
        /*
         * Seven arguments or less are passed in registers.
         */
        memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
    } else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
        /*
         * In this case, we composite - take the first args from registers,
         * the remainder from the stack (offset by the 7 regs therein).
         */
        unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
        memcpy(&uthread->uu_arg[0], &regs->r[flavor], regparams * sizeof(int));
        if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
            (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
            return -1;
        }
    } else {
        return -1;
    }

#if CONFIG_REQUIRES_U32_MUNGING
    /* Munge here */
    mungerp = callp->sy_arg_munge32;
    if (mungerp != NULL) {
        (*mungerp)(&uthread->uu_arg[0]);
    }
#endif

    return 0;
}

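/*
 * Return the syscall number: for a 32-bit caller it is in r12 (or r0
 * when the call is indirect); for a 64-bit caller it is in
 * x[ARM64_SYSCALL_CODE_REG_NUM] (or x0 when the call is indirect).
 */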
static int
arm_get_syscall_number(struct arm_saved_state *state)
{
    if (is_saved_state32(state)) {
        if (saved_state32(state)->save_r12 != 0) {
            return saved_state32(state)->save_r12;
        } else {
            return saved_state32(state)->save_r0;
        }
    } else {
        if (saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) {
            return saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM];
        } else {
            return saved_state64(state)->x[0];
        }
    }
}

static void
arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
    if (is_saved_state32(state)) {
        arm_prepare_u32_syscall_return(callp, state, uthread, error);
    } else {
        arm_prepare_u64_syscall_return(callp, state, uthread, error);
    }
}

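/*
 * Write a 64-bit syscall's result back into the saved register state:
 * back up the PC for ERESTART, set x0/x1 and the carry flag on error
 * (so the userspace cerror stub runs), copy uu_rval into x0/x1 on
 * success according to the declared return type, and leave the state
 * untouched for EJUSTRETURN.
 */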
static void
arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
    assert(is_saved_state64(regs));

    arm_saved_state64_t *ss64 = saved_state64(regs);

    if (error == ERESTART) {
        ss64->pc -= 4;
    } else if (error != EJUSTRETURN) {
        if (error) {
            ss64->x[0] = error;
            ss64->x[1] = 0;
            /*
             * Set the carry bit to execute cerror routine.
             * ARM64_TODO: should we have a separate definition?
             * The bits are the same.
             */
            ss64->cpsr |= PSR_CF;
            unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
        } else { /* (not error) */
            switch (callp->sy_return_type) {
            case _SYSCALL_RET_INT_T:
                ss64->x[0] = uthread->uu_rval[0];
                ss64->x[1] = uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_UINT_T:
                ss64->x[0] = (u_int)uthread->uu_rval[0];
                ss64->x[1] = (u_int)uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_OFF_T:
            case _SYSCALL_RET_ADDR_T:
            case _SYSCALL_RET_SIZE_T:
            case _SYSCALL_RET_SSIZE_T:
            case _SYSCALL_RET_UINT64_T:
                ss64->x[0] = *((uint64_t *)(&uthread->uu_rval[0]));
                ss64->x[1] = 0;
                break;
            case _SYSCALL_RET_NONE:
                break;
            default:
                panic("unix_syscall: unknown return type");
                break;
            }
        }
    }
    /* else (error == EJUSTRETURN) { nothing } */
}
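
/*
 * Emit the DBG_FUNC_START kdebug tracepoint for a 64-bit syscall,
 * logging the first four arguments. An indirect call (syscall code
 * register == 0) passes its arguments starting at x1, a direct call
 * at x0.
 */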
static void
arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs)
{
    boolean_t indirect = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0);
    if (indirect)
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            regs->x[1], regs->x[2], regs->x[3], regs->x[4], 0);
    else
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            regs->x[0], regs->x[1], regs->x[2], regs->x[3], 0);
}

static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
    if (is_saved_state32(state)) {
        arm_trace_u32_unix_syscall(code, saved_state32(state));
    } else {
        arm_trace_u64_unix_syscall(code, saved_state64(state));
    }
}

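/* Clear the carry flag so the userspace stub does not branch to cerror. */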
static void
arm_clear_u64_syscall_error(arm_saved_state64_t *regs)
{
    /*
     * ARM64_TODO: should we have a separate definition?
     * The bits are the same.
     */
    regs->cpsr &= ~PSR_CF;
}

static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
    if (is_saved_state32(state)) {
        arm_clear_u32_syscall_error(saved_state32(state));
    } else {
        arm_clear_u64_syscall_error(saved_state64(state));
    }
}

#else
#error Unknown architecture.
#endif