musl/arch/sh/syscall_arch.h
Rich Felker 10d0268ccf switch to using trap number 31 for syscalls on sh
nominally the low bits of the trap number on sh are the number of
syscall arguments, but they have never been used by the kernel, and
some code making syscalls does not even know the number of arguments
and needs to pass an arbitrary high number anyway.

sh3/sh4 traditionally used the trap range 16-31 for syscalls, but part
of this range overlapped with hardware exceptions/interrupts on sh2
hardware, so an incompatible range 32-47 was chosen for sh2.

using trap number 31 everywhere, since it's in the existing sh3/sh4
range and does not conflict with sh2 hardware, is a proposed
unification of the kernel syscall convention that will allow binaries
to be shared between sh2 and sh3/sh4. if this is not accepted into the
kernel, we can refit the sh2 target with runtime selection mechanisms
for the trap number, but doing so would be invasive and would entail
non-trivial overhead.
2015-06-16 15:25:02 +00:00

88 lines
2.7 KiB
C

/* Split a 64-bit syscall argument into two 32-bit register slots.
 * The union punning yields the two halves of the long long in their
 * in-memory order (endian-dependent), so each expands to a pair of
 * comma-separated long values.  _O is identical to _E here; on this
 * target no alignment padding slot is needed before a 64-bit argument
 * (presumably — the aliasing is what the code shows). */
#define __SYSCALL_LL_E(x) \
((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
((union { long long ll; long l[2]; }){ .ll = x }).l[1]
#define __SYSCALL_LL_O(x) __SYSCALL_LL_E((x))
/* The extra OR instructions are to work around a hardware bug:
 * http://documentation.renesas.com/doc/products/mpumcu/tu/tnsh7456ae.pdf
 */
/* Issue the syscall trap and return the kernel's result from r0.
 * NOTE: the trapno parameter is accepted but deliberately NOT expanded;
 * trap #31 is hardcoded for all syscalls.  The low bits of the trap
 * number nominally encode the argument count but have never been used
 * by the kernel, and #31 lies in the traditional sh3/sh4 range (16-31)
 * while avoiding the sh2 hardware exception range (see the commit
 * message: this enables binaries shared between sh2 and sh3/sh4).
 * The "memory" clobber orders the trap against surrounding memory
 * accesses; the macro expands its variadic args as input constraints. */
#define __asm_syscall(trapno, ...) do { \
__asm__ __volatile__ ( \
"trapa #31\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
: "=r"(r0) : __VA_ARGS__ : "memory"); \
return r0; \
} while (0)
/* 0-argument syscall: syscall number in r3, result returned in r0.
 * (The 16 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall0(long n)
{
register long r3 __asm__("r3") = n;
register long r0 __asm__("r0");
__asm_syscall(16, "r"(r3));
}
/* 1-argument syscall: number in r3, argument in r4, result in r0.
 * (The 17 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall1(long n, long a)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r0 __asm__("r0");
__asm_syscall(17, "r"(r3), "r"(r4));
}
/* 2-argument syscall: number in r3, arguments in r4-r5, result in r0.
 * (The 18 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall2(long n, long a, long b)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r0 __asm__("r0");
__asm_syscall(18, "r"(r3), "r"(r4), "r"(r5));
}
/* 3-argument syscall: number in r3, arguments in r4-r6, result in r0.
 * (The 19 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall3(long n, long a, long b, long c)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r0 __asm__("r0");
__asm_syscall(19, "r"(r3), "r"(r4), "r"(r5), "r"(r6));
}
/* 4-argument syscall: number in r3, arguments in r4-r7, result in r0.
 * (The 20 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall4(long n, long a, long b, long c, long d)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0");
__asm_syscall(20, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7));
}
/* 5-argument syscall: number in r3, arguments in r4-r7 and r0; r0 is
 * also the result register, so the fifth argument is bound there and
 * tied to output 0 via the "0"(r0) matching constraint.
 * (The 21 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0") = e;
__asm_syscall(21, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "0"(r0));
}
/* 6-argument syscall: number in r3, arguments in r4-r7, r0 and r1;
 * the fifth argument shares r0 with the result (tied via "0"(r0)),
 * and the sixth travels in r1.
 * (The 22 passed as trapno is vestigial; the macro ignores it.) */
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0") = e;
register long r1 __asm__("r1") = f;
__asm_syscall(22, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "0"(r0), "r"(r1));
}