musl/arch/sh/syscall_arch.h
Rich Felker 7cc3a28eed fix pread/pwrite syscall calling convention on sh
despite sh not generally using register-pair alignment for 64-bit
syscall arguments, there are arch-specific versions of the syscall
entry points for pread and pwrite which include a dummy argument for
alignment before the 64-bit offset argument.
2016-08-11 18:36:46 -04:00

89 lines
2.8 KiB
C

/* Split a 64-bit syscall argument into its two long halves, in the order
 * they sit in memory (the union avoids strict-aliasing/type-punning UB). */
#define __SYSCALL_LL_E(x) \
((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
((union { long long ll; long l[2]; }){ .ll = x }).l[1]
/* sh does not use register-pair alignment for 64-bit syscall arguments,
 * so the "odd-position" variant is identical to the "even" one. */
#define __SYSCALL_LL_O(x) __SYSCALL_LL_E((x))
/* pread/pwrite on sh take a dummy alignment word before the 64-bit file
 * offset (see commit message above); insert a literal 0 before the halves. */
#define __SYSCALL_LL_PRW(x) 0, __SYSCALL_LL_E((x))
/* The extra OR instructions are to work around a hardware bug:
 * http://documentation.renesas.com/doc/products/mpumcu/tu/tnsh7456ae.pdf
 */
/* Issue the syscall trap and return the kernel's result from r0.
 * NOTE: the trapno parameter (16..22 = 0x10 + arg count at the call
 * sites, the historical per-arity trap numbers) is deliberately unused;
 * trapa #31 is used for every arity. */
#define __asm_syscall(trapno, ...) do { \
__asm__ __volatile__ ( \
"trapa #31\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
"or r0, r0\n" \
: "=r"(r0) : __VA_ARGS__ : "memory"); \
return r0; \
} while (0)
/* 0-argument syscall: number n in r3, result returned in r0. */
static inline long __syscall0(long n)
{
register long r3 __asm__("r3") = n;
register long r0 __asm__("r0");
/* 16 is the historical trap number (0x10 + 0 args); ignored by the macro. */
__asm_syscall(16, "r"(r3));
}
/* 1-argument syscall: number in r3, argument in r4, result in r0. */
static inline long __syscall1(long n, long a)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r0 __asm__("r0");
__asm_syscall(17, "r"(r3), "r"(r4));
}
/* 2-argument syscall: number in r3, arguments in r4-r5, result in r0. */
static inline long __syscall2(long n, long a, long b)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r0 __asm__("r0");
__asm_syscall(18, "r"(r3), "r"(r4), "r"(r5));
}
/* 3-argument syscall: number in r3, arguments in r4-r6, result in r0. */
static inline long __syscall3(long n, long a, long b, long c)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r0 __asm__("r0");
__asm_syscall(19, "r"(r3), "r"(r4), "r"(r5), "r"(r6));
}
/* 4-argument syscall: number in r3, arguments in r4-r7, result in r0. */
static inline long __syscall4(long n, long a, long b, long c, long d)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0");
__asm_syscall(20, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7));
}
/* 5-argument syscall: number in r3, arguments in r4-r7 plus the 5th in r0,
 * which is also the result register.  The "0" matching constraint ties the
 * input e to the same register as the "=r"(r0) output operand. */
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0") = e;
__asm_syscall(21, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "0"(r0));
}
/* 6-argument syscall: number in r3, arguments in r4-r7 plus the 5th in r0
 * (shared with the result via the "0" matching constraint) and the 6th
 * in r1. */
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
register long r3 __asm__("r3") = n;
register long r4 __asm__("r4") = a;
register long r5 __asm__("r5") = b;
register long r6 __asm__("r6") = c;
register long r7 __asm__("r7") = d;
register long r0 __asm__("r0") = e;
register long r1 __asm__("r1") = f;
__asm_syscall(22, "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "0"(r0), "r"(r1));
}