mirror of
git://git.musl-libc.org/musl
synced 2025-01-09 16:19:50 +00:00
f56d57f8a7
The definition of the IPC_64 macro controls the interface between libc and the kernel through syscalls; it is not a public API. Its meaning is rather obscure. Long ago, Linux's sysvipc *id_ds structures used 16-bit uids/gids and wrong types for a few other fields. This was in the libc5 era, before glibc. The IPC_64 flag ("64" is a misnomer; it's more like 32) tells the kernel to use the modern[-ish] versions of the structures. The definition of IPC_64 has nothing to do with whether the arch is 32- or 64-bit. Rather, due to either historical accident or intentional obnoxiousness, the kernel only accepts and masks off the 0x100 IPC_64 flag conditional on CONFIG_ARCH_WANT_IPC_PARSE_VERSION, i.e. for archs that want to provide, or that accidentally provided, both versions of the structures. For archs which don't define this option, no masking is performed, and commands with the 0x100 bit set will fail as invalid. So ultimately, the definition is just a matter of matching an arbitrary switch defined per-arch in the kernel.
79 lines
2.2 KiB
C
79 lines
2.2 KiB
C
/* aarch64 passes each syscall argument and the return value in a full
 * 64-bit register, so 64-bit values need no splitting or re-alignment:
 * both the error-returning (_E) and offset-argument (_O) long-long
 * wrappers are the identity. */
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)
/* Issue the syscall via the aarch64 "svc 0" instruction and return its
 * result. The syscall number and arguments must already be bound to
 * registers x8 and x0..x5 by the caller; they are passed in as the
 * input-constraint list via __VA_ARGS__. The kernel returns the result
 * in x0, captured by the "=r"(x0) output. The "memory" clobber keeps
 * the compiler from caching across the call memory that the kernel may
 * read or write; __volatile__ keeps the asm from being moved or
 * removed. Note this macro contains the function's return statement. */
#define __asm_syscall(...) do { \
	__asm__ __volatile__ ( "svc 0" \
	: "=r"(x0) : __VA_ARGS__ : "memory", "cc"); \
	return x0; \
	} while (0)
/* 0-argument syscall: number n in x8; result returned in x0.
 * x0 is declared but deliberately left uninitialized -- it is used
 * only as the asm output operand inside __asm_syscall, which also
 * performs the return. */
static inline long __syscall0(long n)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0");
	__asm_syscall("r"(x8));
}
/* 1-argument syscall: number in x8, argument a in x0; result in x0.
 * The "0"(x0) constraint ties the input to output operand 0 so the
 * same register carries the argument in and the result out. */
static inline long __syscall1(long n, long a)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	__asm_syscall("r"(x8), "0"(x0));
}
/* 2-argument syscall: number in x8, arguments in x0-x1; result in x0. */
static inline long __syscall2(long n, long a, long b)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	register long x1 __asm__("x1") = b;
	__asm_syscall("r"(x8), "0"(x0), "r"(x1));
}
/* 3-argument syscall: number in x8, arguments in x0-x2; result in x0. */
static inline long __syscall3(long n, long a, long b, long c)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	register long x1 __asm__("x1") = b;
	register long x2 __asm__("x2") = c;
	__asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2));
}
/* 4-argument syscall: number in x8, arguments in x0-x3; result in x0. */
static inline long __syscall4(long n, long a, long b, long c, long d)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	register long x1 __asm__("x1") = b;
	register long x2 __asm__("x2") = c;
	register long x3 __asm__("x3") = d;
	__asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3));
}
/* 5-argument syscall: number in x8, arguments in x0-x4; result in x0. */
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	register long x1 __asm__("x1") = b;
	register long x2 __asm__("x2") = c;
	register long x3 __asm__("x3") = d;
	register long x4 __asm__("x4") = e;
	__asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4));
}
/* 6-argument syscall: number in x8, arguments in x0-x5; result in x0. */
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
	register long x8 __asm__("x8") = n;
	register long x0 __asm__("x0") = a;
	register long x1 __asm__("x1") = b;
	register long x2 __asm__("x2") = c;
	register long x3 __asm__("x3") = d;
	register long x4 __asm__("x4") = e;
	register long x5 __asm__("x5") = f;
	__asm_syscall("r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5));
}
/* The aarch64 vDSO exports a usable clock_gettime under this symbol
 * and version. */
#define VDSO_USEFUL
#define VDSO_CGT_SYM "__kernel_clock_gettime"
#define VDSO_CGT_VER "LINUX_2.6.39"

/* aarch64 kernels do not set CONFIG_ARCH_WANT_IPC_PARSE_VERSION: they
 * only ever use the modern sysvipc structure layouts and do not mask
 * off an 0x100 "IPC_64" command flag, so the flag must be 0 here or
 * sysvipc commands would be rejected as invalid. */
#define IPC_64 0