mirror of
git://git.musl-libc.org/musl
synced 2025-01-27 09:03:59 +00:00
3814333964
this commit preserves ABI fully for existing interface boundaries between libc and libc consumers (applications or libraries), by retaining existing symbol names for the legacy 32-bit interfaces and redirecting sources compiled against the new headers to alternate symbol names. this does not necessarily, however, preserve the pairwise ABI of libc consumers with one another; where they use time_t-derived types in their interfaces with one another, it may be necessary to synchronize updates with each other. the intent is that ABI resulting from this commit already be stable and permanent, but it will not be officially so until a release is made. changes to some header-defined types that do not play any role in the ABI between libc and its consumers may still be subject to change. mechanically, the changes made by this commit for each 32-bit arch are as follows: - _REDIR_TIME64 is defined to activate the symbol redirections in public headers - COMPAT_SRC_DIRS is defined in arch.mak to activate build of ABI compat shims to serve as definitions for the original symbol names - time_t and suseconds_t definitions are changed to long long (64-bit) - IPC_STAT definition is changed to add the IPC_TIME64 bit (0x100), triggering conversion of semid_ds, shmid_ds, and msqid_ds split low/high time bits into new time_t members - structs semid_ds, shmid_ds, msqid_ds, and stat are modified to add new 64-bit time_t/timespec members at the end, maintaining existing layout of other members. - socket options (SO_*) and ioctl (sockios) command macros are redefined to use the kernel's "_NEW" values. in addition, on archs where vdso clock_gettime is used, the VDSO_CGT_SYM macro definition in syscall_arch.h is changed to use a new time64 vdso function if available, and a new VDSO_CGT32_SYM macro is added for use as fallback on kernels lacking time64.
110 lines
3.2 KiB
C
/* Split a 64-bit syscall argument into two 32-bit argument slots via a
 * union compound literal, low word (l[0]) first then high word (l[1]).
 * NOTE(review): this l[0]/l[1] ordering matches a little-endian layout;
 * presumably a big-endian arm variant uses a different header — confirm. */
#define __SYSCALL_LL_E(x) \
((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
((union { long long ll; long l[2]; }){ .ll = x }).l[1]
/* Same, but with a leading dummy 0 slot before the 64-bit value; used
 * where the calling convention requires the 64-bit pair to start at an
 * aligned (even) argument position. */
#define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
|
|
|
|
#ifdef __thumb__

/* Avoid use of r7 in asm constraints when producing thumb code,
 * since it's reserved as frame pointer and might not be supported. */
#define __ASM____R7__
/* Thumb variant: since r7 cannot appear in constraints, the asm itself
 * saves the caller's r7 into a scratch register (%1, an early-clobber
 * temporary bound to the anonymous (int){0}), loads the syscall number
 * from %2, traps with svc 0, then restores r7.  The macro expands to a
 * return statement: callers fall off the end after invoking it.
 * "memory" clobber: the kernel may read/write memory the args point to. */
#define __asm_syscall(...) do { \
	__asm__ __volatile__ ( "mov %1,r7 ; mov r7,%2 ; svc 0 ; mov r7,%1" \
	: "=r"(r0), "=&r"((int){0}) : __VA_ARGS__ : "memory"); \
	return r0; \
	} while (0)

#else

/* ARM/thumb2 variant: the syscall number lives directly in r7 (the
 * variable named r7 in each __syscallN is bound to the register here),
 * so the asm is just the trap.  Result comes back in r0; the macro
 * expands to a return statement like the thumb variant above. */
#define __ASM____R7__ __asm__("r7")
#define __asm_syscall(...) do { \
	__asm__ __volatile__ ( "svc 0" \
	: "=r"(r0) : __VA_ARGS__ : "memory"); \
	return r0; \
	} while (0)
#endif
|
|
|
|
/* For thumb2, we can allow 8-bit immediate syscall numbers, saving a
 * register in the above dance around r7. Does not work for thumb1 where
 * only movs, not mov, supports immediates, and we can't use movs because
 * it doesn't support high regs. */
#ifdef __thumb2__
/* "rI": let the compiler pass the syscall number as either a register
 * or an immediate valid for an ARM data-processing instruction. */
#define R7_OPERAND "rI"(r7)
#else
/* Plain register operand; referenced by name from __asm_syscall callers. */
#define R7_OPERAND "r"(r7)
#endif
|
|
|
|
/* 0-argument syscall: number n in r7 (or via the thumb dance), raw
 * kernel return value in r0.  __asm_syscall contains the return
 * statement, so control never reaches the closing brace. */
static inline long __syscall0(long n)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0"); /* output only; deliberately uninitialized */
	__asm_syscall(R7_OPERAND);
}
|
|
|
|
/* 1-argument syscall: arg a in r0 (also the return register, hence the
 * "0" matching constraint tying input to output operand 0). */
static inline long __syscall1(long n, long a)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	__asm_syscall(R7_OPERAND, "0"(r0));
}
|
|
|
|
/* 2-argument syscall: args in r0-r1, number in r7, result in r0. */
static inline long __syscall2(long n, long a, long b)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1));
}
|
|
|
|
/* 3-argument syscall: args in r0-r2, number in r7, result in r0. */
static inline long __syscall3(long n, long a, long b, long c)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2));
}
|
|
|
|
/* 4-argument syscall: args in r0-r3, number in r7, result in r0. */
static inline long __syscall4(long n, long a, long b, long c, long d)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
}
|
|
|
|
/* 5-argument syscall: args in r0-r4, number in r7, result in r0. */
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	register long r4 __asm__("r4") = e;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
}
|
|
|
|
/* 6-argument syscall (the maximum): args in r0-r5, number in r7,
 * result in r0. */
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	register long r4 __asm__("r4") = e;
	register long r5 __asm__("r5") = f;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
}
|
|
|
|
#define VDSO_USEFUL
/* Primary vdso clock_gettime is the time64 entry point; the 32-bit
 * symbol is retained (CGT32) as a fallback for kernels that lack the
 * time64 vdso function (per the time64 conversion this header is part of). */
#define VDSO_CGT32_SYM "__vdso_clock_gettime"
#define VDSO_CGT32_VER "LINUX_2.6"
#define VDSO_CGT_SYM "__vdso_clock_gettime64"
#define VDSO_CGT_VER "LINUX_2.6"
|
|
|
|
/* Feature flags consumed by arch-generic musl code (consumers not visible
 * in this header).  NOTE(review): presumably FADVISE_6_ARG marks the arm
 * fadvise variant that takes its 64-bit offset/len as split register pairs
 * (6 argument slots), and IPC_BROKEN_MODE marks a kernel quirk in the
 * sysv ipc permission-mode field — confirm against the generic fadvise
 * and ipc source files. */
#define SYSCALL_FADVISE_6_ARG

#define SYSCALL_IPC_BROKEN_MODE