mirror of git://git.musl-libc.org/musl
commit 29237f7f5c
three problems are addressed:

- use of pc arithmetic, which was difficult if not impossible to make
  correct in thumb mode on all models, so that relative rather than
  absolute pointers to the backends could be used. this was designed
  back when there was no coherent model for the early stages of the
  dynamic linker before relocations, and is no longer necessary.

- assumption that data (the relative pointers to the backends) can be
  accessed at a constant displacement from the code. this will not be
  possible on future fdpic subarchs (for cortex-m), so move
  responsibility for loading the backend code address to the caller.

- hard-coded arm opcodes using the .word directive. instead, use the
  .arch directive to work around the assembler's refusal to assemble
  instructions not available (or in some cases, available but just
  considered deprecated) in the target isa level. the obscure v6t2
  arch is used for v6 code so as to (1) allow generation of thumb2
  output if -mthumb is active, and (2) avoid warnings/errors for mcr
  barriers that clang would produce if we just set arch to v7-a.

in addition, the __aeabi_read_tp function is moved out of the inner
workings and implemented as an asm wrapper around a C function, so
that asm code does not need to read global data. the asm wrapper
serves to satisfy the ABI calling convention requirements for this
function.
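
As a minimal sketch of that wrapper pattern (not the literal musl
source; the C helper name read_tp_c is hypothetical): the AEABI
convention allows __aeabi_read_tp to clobber nothing but r0, the
flags, and lr, so the asm saves the remaining call-clobbered
registers, calls into ordinary C (which is free to read global data),
and restores them:

__asm__(
".global __aeabi_read_tp\n"
".type __aeabi_read_tp,%function\n"
"__aeabi_read_tp:\n"
"	push {r1,r2,r3,lr}\n"	/* save registers the special convention requires preserved */
"	bl read_tp_c\n"	/* hypothetical C helper; returns the thread pointer in r0 */
"	pop {r1,r2,r3,lr}\n"
"	bx lr\n"
);

Pushing four registers also keeps the stack 8-byte aligned across the
call, as the AAPCS requires at public interfaces.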
84 lines, 1.7 KiB, C
/* Pre-v5 ISAs lack the blx instruction, so indirect calls use a
 * mov lr,pc / bx pair. In ARM state pc reads as the address of the
 * current instruction plus 8, which is exactly the instruction after
 * the bx, so lr receives the correct return address. */
#if __ARM_ARCH_4__ || __ARM_ARCH_4T__ || __ARM_ARCH == 4
#define BLX "mov lr,pc\n\tbx"
#else
#define BLX "blx"
#endif

/* Absolute addresses of the runtime-selected out-of-line atomics
 * backends; the caller loads these rather than computing backend
 * addresses with pc-relative arithmetic. */
extern uintptr_t __attribute__((__visibility__("hidden")))
__a_cas_ptr, __a_barrier_ptr;

#if ((__ARM_ARCH_6__ || __ARM_ARCH_6K__ || __ARM_ARCH_6ZK__) && !__thumb__) \
 || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7

/* Load-exclusive: read *p and open an exclusive monitor on it. */
#define a_ll a_ll
static inline int a_ll(volatile int *p)
{
	int v;
	__asm__ __volatile__ ("ldrex %0, %1" : "=r"(v) : "Q"(*p));
	return v;
}

/* Store-conditional: strex writes 0 to its result register on success
 * and 1 if the exclusive monitor was lost, so the result is inverted
 * to give the usual nonzero-on-success convention. */
#define a_sc a_sc
static inline int a_sc(volatile int *p, int v)
{
	int r;
	__asm__ __volatile__ ("strex %0,%2,%1" : "=&r"(r), "=Q"(*p) : "r"(v) : "memory");
	return !r;
}

#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7

/* v7 and later have the dmb instruction; v6 instead reaches its
 * barrier through the out-of-line backend defined further down. */
#define a_barrier a_barrier
static inline void a_barrier()
{
	__asm__ __volatile__ ("dmb ish" : : : "memory");
}

#endif

/* ll/sc loops are bracketed by full barriers; the sketch after the
 * barrier definitions below shows how the generic layer uses these. */
#define a_pre_llsc a_barrier
#define a_post_llsc a_barrier

#else

/* Older ISAs have no ll/sc; compare-and-swap goes through the
 * out-of-line backend reached via __a_cas_ptr, which follows the
 * kernel cmpxchg-helper register convention: expected old value in
 * r0, new value in r1, pointer in r2, r0==0 on return for success. */
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	for (;;) {
		register int r0 __asm__("r0") = t;
		register int r1 __asm__("r1") = s;
		register volatile int *r2 __asm__("r2") = p;
		register uintptr_t r3 __asm__("r3") = __a_cas_ptr;
		int old;
		__asm__ __volatile__ (
			BLX " r3"
			: "+r"(r0), "+r"(r3) : "r"(r1), "r"(r2)
			: "memory", "lr", "ip", "cc" );
		if (!r0) return t;	/* backend succeeded: s was stored */
		if ((old=*p)!=t) return old;	/* genuine mismatch: report it */
		/* otherwise the failure may have been spurious; retry */
	}
}

#endif
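
/* Illustrative only: a plain-C model of the backend contract assumed
 * by the a_cas wrapper above. It is neither atomic nor the real
 * backend (which is selected at runtime and may be the kernel's
 * cmpxchg user helper); it just documents the argument order and the
 * zero-means-exchanged return convention. */
static inline int __cas_backend_model(int t, int s, volatile int *p)
{
	if (*p != t) return 1;	/* nonzero: comparison failed, nothing stored */
	*p = s;
	return 0;	/* zero: value was swapped */
}
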
#ifndef a_barrier
#define a_barrier a_barrier
/* No dmb available: call the runtime-selected barrier backend, which
 * is expected to preserve everything except ip, lr and the flags, so
 * this is cheaper than a full function call. */
static inline void a_barrier()
{
	register uintptr_t ip __asm__("ip") = __a_barrier_ptr;
	__asm__ __volatile__( BLX " ip" : "+r"(ip) : : "memory", "cc", "lr" );
}
#endif
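
/* Illustrative only: roughly how the arch-independent atomics layer
 * builds a compare-and-swap from the primitives above when a_cas is
 * not defined here. The function name is invented for illustration;
 * musl's real version of this loop lives in src/internal/atomic.h. */
#ifdef a_ll
static inline int __cas_from_llsc_sketch(volatile int *p, int t, int s)
{
	int old;
	a_pre_llsc();	/* barrier before the transaction */
	do old = a_ll(p);	/* read and open the exclusive monitor */
	while (old == t && !a_sc(p, s));	/* retry only on spurious sc failure */
	a_post_llsc();	/* barrier after the transaction */
	return old;	/* equals t iff the swap happened */
}
#endif
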
/* Trap with a guaranteed-undefined instruction. Both encodings lie in
 * the architecturally reserved "permanently undefined" space, so they
 * raise SIGILL on any ARM model. */
#define a_crash a_crash
static inline void a_crash()
{
	__asm__ __volatile__(
#ifndef __thumb__
		".word 0xe7f000f0"
#else
		".short 0xdeff"
#endif
		: : : "memory");
}
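
/* Note: with a new enough assembler these encodings could likely be
 * written with the udf mnemonic rather than raw .word/.short
 * directives; the raw encodings are kept for compatibility with older
 * tools, in the same spirit as the .arch workaround described in the
 * commit message. */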