mirror of git://git.musl-libc.org/musl
commit 513c043694
previously powerpc had a_cas defined in terms of its native ll/sc style operations, but all other atomics were defined in terms of a_cas. instead define a_ll and a_sc so the compiler can generate optimized versions of all the atomic ops and perform better inlining of a_cas.

extracting the result of the sc (stwcx.) instruction is rather awkward because it's natively stored in a condition flag, which is not representable in inline asm. but even with this limitation the new code still seems significantly better.
40 lines · 791 B · C
#define a_ll a_ll
static inline int a_ll(volatile int *p)
{
	int v;
	__asm__ __volatile__ ("lwarx %0, 0, %2" : "=r"(v) : "m"(*p), "r"(p));
	return v;
}

#define a_sc a_sc
static inline int a_sc(volatile int *p, int v)
{
	int r;
	__asm__ __volatile__ (
		"stwcx. %2, 0, %3 ; mfcr %0"
		: "=r"(r), "=m"(*p) : "r"(v), "r"(p) : "memory", "cc");
	return r & 0x20000000; /* "bit 2" of "cr0" (backwards bit order) */
}

#define a_barrier a_barrier
static inline void a_barrier()
{
	__asm__ __volatile__ ("sync" : : : "memory");
}

#define a_pre_llsc a_barrier

#define a_post_llsc a_post_llsc
static inline void a_post_llsc()
{
	__asm__ __volatile__ ("isync" : : : "memory");
}

#define a_store a_store
static inline void a_store(volatile int *p, int v)
{
	a_pre_llsc();
	*p = v;
	a_post_llsc();
}
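As the commit message describes, once a_ll and a_sc are provided here, the remaining atomic operations (including a_cas) are generated by musl's arch-neutral atomic framework rather than being hand-written per arch. A minimal sketch of how a compare-and-swap can be composed from the primitives above, wrapping the ll/sc retry loop in the pre/post barriers, looks roughly like this (a simplified illustration, not a verbatim copy of the generic code in src/internal/atomic.h):

/* sketch: generic a_cas built on the arch's a_ll/a_sc primitives */
static inline int a_cas(volatile int *p, int t, int s)
{
	int old;
	a_pre_llsc();                      /* full barrier before the ll/sc sequence */
	do old = a_ll(p);                  /* load-reserve the current value */
	while (old == t && !a_sc(p, s));   /* expected value seen: retry until the
	                                      store-conditional succeeds; otherwise
	                                      fall through and report the mismatch */
	a_post_llsc();                     /* isync: order subsequent accesses */
	return old;
}

Because a_ll and a_sc are inline, the compiler can specialize this loop (and the other generic ops built the same way) directly around lwarx/stwcx., which is the inlining benefit the commit message refers to.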