overhaul powerpc atomics for new atomics framework

previously powerpc had a_cas defined in terms of its native ll/sc
style operations, but all other atomics were defined in terms of
a_cas. instead define a_ll and a_sc so the compiler can generate
optimized versions of all the atomic ops and perform better inlining
of a_cas.
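
(illustration, not part of this commit: with a_ll/a_sc defined, the
shared framework in src/internal/atomic.h can synthesize a_cas roughly
like the sketch below; the exact generic code may differ.)

	static inline int a_cas(volatile int *p, int t, int s)
	{
		int old;
		a_pre_llsc();			/* full barrier (sync) before the loop */
		do old = a_ll(p);		/* lwarx: load word and reserve */
		while (old==t && !a_sc(p, s));	/* stwcx.: store only if reservation held */
		a_post_llsc();			/* isync barrier after the loop */
		return old;
	}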

extracting the result of the sc (stwcx.) instruction is rather awkward
because it's natively stored in a condition flag, which is not
representable in inline asm. but even with this limitation the new
code still seems significantly better.
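
(to unpack the "backwards bit order" comment in the new a_sc: a sketch
using the ISA's MSB-first bit numbering; the CR0_EQ macro name is made
up here for illustration.)

	/* mfcr copies the whole 32-bit condition register. cr0 occupies the
	 * four most significant bits, ordered LT, GT, EQ, SO from the top
	 * (the ISA numbers bits starting at the MSB, hence "backwards").
	 * stwcx. sets cr0.EQ on success, i.e. "bit 2", which is 1<<29 in
	 * conventional LSB-first terms. */
	#define CR0_EQ (1 << 29)	/* == 0x20000000; hypothetical helper name */
	int success = r & CR0_EQ;	/* nonzero iff the store-conditional succeeded */
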
Rich Felker, 2016-01-22 02:58:32 +00:00
parent 16b55298dc
commit 513c043694
1 changed file with 37 additions and 13 deletions

arch/powerpc/atomic_arch.h
@@ -1,15 +1,39 @@
-#define a_cas a_cas
-static inline int a_cas(volatile int *p, int t, int s)
+#define a_ll a_ll
+static inline int a_ll(volatile int *p)
 {
-	__asm__("\n"
-		"	sync\n"
-		"1:	lwarx %0, 0, %4\n"
-		"	cmpw %0, %2\n"
-		"	bne 1f\n"
-		"	stwcx. %3, 0, %4\n"
-		"	bne- 1b\n"
-		"	isync\n"
-		"1:	\n"
-		: "=&r"(t), "+m"(*p) : "r"(t), "r"(s), "r"(p) : "cc", "memory" );
-	return t;
+	int v;
+	__asm__ __volatile__ ("lwarx %0, 0, %2" : "=r"(v) : "m"(*p), "r"(p));
+	return v;
+}
+
+#define a_sc a_sc
+static inline int a_sc(volatile int *p, int v)
+{
+	int r;
+	__asm__ __volatile__ (
+		"stwcx. %2, 0, %3 ; mfcr %0"
+		: "=r"(r), "=m"(*p) : "r"(v), "r"(p) : "memory", "cc");
+	return r & 0x20000000; /* "bit 2" of "cr0" (backwards bit order) */
+}
+
+#define a_barrier a_barrier
+static inline void a_barrier()
+{
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+
+#define a_pre_llsc a_barrier
+
+#define a_post_llsc a_post_llsc
+static inline void a_post_llsc()
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#define a_store a_store
+static inline void a_store(volatile int *p, int v)
+{
+	a_pre_llsc();
+	*p = v;
+	a_post_llsc();
 }
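
(likewise, a hedged sketch of the kind of op the framework can now emit
as a single lwarx/stwcx. loop instead of a loop around a full a_cas;
pattern assumed from the generic src/internal/atomic.h, not part of
this diff.)

	static inline int a_fetch_add(volatile int *p, int v)
	{
		int old;
		a_pre_llsc();
		do old = a_ll(p);
		while (!a_sc(p, (unsigned)old + v));	/* retry until stwcx. succeeds */
		a_post_llsc();
		return old;
	}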