mirror of
git://git.musl-libc.org/musl
synced 2024-12-14 18:55:23 +00:00
878887c50c
this error was only found by reading the code, but it seems to have been causing gcc to produce wrong code in malloc: the same register was used for the output and the high word of the input. in principle this could have caused an infinite loop searching for an available bin, but in practice most x86 models seem to implement the "undefined" result of the bsf instruction as "unchanged".
111 lines · 2.3 KiB · C
#ifndef _INTERNAL_ATOMIC_H
|
|
#define _INTERNAL_ATOMIC_H
|
|
|
|
#include <stdint.h>
|
|
|
|
/* Count trailing zeros of a 64-bit value on a 32-bit x86 target.
 * The value is split into 32-bit halves: bsf on the low half first;
 * if the low half is zero (bsf sets ZF, so the jnz is not taken),
 * bsf the high half and add 32 to the bit index.
 * The "=&r" early-clobber is essential: the first bsf writes %0
 * before input %2 has been consumed, so the output register must not
 * be shared with either input register.
 * Result is undefined for x == 0 (bsf leaves its destination
 * undefined when the source is zero). */
static inline int a_ctz_64(uint64_t x)
{
	int r;
	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
		: "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
	return r;
}
|
|
|
|
/* Count trailing zeros of an unsigned long (32-bit on this target).
 * Result is undefined for x == 0 (bsf leaves its destination
 * undefined when the source is zero). */
static inline int a_ctz_l(unsigned long x)
{
	long r;
	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
	return r;
}
|
|
|
|
/* AND v into the 64-bit word at p as two independent locked 32-bit
 * ANDs (low half at (%0), high half at 4(%0)).  Each half is atomic,
 * but the pair is not atomic as a unit: a concurrent reader may
 * observe one half updated before the other.  Callers must tolerate
 * this (musl uses it only for bitmask sets where that is safe). */
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
}
|
|
|
|
/* OR v into the 64-bit word at p as two independent locked 32-bit
 * ORs (low half, then high half).  As with a_and_64, the two halves
 * are not atomic with respect to each other. */
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
}
|
|
|
|
/* Atomically OR v into the long-sized word at p. */
static inline void a_or_l(volatile void *p, long v)
{
	__asm__( "lock ; orl %1, %0"
		: "=m"(*(long *)p) : "r"(v) : "memory" );
}
|
|
|
|
/* Atomic compare-and-swap on a pointer-sized word: if *p == t, store
 * s into *p.  Returns the old value of *p (== t exactly when the
 * swap succeeded).  cmpxchg implicitly uses the accumulator register
 * for the expected/old value, hence the "a" constraints binding t to
 * eax for both input and output. */
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
	return t;
}
|
|
|
|
/* Atomic compare-and-swap on an int: if *p == t, store s into *p.
 * Returns the old value of *p (== t exactly when the swap
 * succeeded).  Same accumulator-register ("a") binding as a_cas_p,
 * as required by the cmpxchg instruction. */
static inline int a_cas(volatile int *p, int t, int s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}
|
|
|
|
/* Atomically OR v into *p. */
static inline void a_or(volatile int *p, int v)
{
	__asm__( "lock ; orl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
|
|
|
|
/* Atomically AND v into *p. */
static inline void a_and(volatile int *p, int v)
{
	__asm__( "lock ; andl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
|
|
|
|
/* Atomically exchange *x with v; returns the previous value of *x.
 * xchg with a memory operand is implicitly locked on x86, so no
 * explicit lock prefix is needed.  The "0"(v) constraint ties the
 * input to the same register as output %0, letting v be reused in
 * place. */
static inline int a_swap(volatile int *x, int v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}
|
|
|
|
/* a_xchg is an alias for a_swap (atomic exchange). */
#define a_xchg a_swap
|
|
|
|
/* Atomically add v to *x; returns the value *x held before the add
 * (xadd writes the old memory value back into the source register,
 * which the "0"(v) constraint ties to output %0). */
static inline int a_fetch_add(volatile int *x, int v)
{
	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}
|
|
|
|
/* Atomically increment *x. */
static inline void a_inc(volatile int *x)
{
	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
}
|
|
|
|
/* Atomically decrement *x. */
static inline void a_dec(volatile int *x)
{
	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
}
|
|
|
|
/* Store x to *p, then execute a full memory barrier.  The barrier is
 * a dummy locked operation ("lock orl $0" on a stack slot), which on
 * x86 serializes memory accesses like mfence but is cheaper on many
 * models and works on pre-SSE2 CPUs that lack mfence. */
static inline void a_store(volatile int *p, int x)
{
	__asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
}
|
|
|
|
/* Spin-wait hint: the pause instruction tells the CPU this is a
 * busy-wait loop, reducing power use and improving exit latency. */
static inline void a_spin()
{
	__asm__ __volatile__( "pause" : : : "memory" );
}
|
|
|
|
/* Compiler-only barrier (empty asm with a memory clobber); no fence
 * instruction is emitted.  NOTE(review): this presumably relies on
 * x86's strong hardware memory ordering making a compiler barrier
 * sufficient here -- consistent with a_store providing the full
 * fence where one is needed. */
static inline void a_barrier()
{
	__asm__ __volatile__( "" : : : "memory" );
}
|
|
|
|
/* Deliberately crash the process: hlt is a privileged instruction,
 * so executing it in user mode raises a fault that kills the
 * process. */
static inline void a_crash()
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}
|
|
|
|
|
|
#endif
|