port spinlocks atomic ops usage to std::atomic

Aliaksey Kandratsenka 2023-06-21 10:39:14 -04:00
parent 3494eb29d6
commit ea0988b020
5 changed files with 54 additions and 39 deletions
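
In review terms the change is mechanical: each project-local atomicops primitive is replaced by the std::atomic<int> operation with the same ordering semantics. The cheat sheet below is a reviewer's summary of the patterns that recur throughout the diff, not part of the commit itself:

  base::subtle::NoBarrier_Load(&w)                      // returns the word
      -> w.load(std::memory_order_relaxed)
  base::subtle::Acquire_CompareAndSwap(&w, old, new_)   // returns previous value
      -> int expected = old;
         w.compare_exchange_strong(expected, new_, std::memory_order_acquire);
         // returns bool; on failure 'expected' is rewritten with the observed value
  base::subtle::Release_AtomicExchange(&w, v)           // returns previous value
      -> w.exchange(v, std::memory_order_release)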

src/base/spinlock.cc

@@ -75,29 +75,44 @@ inline void SpinlockPause(void) {
 // Monitor the lock to see if its value changes within some time
 // period (adaptive_spin_count loop iterations). The last value read
 // from the lock is returned from the method.
-Atomic32 SpinLock::SpinLoop() {
+int SpinLock::SpinLoop() {
   int c = adaptive_spin_count;
-  while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
+  while (lockword_.load(std::memory_order_relaxed) != kSpinLockFree && --c > 0) {
     SpinlockPause();
   }
-  return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                              kSpinLockSleeper);
+  int old = kSpinLockFree;
+  lockword_.compare_exchange_strong(old, kSpinLockSleeper, std::memory_order_acquire);
+  // Note: trying to set the lock word to the "have sleeper" state here
+  // might look unnecessary, but:
+  //
+  // *) pay attention to the second call to SpinLoop at the bottom of
+  //    the SlowLock loop below;
+  //
+  // *) we get there after sleeping in SpinLockDelay and being woken
+  //    by Unlock;
+  //
+  // *) we don't "count" sleepers, so when Unlock wakes us it also sets
+  //    the lock word to "free", and we risk forgetting other sleepers.
+  //    To prevent that, we become the "designated waker" by setting
+  //    the lock word to "have sleeper"; then, when we unlock, we wake
+  //    someone else in turn.
+  return old;
 }

 void SpinLock::SlowLock() {
-  Atomic32 lock_value = SpinLoop();
+  int lock_value = SpinLoop();

   int lock_wait_call_count = 0;
   while (lock_value != kSpinLockFree) {
     // If the lock is currently held, but not marked as having a sleeper, mark
     // it as having a sleeper.
     if (lock_value == kSpinLockHeld) {
-      // Here, just "mark" that the thread is going to sleep. Don't store the
-      // lock wait time in the lock as that will cause the current lock
-      // owner to think it experienced contention.
-      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
-                                                        kSpinLockHeld,
-                                                        kSpinLockSleeper);
+      // Here, just "mark" that the thread is going to sleep. Don't store
+      // the lock wait time in the lock, as that would cause the current
+      // lock owner to think it experienced contention. Note that
+      // compare_exchange updates lock_value with the previous value of
+      // the lock word.
+      lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper,
+                                        std::memory_order_acquire);
       if (lock_value == kSpinLockHeld) {
         // Successfully transitioned to kSpinLockSleeper. Pass
         // kSpinLockSleeper to the SpinLockDelay routine to properly indicate
@@ -107,9 +122,7 @@ void SpinLock::SlowLock() {
         // Lock is free again, so try and acquire it before sleeping. The
         // new lock state will be the number of cycles this thread waited if
         // this thread obtains the lock.
-        lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
-                                                          kSpinLockFree,
-                                                          kSpinLockSleeper);
+        lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper, std::memory_order_acquire);
         continue;  // skip the delay at the end of the loop
       }
     }
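
The interplay between SpinLoop, SlowLock, and the "designated waker" rule is easier to see in one self-contained piece. Below is a condensed sketch of the post-patch protocol, with std::this_thread::yield() standing in for the platform SpinLockDelay/SpinLockWake and an arbitrary spin count; the class name and constants are illustrative, not the library's:

#include <atomic>
#include <thread>

class ToySpinLock {
  enum { kFree = 0, kHeld = 1, kSleeper = 2 };
  std::atomic<int> lockword_{kFree};

  // Spin briefly, then CAS kFree -> kSleeper: if we were just woken, this
  // re-marks the word so we stay the "designated waker" for the others.
  int SpinLoop() {
    int c = 1000;                       // stand-in for adaptive_spin_count
    while (lockword_.load(std::memory_order_relaxed) != kFree && --c > 0) {}
    int old = kFree;
    lockword_.compare_exchange_strong(old, kSleeper, std::memory_order_acquire);
    return old;                         // kFree means we acquired the lock
  }

  void SlowLock() {
    int lock_value = SpinLoop();
    while (lock_value != kFree) {
      if (lock_value == kHeld) {
        // Mark the lock as having a sleeper; on failure compare_exchange
        // refreshes lock_value with the currently observed word.
        lockword_.compare_exchange_strong(lock_value, kSleeper,
                                          std::memory_order_acquire);
        if (lock_value == kHeld) {
          lock_value = kSleeper;        // transition succeeded
        } else if (lock_value == kFree) {
          // Freed in the meantime: try to grab it before sleeping.
          lockword_.compare_exchange_strong(lock_value, kSleeper,
                                            std::memory_order_acquire);
          continue;                     // skip the delay below
        }
      }
      std::this_thread::yield();        // stand-in for SpinLockDelay()
      lock_value = SpinLoop();
    }
  }

 public:
  void Lock() {
    int old = kFree;
    if (!lockword_.compare_exchange_weak(old, kHeld, std::memory_order_acquire))
      SlowLock();
  }

  void Unlock() {
    // Any previous value other than kHeld means a waiter marked itself a
    // sleeper; the real code then calls SlowUnlock() -> SpinLockWake().
    if (lockword_.exchange(kFree, std::memory_order_release) != kHeld) {
      // wake one waiter here
    }
  }
};

Note how a thread that acquires the word via the kFree -> kSleeper transition holds the lock with the sleeper mark set, so its eventual Unlock() observes a previous value other than kHeld and issues a wakeup: exactly the "designated waker" behavior the comment in SpinLoop describes.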

src/base/spinlock.h

@@ -40,6 +40,9 @@
 #define BASE_SPINLOCK_H_

 #include <config.h>
+#include <atomic>
 #include "base/atomicops.h"
 #include "base/basictypes.h"
 #include "base/dynamic_annotations.h"
@@ -63,9 +66,9 @@ class LOCKABLE SpinLock {
   }

   // Acquire this SpinLock.
-  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
-    if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                             kSpinLockHeld) != kSpinLockFree) {
+  void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+    int old = kSpinLockFree;
+    if (!lockword_.compare_exchange_weak(old, kSpinLockHeld, std::memory_order_acquire)) {
       SlowLock();
     }
   }
@@ -74,17 +77,14 @@ class LOCKABLE SpinLock {
   // acquisition was successful. If the lock was not acquired, false is
   // returned. If this SpinLock is free at the time of the call, TryLock
   // will return true with high probability.
-  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
-    bool res =
-        (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                              kSpinLockHeld) == kSpinLockFree);
-    return res;
+  bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    int old = kSpinLockFree;
+    return lockword_.compare_exchange_weak(old, kSpinLockHeld);
   }
   // Release this SpinLock, which must be held by the calling thread.
-  inline void Unlock() UNLOCK_FUNCTION() {
-    uint64 prev_value = static_cast<uint64>(
-        base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree));
+  void Unlock() UNLOCK_FUNCTION() {
+    int prev_value = lockword_.exchange(kSpinLockFree, std::memory_order_release);
     if (prev_value != kSpinLockHeld) {
       // Speed the wakeup of any waiter.
       SlowUnlock();
@@ -94,8 +94,8 @@ class LOCKABLE SpinLock {
   // Determine if the lock is held. When the lock is held by the invoking
   // thread, true will always be returned. Intended to be used as
   // CHECK(lock.IsHeld()).
-  inline bool IsHeld() const {
-    return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree;
+  bool IsHeld() const {
+    return lockword_.load(std::memory_order_relaxed) != kSpinLockFree;
   }

   static const base::LinkerInitialized LINKER_INITIALIZED;  // backwards compat
@@ -104,11 +104,11 @@ class LOCKABLE SpinLock {
   enum { kSpinLockHeld = 1 };
   enum { kSpinLockSleeper = 2 };

-  volatile Atomic32 lockword_;
+  std::atomic<int> lockword_;

   void SlowLock();
   void SlowUnlock();
-  Atomic32 SpinLoop();
+  int SpinLoop();

   DISALLOW_COPY_AND_ASSIGN(SpinLock);
 };
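
A subtlety the new Lock()/TryLock() bodies lean on: std::atomic compare_exchange returns a bool and, on failure, writes the value it actually observed back into its first argument, which is why 'old' must be re-seeded with kSpinLockFree before each call. A minimal standalone illustration (names are mine, not the library's):

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> word{1};   // pretend the lock is held (kSpinLockHeld)
  int expected = 0;           // kSpinLockFree

  // Fails: word holds 1, not 0, and 'expected' is overwritten with 1.
  bool ok = word.compare_exchange_strong(expected, 1,
                                         std::memory_order_acquire);
  assert(!ok && expected == 1);

  word.store(0);              // lock released
  expected = 0;               // must re-seed before retrying
  ok = word.compare_exchange_strong(expected, 1,
                                    std::memory_order_acquire);
  assert(ok && expected == 0);  // on success 'expected' is untouched
}

The choice of compare_exchange_weak in the header is also worth a note: it may fail spuriously on LL/SC architectures, which is harmless here because Lock() simply falls through to SlowLock(), and TryLock() is only documented to succeed "with high probability".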

src/base/spinlock_internal.h

@@ -37,14 +37,17 @@
 #define BASE_SPINLOCK_INTERNAL_H_

 #include <config.h>
+#include <atomic>
 #include "base/basictypes.h"
-#include "base/atomicops.h"

 namespace base {
 namespace internal {

-void SpinLockWake(volatile Atomic32 *w, bool all);
-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop);
+void SpinLockWake(std::atomic<int> *w, bool all);
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop);

 }  // namespace internal
 }  // namespace base

src/base/spinlock_linux-inl.h

@@ -55,8 +55,7 @@ static struct InitModule {
     int x = 0;
     // futexes are ints, so we can use them only when
     // that's the same size as the lockword_ in SpinLock.
-    have_futex = (sizeof(Atomic32) == sizeof(int) &&
-                  syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
+    have_futex = (syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
     if (have_futex && syscall(__NR_futex, &x, FUTEX_WAKE | futex_private_flag,
                               1, NULL, NULL, 0) < 0) {
       futex_private_flag = 0;
@@ -70,7 +69,7 @@
 namespace base {
 namespace internal {

-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
   if (loop != 0) {
     int save_errno = errno;
     struct timespec tm;
@@ -82,7 +81,7 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
     }
     if (have_futex) {
       tm.tv_nsec *= 16;  // increase the delay; we expect explicit wakeups
-      syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)),
+      syscall(__NR_futex, reinterpret_cast<int*>(w),
              FUTEX_WAIT | futex_private_flag, value,
              reinterpret_cast<struct kernel_timespec*>(&tm), NULL, 0);
     } else {
@@ -92,9 +91,9 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
   }
 }

-void SpinLockWake(volatile Atomic32 *w, bool all) {
+void SpinLockWake(std::atomic<int> *w, bool all) {
   if (have_futex) {
-    syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)),
+    syscall(__NR_futex, reinterpret_cast<int*>(w),
            FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, NULL, NULL, 0);
   }
 }
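
The runtime sizeof(Atomic32) test deleted above guarded the assumption that the lock word is exactly the int-sized cell futex(2) operates on; with std::atomic<int> the same guarantee can be stated at compile time. A sketch of such a check, my addition rather than part of the commit:

#include <atomic>

// The kernel sees the raw bytes behind reinterpret_cast<int*>(w), so the
// atomic must be exactly an int with no internal lock word of its own.
static_assert(sizeof(std::atomic<int>) == sizeof(int),
              "futexes operate on int-sized words");
static_assert(std::atomic<int>::is_always_lock_free,   // C++17
              "a non-lock-free atomic cannot be shared with the kernel");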

src/base/spinlock_win32-inl.h

@@ -38,7 +38,7 @@
 namespace base {
 namespace internal {

-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
   if (loop == 0) {
   } else if (loop == 1) {
     Sleep(0);
@@ -47,7 +47,7 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
   }
 }

-void SpinLockWake(volatile Atomic32 *w, bool all) {
+void SpinLockWake(std::atomic<int> *w, bool all) {
 }

 }  // namespace internal
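
Worth noting in review: on the Windows path SpinLockWake() is a no-op, since waiters back off with Sleep() instead of parking on a kernel wait object; the std::atomic<int>* parameter is accepted purely to keep the two platform interfaces identical.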