mirror of https://github.com/gperftools/gperftools, synced 2025-03-11 07:17:38 +00:00

port spinlocks atomic ops usage to std::atomic

parent 3494eb29d6
commit ea0988b020
src/base/spinlock.cc
@@ -75,29 +75,44 @@ inline void SpinlockPause(void) {
 // Monitor the lock to see if its value changes within some time
 // period (adaptive_spin_count loop iterations). The last value read
 // from the lock is returned from the method.
-Atomic32 SpinLock::SpinLoop() {
+int SpinLock::SpinLoop() {
   int c = adaptive_spin_count;
-  while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
+  while (lockword_.load(std::memory_order_relaxed) != kSpinLockFree && --c > 0) {
     SpinlockPause();
   }
-  return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                              kSpinLockSleeper);
+  int old = kSpinLockFree;
+  lockword_.compare_exchange_strong(old, kSpinLockSleeper, std::memory_order_acquire);
+  // Note that trying to set the lock word to the 'have sleeper' state
+  // might look unnecessary, but:
+  //
+  // *) pay attention to the second call to SpinLoop at the bottom of the SlowLock loop below
+  //
+  // *) note that we get there after sleeping in SpinLockDelay and
+  //    getting woken by Unlock
+  //
+  // *) also note that we don't "count" sleepers, so when Unlock
+  //    wakes us, it also sets the lock word to "free". So we risk
+  //    forgetting other sleepers. To prevent this, we become the
+  //    "designated waker" by setting the lock word to "have sleeper",
+  //    so that when we unlock, we also wake up someone.
+  return old;
 }
 
 void SpinLock::SlowLock() {
-  Atomic32 lock_value = SpinLoop();
+  int lock_value = SpinLoop();
 
   int lock_wait_call_count = 0;
   while (lock_value != kSpinLockFree) {
     // If the lock is currently held, but not marked as having a sleeper, mark
     // it as having a sleeper.
     if (lock_value == kSpinLockHeld) {
-      // Here, just "mark" that the thread is going to sleep. Don't store the
-      // lock wait time in the lock as that will cause the current lock
-      // owner to think it experienced contention.
-      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
-                                                        kSpinLockHeld,
-                                                        kSpinLockSleeper);
+      // Here, just "mark" that the thread is going to sleep. Don't
+      // store the lock wait time in the lock as that will cause the
+      // current lock owner to think it experienced contention. Note
+      // that compare_exchange updates lock_value with the previous
+      // value of the lock word.
+      lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper,
+                                        std::memory_order_acquire);
       if (lock_value == kSpinLockHeld) {
         // Successfully transitioned to kSpinLockSleeper.  Pass
         // kSpinLockSleeper to the SpinLockDelay routine to properly indicate
@@ -107,9 +122,7 @@ void SpinLock::SlowLock() {
     // Lock is free again, so try and acquire it before sleeping.  The
     // new lock state will be the number of cycles this thread waited if
     // this thread obtains the lock.
-    lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
-                                                      kSpinLockFree,
-                                                      kSpinLockSleeper);
+    lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper, std::memory_order_acquire);
     continue;  // skip the delay at the end of the loop
   }
 }
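Note on the idiom above: the std::atomic compare-exchange family reports the observed value through its first ("expected") argument, which is how SpinLoop and SlowLock refresh their local copy of the lock word without issuing a separate load. A minimal standalone sketch of that behavior (not part of the commit; the names lockword/expected are illustrative):

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> lockword(1);  // pretend the lock is held (kSpinLockHeld)
  int expected = 0;              // we hope it is free (kSpinLockFree)
  // Fails because lockword != expected; 'expected' is rewritten to 1.
  bool ok = lockword.compare_exchange_strong(expected, 2,
                                             std::memory_order_acquire);
  assert(!ok && expected == 1);  // expected now holds the previous lock word
  // Retrying with the refreshed value succeeds and installs 2 (kSpinLockSleeper).
  ok = lockword.compare_exchange_strong(expected, 2,
                                        std::memory_order_acquire);
  assert(ok && lockword.load() == 2);
  return 0;
}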
src/base/spinlock.h
@@ -40,6 +40,9 @@
 #define BASE_SPINLOCK_H_
 
 #include <config.h>
+
+#include <atomic>
+
 #include "base/atomicops.h"
 #include "base/basictypes.h"
 #include "base/dynamic_annotations.h"
@@ -63,9 +66,9 @@ class LOCKABLE SpinLock {
   }
 
   // Acquire this SpinLock.
-  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
-    if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                             kSpinLockHeld) != kSpinLockFree) {
+  void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+    int old = kSpinLockFree;
+    if (!lockword_.compare_exchange_weak(old, kSpinLockHeld, std::memory_order_acquire)) {
       SlowLock();
     }
   }
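The fast path now uses compare_exchange_weak, which, unlike the strong form, may fail spuriously even when the lock word equals the expected value. That is safe here: any failure, real or spurious, just falls through to SlowLock. A self-contained sketch of the weak-CAS acquire idiom (hypothetical names word/FastLock, not from the commit):

#include <atomic>

std::atomic<int> word(0);  // 0 == free, 1 == held

void FastLock() {
  int expected = 0;
  // Weak CAS: allowed to fail spuriously; a spurious failure only costs
  // another loop iteration (or, in SpinLock, a trip through SlowLock),
  // never correctness.
  while (!word.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
    expected = 0;  // the failed CAS overwrote 'expected' with the observed value
  }
}

int main() { FastLock(); return 0; }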
@@ -74,17 +77,14 @@ class LOCKABLE SpinLock {
   // acquisition was successful.  If the lock was not acquired, false is
   // returned.  If this SpinLock is free at the time of the call, TryLock
   // will return true with high probability.
-  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
-    bool res =
-        (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
-                                              kSpinLockHeld) == kSpinLockFree);
-    return res;
+  bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    int old = kSpinLockFree;
+    return lockword_.compare_exchange_weak(old, kSpinLockHeld);
   }
 
   // Release this SpinLock, which must be held by the calling thread.
-  inline void Unlock() UNLOCK_FUNCTION() {
-    uint64 prev_value = static_cast<uint64>(
-        base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree));
+  void Unlock() UNLOCK_FUNCTION() {
+    int prev_value = lockword_.exchange(kSpinLockFree, std::memory_order_release);
     if (prev_value != kSpinLockHeld) {
       // Speed the wakeup of any waiter.
       SlowUnlock();
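Two details worth noting. TryLock also uses the weak CAS (with the default seq_cst ordering), so it can spuriously report failure even on a free lock; that is consistent with its documented "true with high probability" contract. And Unlock swaps in kSpinLockFree with release ordering, then inspects the previous word: anything other than a plain kSpinLockHeld means some waiter marked itself as sleeping, so SlowUnlock must issue a wakeup. A runnable sketch of that release-exchange pattern (simplified constants, not the commit's code):

#include <atomic>
#include <cassert>

enum { kFree = 0, kHeld = 1, kSleeper = 2 };

std::atomic<int> word(kSleeper);  // a waiter marked itself before sleeping
int wakeups = 0;

void Unlock() {
  // exchange returns the previous word; release ordering publishes all
  // writes made in the critical section to the next acquirer.
  int prev = word.exchange(kFree, std::memory_order_release);
  if (prev != kHeld) {
    ++wakeups;  // stand-in for SlowUnlock()'s futex wake
  }
}

int main() {
  Unlock();
  assert(wakeups == 1 && word.load() == kFree);
  return 0;
}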
@@ -94,8 +94,8 @@ class LOCKABLE SpinLock {
   // Determine if the lock is held.  When the lock is held by the invoking
   // thread, true will always be returned. Intended to be used as
   // CHECK(lock.IsHeld()).
-  inline bool IsHeld() const {
-    return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree;
+  bool IsHeld() const {
+    return lockword_.load(std::memory_order_relaxed) != kSpinLockFree;
   }
 
   static const base::LinkerInitialized LINKER_INITIALIZED;  // backwards compat
@@ -104,11 +104,11 @@ class LOCKABLE SpinLock {
   enum { kSpinLockHeld = 1 };
   enum { kSpinLockSleeper = 2 };
 
-  volatile Atomic32 lockword_;
+  std::atomic<int> lockword_;
 
   void SlowLock();
   void SlowUnlock();
-  Atomic32 SpinLoop();
+  int SpinLoop();
 
   DISALLOW_COPY_AND_ASSIGN(SpinLock);
 };
src/base/spinlock_internal.h
@@ -37,14 +37,17 @@
 #define BASE_SPINLOCK_INTERNAL_H_
 
 #include <config.h>
+
+#include <atomic>
+
 #include "base/basictypes.h"
 #include "base/atomicops.h"
 
 namespace base {
 namespace internal {
 
-void SpinLockWake(volatile Atomic32 *w, bool all);
-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop);
+void SpinLockWake(std::atomic<int> *w, bool all);
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop);
 
 } // namespace internal
 } // namespace base
src/base/spinlock_linux-inl.h
@@ -55,8 +55,7 @@ static struct InitModule {
     int x = 0;
     // futexes are ints, so we can use them only when
     // that's the same size as the lockword_ in SpinLock.
-    have_futex = (sizeof(Atomic32) == sizeof(int) &&
-                  syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
+    have_futex = (syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
     if (have_futex && syscall(__NR_futex, &x, FUTEX_WAKE | futex_private_flag,
                               1, NULL, NULL, 0) < 0) {
       futex_private_flag = 0;
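The sizeof(Atomic32) == sizeof(int) runtime check can be dropped because the lock word is now a std::atomic<int>, which the futex calls treat as a plain int. A compile-time sketch of the property the code relies on (illustrative only; the commit itself adds no such asserts, and is_always_lock_free is C++17, newer than this code):

#include <atomic>

static_assert(sizeof(std::atomic<int>) == sizeof(int),
              "futexes operate on plain 4-byte ints");
#if __cplusplus >= 201703L
static_assert(std::atomic<int>::is_always_lock_free,
              "the lock word must be a real atomic int, not mutex-emulated");
#endif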
@@ -70,7 +69,7 @@ static struct InitModule {
 namespace base {
 namespace internal {
 
-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
   if (loop != 0) {
     int save_errno = errno;
     struct timespec tm;
@@ -82,7 +81,7 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
     }
     if (have_futex) {
       tm.tv_nsec *= 16;  // increase the delay; we expect explicit wakeups
-      syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)),
+      syscall(__NR_futex, reinterpret_cast<int*>(w),
              FUTEX_WAIT | futex_private_flag, value,
              reinterpret_cast<struct kernel_timespec*>(&tm), NULL, 0);
     } else {
@@ -92,9 +91,9 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
   }
 }
 
-void SpinLockWake(volatile Atomic32 *w, bool all) {
+void SpinLockWake(std::atomic<int> *w, bool all) {
   if (have_futex) {
-    syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)),
+    syscall(__NR_futex, reinterpret_cast<int*>(w),
            FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, NULL, NULL, 0);
   }
 }
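For reference, a minimal Linux-only demonstration of the FUTEX_WAIT / FUTEX_WAKE pair these helpers wrap (not from the commit; the futex() wrapper and 'word' are illustrative). FUTEX_WAIT blocks only if the word still holds the expected value when the kernel checks it, which is what closes the lost-wakeup race between SpinLockDelay and Unlock:

#include <atomic>
#include <climits>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static std::atomic<int> word(0);

static long futex(std::atomic<int>* addr, int op, int val) {
  return syscall(__NR_futex, reinterpret_cast<int*>(addr), op, val,
                 nullptr, nullptr, 0);
}

int main() {
  // Wake up to INT_MAX waiters (none exist here, so this is a no-op).
  futex(&word, FUTEX_WAKE, INT_MAX);
  // Waiting with a stale expected value returns immediately (EWOULDBLOCK)
  // instead of sleeping -- the property SpinLockDelay depends on.
  word.store(1);
  futex(&word, FUTEX_WAIT, 0);  // word != 0, so this does not block
  return 0;
}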
src/base/spinlock_win32-inl.h
@@ -38,7 +38,7 @@
 namespace base {
 namespace internal {
 
-void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
+void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
   if (loop == 0) {
   } else if (loop == 1) {
     Sleep(0);
@@ -47,7 +47,7 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
   }
 }
 
-void SpinLockWake(volatile Atomic32 *w, bool all) {
+void SpinLockWake(std::atomic<int> *w, bool all) {
 }
 
 }  // namespace internal