Merge pull request #16618 from adamemerson/wip-throttle

common/Throttle: Clean up

Reviewed-by: Gregory Farnum <gfarnum@redhat.com>
Reviewed-by: Kefu Chai <kchai@redhat.com>

commit 013948aa14
@@ -4,7 +4,10 @@
#include "include/scope_guard.h"

#include "common/Throttle.h"
#include "common/ceph_time.h"
#include "common/perf_counters.h"
#include "common/Throttle.h"

// re-include our assert to clobber the system one; fix dout:
#include "include/assert.h"

@@ -14,6 +17,10 @@
#undef dout_prefix
#define dout_prefix *_dout << "throttle(" << name << " " << (void*)this << ") "

using ceph::mono_clock;
using ceph::mono_time;
using ceph::uniquely_lock;

enum {
  l_throttle_first = 532430,
  l_throttle_val,
@@ -31,11 +38,9 @@ enum {
  l_throttle_last,
};

Throttle::Throttle(CephContext *cct, const std::string& n, int64_t m, bool _use_perf)
  : cct(cct), name(n), logger(NULL),
    max(m),
    lock("Throttle::lock"),
    use_perf(_use_perf)
Throttle::Throttle(CephContext *cct, const std::string& n, int64_t m,
                   bool _use_perf)
  : cct(cct), name(n), max(m), use_perf(_use_perf)
{
  assert(m >= 0);

@@ -57,69 +62,55 @@ Throttle::Throttle(CephContext *cct, const std::string& n, int64_t m, bool _use_
    b.add_u64_counter(l_throttle_put_sum, "put_sum", "Put data");
    b.add_time_avg(l_throttle_wait, "wait", "Waiting latency");

    logger = b.create_perf_counters();
    cct->get_perfcounters_collection()->add(logger);
    logger = { b.create_perf_counters(), cct };
    cct->get_perfcounters_collection()->add(logger.get());
    logger->set(l_throttle_max, max);
  }
}

Throttle::~Throttle()
{
  {
    Mutex::Locker l(lock);
    assert(cond.empty());
  }

  if (!use_perf)
    return;

  if (logger) {
    cct->get_perfcounters_collection()->remove(logger);
    delete logger;
  }
  auto l = uniquely_lock(lock);
  assert(conds.empty());
}

void Throttle::_reset_max(int64_t m)
{
  assert(lock.is_locked());
  // lock must be held.
  if (static_cast<int64_t>(max) == m)
    return;
  if (!cond.empty())
    cond.front()->SignalOne();
  if (!conds.empty())
    conds.front().notify_one();
  if (logger)
    logger->set(l_throttle_max, m);
  max = m;
}

bool Throttle::_wait(int64_t c)
bool Throttle::_wait(int64_t c, UNIQUE_LOCK_T(lock)& l)
{
  utime_t start;
  mono_time start;
  bool waited = false;
  if (_should_wait(c) || !cond.empty()) { // always wait behind other waiters.
  if (_should_wait(c) || !conds.empty()) { // always wait behind other waiters.
    {
      auto cv = cond.insert(cond.end(), new Cond);
      auto cv = conds.emplace(conds.end());
      auto w = make_scope_guard([this, cv]() {
          delete *cv;
          cond.erase(cv);
          conds.erase(cv);
        });
      waited = true;
      ldout(cct, 2) << "_wait waiting..." << dendl;
      if (logger)
        start = ceph_clock_now();

      do {
        (*cv)->Wait(lock);
      } while ((_should_wait(c) || cv != cond.begin()));
        start = mono_clock::now();

      cv->wait(l, [this, c, cv]() { return (!_should_wait(c) &&
                                            cv == conds.begin()); });
      ldout(cct, 2) << "_wait finished waiting" << dendl;
      if (logger) {
        utime_t dur = ceph_clock_now() - start;
        logger->tinc(l_throttle_wait, dur);
        logger->tinc(l_throttle_wait, mono_clock::now() - start);
      }
    }
    // wake up the next guy
    if (!cond.empty())
      cond.front()->SignalOne();
    if (!conds.empty())
      conds.front().notify_one();
  }
  return waited;
}
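The reworked _wait keeps one std::condition_variable per waiter in a std::list so that wakeups stay FIFO: a waiter proceeds only when the throttle has room and its node is at the front of the list, and the scope_guard removes the node on every exit path. A minimal standalone sketch of that pattern (the class and names here are illustrative, not the Ceph API):

#include <condition_variable>
#include <list>
#include <mutex>

class FifoGate {
  std::mutex lock;
  std::list<std::condition_variable> conds;  // one node per waiter, FIFO order
  bool open = false;

public:
  void wait_turn() {
    std::unique_lock<std::mutex> l(lock);
    auto cv = conds.emplace(conds.end());    // enqueue myself at the tail
    // Wait until the gate is open *and* I am the oldest waiter.
    cv->wait(l, [this, cv] { return open && cv == conds.begin(); });
    conds.erase(cv);                         // Throttle does this via scope_guard
    if (!conds.empty())
      conds.front().notify_one();            // pass the baton to the next waiter
  }

  void release() {
    std::unique_lock<std::mutex> l(lock);
    open = true;
    if (!conds.empty())
      conds.front().notify_one();
  }
};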
@@ -130,13 +121,13 @@ bool Throttle::wait(int64_t m)
    return false;
  }

  Mutex::Locker l(lock);
  auto l = uniquely_lock(lock);
  if (m) {
    assert(m > 0);
    _reset_max(m);
  }
  ldout(cct, 10) << "wait" << dendl;
  return _wait(0);
  return _wait(0, l);
}

int64_t Throttle::take(int64_t c)
@@ -147,7 +138,7 @@ int64_t Throttle::take(int64_t c)
  assert(c >= 0);
  ldout(cct, 10) << "take " << c << dendl;
  {
    Mutex::Locker l(lock);
    auto l = uniquely_lock(lock);
    count += c;
  }
  if (logger) {
@@ -171,12 +162,12 @@ bool Throttle::get(int64_t c, int64_t m)
  }
  bool waited = false;
  {
    Mutex::Locker l(lock);
    auto l = uniquely_lock(lock);
    if (m) {
      assert(m > 0);
      _reset_max(m);
    }
    waited = _wait(c);
    waited = _wait(c, l);
    count += c;
  }
  if (logger) {
@@ -197,15 +188,16 @@ bool Throttle::get_or_fail(int64_t c)
  }

  assert (c >= 0);
  Mutex::Locker l(lock);
  if (_should_wait(c) || !cond.empty()) {
  auto l = uniquely_lock(lock);
  if (_should_wait(c) || !conds.empty()) {
    ldout(cct, 10) << "get_or_fail " << c << " failed" << dendl;
    if (logger) {
      logger->inc(l_throttle_get_or_fail_fail);
    }
    return false;
  } else {
    ldout(cct, 10) << "get_or_fail " << c << " success (" << count.load() << " -> " << (count.load() + c) << ")" << dendl;
    ldout(cct, 10) << "get_or_fail " << c << " success (" << count.load()
                   << " -> " << (count.load() + c) << ")" << dendl;
    count += c;
    if (logger) {
      logger->inc(l_throttle_get_or_fail_success);
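For context on the interface being reworked here: callers pair a blocking get() with a later put(), and use get_or_fail() where they would rather drop or defer work than block. A hedged sketch of a typical call site; do_write() is a made-up placeholder for the caller's real work:

#include "common/Throttle.h"

void do_write(int64_t len);  // hypothetical work function

void submit_io(Throttle& throttle, int64_t len) {
  throttle.get(len);         // may block until len units are available
  do_write(len);
  throttle.put(len);         // return the units, waking the next waiter
}

bool try_submit_io(Throttle& throttle, int64_t len) {
  if (!throttle.get_or_fail(len))  // never blocks; false if it would overshoot max
    return false;
  do_write(len);
  throttle.put(len);
  return true;
}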
@@ -224,12 +216,14 @@ int64_t Throttle::put(int64_t c)
  }

  assert(c >= 0);
  ldout(cct, 10) << "put " << c << " (" << count.load() << " -> " << (count.load()-c) << ")" << dendl;
  Mutex::Locker l(lock);
  ldout(cct, 10) << "put " << c << " (" << count.load() << " -> "
                 << (count.load()-c) << ")" << dendl;
  auto l = uniquely_lock(lock);
  if (c) {
    if (!cond.empty())
      cond.front()->SignalOne();
    assert(static_cast<int64_t>(count) >= c); // if count goes negative, we failed somewhere!
    if (!conds.empty())
      conds.front().notify_one();
    // if count goes negative, we failed somewhere!
    assert(static_cast<int64_t>(count) >= c);
    count -= c;
    if (logger) {
      logger->inc(l_throttle_put);
@@ -242,9 +236,9 @@ int64_t Throttle::put(int64_t c)

void Throttle::reset()
{
  Mutex::Locker l(lock);
  if (!cond.empty())
    cond.front()->SignalOne();
  auto l = uniquely_lock(lock);
  if (!conds.empty())
    conds.front().notify_one();
  count = 0;
  if (logger) {
    logger->set(l_throttle_val, 0);
@@ -265,8 +259,9 @@ enum {
  l_backoff_throttle_last,
};

BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n, unsigned expected_concurrency, bool _use_perf)
  : cct(cct), name(n), logger(NULL),
BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n,
                                 unsigned expected_concurrency, bool _use_perf)
  : cct(cct), name(n),
    conds(expected_concurrency),///< [in] determines size of conds
    use_perf(_use_perf)
{
@@ -274,7 +269,8 @@ BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n, unsigne
    return;

  if (cct->_conf->throttler_perf_counter) {
    PerfCountersBuilder b(cct, string("throttle-") + name, l_backoff_throttle_first, l_backoff_throttle_last);
    PerfCountersBuilder b(cct, string("throttle-") + name,
                          l_backoff_throttle_first, l_backoff_throttle_last);
    b.add_u64(l_backoff_throttle_val, "val", "Currently available throttle");
    b.add_u64(l_backoff_throttle_max, "max", "Max value for throttle");
    b.add_u64_counter(l_backoff_throttle_get, "get", "Gets");
@@ -285,26 +281,16 @@ BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n, unsigne
    b.add_u64_counter(l_backoff_throttle_put_sum, "put_sum", "Put data");
    b.add_time_avg(l_backoff_throttle_wait, "wait", "Waiting latency");

    logger = b.create_perf_counters();
    cct->get_perfcounters_collection()->add(logger);
    logger = { b.create_perf_counters(), cct };
    cct->get_perfcounters_collection()->add(logger.get());
    logger->set(l_backoff_throttle_max, max);
  }
}

BackoffThrottle::~BackoffThrottle()
{
  {
    locker l(lock);
    assert(waiters.empty());
  }

  if (!use_perf)
    return;

  if (logger) {
    cct->get_perfcounters_collection()->remove(logger);
    delete logger;
  }
  auto l = uniquely_lock(lock);
  assert(waiters.empty());
}

bool BackoffThrottle::set_params(
@@ -451,7 +437,7 @@ std::chrono::duration<double> BackoffThrottle::get(uint64_t c)
  }

  auto ticket = _push_waiter();
  utime_t wait_from = ceph_clock_now();
  auto wait_from = mono_clock::now();
  bool waited = false;

  while (waiters.begin() != ticket) {
@@ -482,7 +468,7 @@ std::chrono::duration<double> BackoffThrottle::get(uint64_t c)
  if (logger) {
    logger->set(l_backoff_throttle_val, current);
    if (waited) {
      logger->tinc(l_backoff_throttle_wait, ceph_clock_now() - wait_from);
      logger->tinc(l_backoff_throttle_wait, mono_clock::now() - wait_from);
    }
  }
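Switching the latency sample from utime_t/ceph_clock_now() to ceph::mono_clock means it can no longer be skewed by wall-clock adjustments. The same idea with the standard steady clock, as a rough stand-in for what mono_clock provides:

#include <chrono>
#include <iostream>

int main() {
  using clock = std::chrono::steady_clock;    // monotonic, like ceph::mono_clock
  auto wait_from = clock::now();
  // ... block on a condition variable, sleep, etc. ...
  auto waited_for = clock::now() - wait_from; // a chrono duration, not a timestamp
  std::cout << std::chrono::duration_cast<std::chrono::microseconds>(waited_for).count()
            << "us\n";
}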
@@ -532,55 +518,45 @@ uint64_t BackoffThrottle::get_max()
}

SimpleThrottle::SimpleThrottle(uint64_t max, bool ignore_enoent)
  : m_lock("SimpleThrottle"),
    m_max(max),
    m_current(0),
    m_ret(0),
    m_ignore_enoent(ignore_enoent)
{
}
  : m_max(max), m_ignore_enoent(ignore_enoent) {}

SimpleThrottle::~SimpleThrottle()
{
  Mutex::Locker l(m_lock);
  auto l = uniquely_lock(m_lock);
  assert(m_current == 0);
  assert(waiters == 0);
}

void SimpleThrottle::start_op()
{
  Mutex::Locker l(m_lock);
  while (m_max == m_current) {
    waiters++;
    m_cond.Wait(m_lock);
    waiters--;
  }
  auto l = uniquely_lock(m_lock);
  waiters++;
  m_cond.wait(l, [this]() { return m_max != m_current; });
  waiters--;
  ++m_current;
}

void SimpleThrottle::end_op(int r)
{
  Mutex::Locker l(m_lock);
  auto l = uniquely_lock(m_lock);
  --m_current;
  if (r < 0 && !m_ret && !(r == -ENOENT && m_ignore_enoent))
    m_ret = r;
  m_cond.Signal();
  m_cond.notify_all();
}

bool SimpleThrottle::pending_error() const
{
  Mutex::Locker l(m_lock);
  auto l = uniquely_lock(m_lock);
  return (m_ret < 0);
}

int SimpleThrottle::wait_for_ret()
{
  Mutex::Locker l(m_lock);
  while (m_current > 0) {
    waiters++;
    m_cond.Wait(m_lock);
    waiters--;
  }
  auto l = uniquely_lock(m_lock);
  waiters++;
  m_cond.wait(l, [this]() { return m_current == 0; });
  waiters--;
  return m_ret;
}
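The SimpleThrottle changes replace hand-written `while (...) cond.Wait(lock);` loops with std::condition_variable::wait(lock, predicate), which re-checks the predicate after every wakeup and so handles spurious wakeups just as the explicit loop did. A small self-contained comparison of the two forms:

#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
int current = 0;
const int max_ops = 4;

void start_op_manual() {
  std::unique_lock<std::mutex> l(m);
  while (current == max_ops)   // old style: explicit re-check loop
    cv.wait(l);
  ++current;
}

void start_op_predicate() {
  std::unique_lock<std::mutex> l(m);
  cv.wait(l, [] { return current != max_ops; });  // same semantics, less boilerplate
  ++current;
}

void end_op() {
  std::unique_lock<std::mutex> l(m);
  --current;
  cv.notify_all();
}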
@@ -589,80 +565,76 @@ void C_OrderedThrottle::finish(int r) {
}

OrderedThrottle::OrderedThrottle(uint64_t max, bool ignore_enoent)
  : m_lock("OrderedThrottle::m_lock"), m_max(max), m_current(0), m_ret_val(0),
    m_ignore_enoent(ignore_enoent), m_next_tid(0), m_complete_tid(0) {
}
  : m_max(max), m_ignore_enoent(ignore_enoent) {}

OrderedThrottle::~OrderedThrottle() {
  Mutex::Locker locker(m_lock);
  auto l = uniquely_lock(m_lock);
  assert(waiters == 0);
}

C_OrderedThrottle *OrderedThrottle::start_op(Context *on_finish) {
  assert(on_finish != NULL);
  assert(on_finish);

  Mutex::Locker locker(m_lock);
  auto l = uniquely_lock(m_lock);
  uint64_t tid = m_next_tid++;
  m_tid_result[tid] = Result(on_finish);
  C_OrderedThrottle *ctx = new C_OrderedThrottle(this, tid);
  auto ctx = make_unique<C_OrderedThrottle>(this, tid);

  complete_pending_ops();
  complete_pending_ops(l);
  while (m_max == m_current) {
    ++waiters;
    m_cond.Wait(m_lock);
    m_cond.wait(l);
    --waiters;
    complete_pending_ops();
    complete_pending_ops(l);
  }
  ++m_current;

  return ctx;
  return ctx.release();
}

void OrderedThrottle::end_op(int r) {
  Mutex::Locker locker(m_lock);
  auto l = uniquely_lock(m_lock);
  assert(m_current > 0);

  if (r < 0 && m_ret_val == 0 && (r != -ENOENT || !m_ignore_enoent)) {
    m_ret_val = r;
  }
  --m_current;
  m_cond.Signal();
  m_cond.notify_all();
}

void OrderedThrottle::finish_op(uint64_t tid, int r) {
  Mutex::Locker locker(m_lock);
  auto l = uniquely_lock(m_lock);

  TidResult::iterator it = m_tid_result.find(tid);
  auto it = m_tid_result.find(tid);
  assert(it != m_tid_result.end());

  it->second.finished = true;
  it->second.ret_val = r;
  m_cond.Signal();
  m_cond.notify_all();
}

bool OrderedThrottle::pending_error() const {
  Mutex::Locker locker(m_lock);
  auto l = uniquely_lock(m_lock);
  return (m_ret_val < 0);
}

int OrderedThrottle::wait_for_ret() {
  Mutex::Locker locker(m_lock);
  complete_pending_ops();
  auto l = uniquely_lock(m_lock);
  complete_pending_ops(l);

  while (m_current > 0) {
    ++waiters;
    m_cond.Wait(m_lock);
    m_cond.wait(l);
    --waiters;
    complete_pending_ops();
    complete_pending_ops(l);
  }
  return m_ret_val;
}

void OrderedThrottle::complete_pending_ops() {
  assert(m_lock.is_locked());

void OrderedThrottle::complete_pending_ops(UNIQUE_LOCK_T(m_lock)& l) {
  while (true) {
    TidResult::iterator it = m_tid_result.begin();
    auto it = m_tid_result.begin();
    if (it == m_tid_result.end() || it->first != m_complete_tid ||
        !it->second.finished) {
      break;
@@ -671,9 +643,9 @@ void OrderedThrottle::complete_pending_ops() {
    Result result = it->second;
    m_tid_result.erase(it);

    m_lock.Unlock();
    l.unlock();
    result.on_finish->complete(result.ret_val);
    m_lock.Lock();
    l.lock();

    ++m_complete_tid;
  }
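complete_pending_ops() now receives the caller's std::unique_lock so it can drop the mutex around user completions and re-take it afterwards, mirroring the old m_lock.Unlock()/Lock() pair. A minimal sketch of that unlock-around-callback shape (the queue and callback type are invented for the example):

#include <functional>
#include <mutex>
#include <queue>

std::mutex m;
std::queue<std::function<void()>> completions;

void drain_completions(std::unique_lock<std::mutex>& l) {
  // Precondition: l owns m. The mutex is released only around user code.
  while (!completions.empty()) {
    auto fn = std::move(completions.front());
    completions.pop();
    l.unlock();   // never run arbitrary callbacks with our mutex held
    fn();
    l.lock();     // state may have changed; the loop re-reads the queue
  }
}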
@@ -4,19 +4,17 @@
#ifndef CEPH_THROTTLE_H
#define CEPH_THROTTLE_H

#include <map>
#include <list>
#include <chrono>
#include <atomic>
#include <iostream>
#include <chrono>
#include <condition_variable>
#include <stdexcept>
#include <iostream>
#include <list>
#include <map>
#include <mutex>

#include "Cond.h"
#include "include/Context.h"

class CephContext;
class PerfCounters;
#include "common/convenience.h"
#include "common/perf_counters.h"

/**
 * @class Throttle
@@ -29,13 +27,11 @@ class PerfCounters;
class Throttle {
  CephContext *cct;
  const std::string name;
  PerfCounters *logger;
  PerfCountersRef logger;
  std::atomic<unsigned> count = { 0 }, max = { 0 };
  Mutex lock;
  list<Cond*> cond;
  std::mutex lock;
  std::list<std::condition_variable> conds;
  const bool use_perf;
  bool shutting_down = false;
  Cond shutdown_clear;

public:
  Throttle(CephContext *cct, const std::string& n, int64_t m = 0, bool _use_perf = true);
@@ -52,7 +48,7 @@ private:
            (c >= m && cur > m)); // except for large c
  }

  bool _wait(int64_t c);
  bool _wait(int64_t c, UNIQUE_LOCK_T(lock)& l);

public:
  /**
@@ -124,7 +120,7 @@ public:
    return _should_wait(c);
  }
  void reset_max(int64_t m) {
    Mutex::Locker l(lock);
    auto l = ceph::uniquely_lock(lock);
    _reset_max(m);
  }
};
@@ -158,7 +154,7 @@ public:
class BackoffThrottle {
  CephContext *cct;
  const std::string name;
  PerfCounters *logger;
  PerfCountersRef logger;

  std::mutex lock;
  using locker = std::unique_lock<std::mutex>;
@@ -256,11 +252,11 @@ public:
  bool pending_error() const;
  int wait_for_ret();
private:
  mutable Mutex m_lock;
  Cond m_cond;
  mutable std::mutex m_lock;
  std::condition_variable m_cond;
  uint64_t m_max;
  uint64_t m_current;
  int m_ret;
  uint64_t m_current = 0;
  int m_ret = 0;
  bool m_ignore_enoent;
  uint32_t waiters = 0;
};
@@ -318,19 +314,19 @@ private:

  typedef std::map<uint64_t, Result> TidResult;

  mutable Mutex m_lock;
  Cond m_cond;
  mutable std::mutex m_lock;
  std::condition_variable m_cond;
  uint64_t m_max;
  uint64_t m_current;
  int m_ret_val;
  uint64_t m_current = 0;
  int m_ret_val = 0;
  bool m_ignore_enoent;

  uint64_t m_next_tid;
  uint64_t m_complete_tid;
  uint64_t m_next_tid = 0;
  uint64_t m_complete_tid = 0;

  TidResult m_tid_result;

  void complete_pending_ops();
  void complete_pending_ops(UNIQUE_LOCK_T(m_lock)& l);
  uint32_t waiters = 0;
};
@@ -118,11 +118,27 @@ not_fn_result<F> not_fn(F&& fn) {
  return not_fn_result<F>(std::forward<F>(fn));
}

struct in_place_t {};
constexpr in_place_t in_place{};

template<typename T>
struct in_place_type_t {};

#ifdef __cpp_variable_templates
template<typename T>
constexpr in_place_type_t<T> in_place_type{};
#endif // __cpp_variable_templates
} // namespace _backport17
using _backport14::make_unique;
using _backport17::size;
using _backport14::max;
using _backport17::not_fn;
using _backport17::in_place_t;
using _backport17::in_place;
using _backport17::in_place_type_t;
#ifdef __cpp_variable_templates
using _backport17::in_place_type;
#endif // __cpp_variable_templates
} // namespace ceph

#endif // CEPH_COMMON_BACKPORT14_H
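in_place_t and in_place_type_t mirror the C++17 tags of the same names: they let a wrapper construct its payload directly from constructor arguments rather than from a pre-built temporary. An assumed usage, in the spirit of how scope_guard consumes them later in this change; the holder class is invented for illustration:

#include <string>
#include <utility>

#include "common/backport14.h"  // for ceph::in_place_t / ceph::in_place

// Toy wrapper: forwards constructor arguments to its payload when given the tag.
template<typename T>
struct holder {
  T value;
  template<typename... Args>
  holder(ceph::in_place_t, Args&&... args) : value(std::forward<Args>(args)...) {}
};

// holder<std::string> h(ceph::in_place, 5, 'x');  // builds "xxxxx" in place,
//                                                 // no temporary std::string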
src/common/convenience.h (new file, 164 lines)
@@ -0,0 +1,164 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include <mutex>
#include <memory>
#include <type_traits>
#include <utility>

#include <boost/thread/shared_mutex.hpp>

#include "common/backport14.h"
#include "common/shunique_lock.h"

#include "include/assert.h" // I despise you. Not you the reader, I'm talking
                            // to the include file.


#ifndef CEPH_COMMON_CONVENIENCE_H
#define CEPH_COMMON_CONVENIENCE_H

namespace ceph {

// Lock Factories
// ==============
//
// I used to, whenever I declared a mutex member variable of a class,
// declare a pile of types like:
// ```cpp
// using unique_lock = ::std::unique_lock<decltype(membermutex)>;
// ```
// to avoid having to type that big, long type at every use. It also
// let me change the mutex type later. It's inelegant and breaks down
// if you have more than one type of mutex in the same class. So here
// are some lock factories.
template<typename Mutex, typename ...Args>
inline auto uniquely_lock(Mutex&& m, Args&& ...args)
  -> std::unique_lock<remove_reference_t<Mutex> > {
  return std::unique_lock<remove_reference_t<Mutex> >(
    std::forward<Mutex>(m), std::forward<Args>(args)... );
}

template<typename Mutex, typename ...Args>
inline auto sharingly_lock(Mutex&& m, Args&& ...args)
  -> boost::shared_lock<remove_reference_t<Mutex> > {
  return
    boost::shared_lock<remove_reference_t<Mutex> >(
      std::forward<Mutex>(m), std::forward<Args>(args)...);
}

template<typename Mutex, typename ...Args>
inline auto shuniquely_lock(std::unique_lock<Mutex>&& m, Args&& ...args)
  -> shunique_lock<remove_reference_t<Mutex> > {
  return shunique_lock<remove_reference_t<Mutex> >(
    std::forward<std::unique_lock<Mutex> >(m), std::forward<Args>(args)...);
}

template<typename Mutex, typename ...Args>
inline auto shuniquely_lock(boost::shared_lock<Mutex>&& m, Args&& ...args)
  -> shunique_lock<remove_reference_t<Mutex> > {
  return shunique_lock<remove_reference_t<Mutex> >(
    std::forward<boost::shared_lock<Mutex> >(m),
    std::forward<Args>(args)...);
}

template<typename Mutex, typename ...Args>
inline auto shuniquely_lock(Mutex&& m, Args&& ...args)
  -> shunique_lock<remove_reference_t<Mutex> > {
  return shunique_lock<remove_reference_t<Mutex> >(
    std::forward<Mutex>(m), std::forward<Args>(args)...);
}

// All right! These two don't work like the others. You cannot do
// `auto l = guardedly_lock(m)` since copy elision before C++17 is
// optional. C++17 makes it mandatory so these workarounds won't be
// needed.
//
// To use this, you'll need to do something like:
// `auto&& l = guardedly_lock(m)`
// This way, we aren't actually copying or moving a value, we're
// binding a reference to a temporary which extends its lifetime until
// the end of the enclosing block.
//
// You may in fact want to use
// `[[gnu::unused]] auto&& l = guardedly_lock(m)`
// To avoid the unused variable warning. Since reference assignment
// doesn't have side effects, normally, this just looks to the
// compiler (whose static analysis is not the sharpest hammer in the
// drawer) like a reference we bind for no reason and never
// use. Perhaps future compilers will be smarter about this, but since
// they'll also implement C++17 people might not care.
//

template<typename Mutex>
inline auto guardedly_lock(Mutex&& m)
  -> std::lock_guard<remove_reference_t<Mutex> > {
  m.lock();
  // So the way this works is that Copy List Initialization creates
  // one and only one Temporary. There is no implicit copy that is
  // generally optimized away the way there is if we were to just try
  // something like `return std::lock_guard<Mutex>(m)`.
  //
  // The function then returns this temporary as a prvalue. We cannot
  // bind it to a variable, because that would implicitly copy it
  // (even if in practice RVO would mean there is no copy), so instead
  // the user can bind it to a reference. (It has to be either a const
  // lvalue reference or an rvalue reference.)
  //
  // So we get something analogous to all the others with a mildly
  // wonky syntax. The need to use [[gnu::unused]] is honestly the
  // worst part. It makes this construction unfortunately rather
  // long.
  return { std::forward<Mutex>(m), std::adopt_lock };
}

template<typename Mutex>
inline auto guardedly_lock(Mutex&& m, std::adopt_lock_t)
  -> std::lock_guard<remove_reference_t<Mutex> > {
  return { std::forward<Mutex>(m), std::adopt_lock };
}

template<typename Mutex, typename Fun, typename...Args>
inline auto with_unique_lock(Mutex&& mutex, Fun&& fun, Args&&... args)
  -> decltype(fun(std::forward<Args>(args)...)) {
  // Yes I know there's a lock guard inside and not a unique lock, but
  // the caller doesn't need to know or care about the internal
  // details, and the semantics are those of unique locking.
  [[gnu::unused]] auto&& l = guardedly_lock(std::forward<Mutex>(mutex));
  return std::forward<Fun>(fun)(std::forward<Args>(args)...);
}

template<typename Mutex, typename Fun, typename...Args>
inline auto with_shared_lock(Mutex&& mutex, Fun&& fun, Args&&... args)
  -> decltype(fun(std::forward<Args>(args)...)) {
  auto l = sharingly_lock(std::forward<Mutex>(mutex));
  return std::forward<Fun>(fun)(std::forward<Args>(args)...);
}
}

// Lock Types
// ----------
//
// Lock factories are nice, but you still have to type out a huge,
// obnoxious template type when declaring a function that takes or
// returns a lock class.
//
#define UNIQUE_LOCK_T(m) \
  ::std::unique_lock<ceph::remove_reference_t<decltype(m)>>
#define SHARED_LOCK_T(m) \
  ::std::shared_lock<ceph::remove_reference_t<decltype(m)>>
#define SHUNIQUE_LOCK_T(m) \
  ::ceph::shunique_lock<ceph::remove_reference_t<decltype(m)>>

#endif // CEPH_COMMON_CONVENIENCE_H
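Taken together, the factories and the lock-type macros let a class lock without ever spelling out std::unique_lock<decltype(...)>. A hedged sketch of the intended style; the Counter class is invented for illustration:

#include <cassert>
#include <mutex>

#include "common/convenience.h"

class Counter {
  std::mutex lock;
  int value = 0;

  // Helpers that need the lock already held take it by reference; the
  // macro names the lock type from the mutex member itself.
  void bump_locked(UNIQUE_LOCK_T(lock)& l, int by) {
    assert(l.owns_lock());
    value += by;
  }

public:
  void bump(int by) {
    auto l = ceph::uniquely_lock(lock);          // std::unique_lock, deduced
    bump_locked(l, by);
  }

  int read() {
    // lock_guard semantics; note the reference binding discussed above
    [[gnu::unused]] auto&& g = ceph::guardedly_lock(lock);
    return value;
  }
};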
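with_unique_lock() runs a callable under the mutex and returns its result, which keeps the critical section visually tight. Roughly, under the same assumptions:

#include <mutex>

#include "common/convenience.h"

std::mutex m;
int counter = 0;

int increment() {
  // Equivalent to locking m, running the lambda, and unlocking on return.
  return ceph::with_unique_lock(m, [] { return ++counter; });
}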
@@ -324,4 +324,19 @@ private:
  PerfCounters *m_perf_counters;
};

class PerfCountersDeleter {
  CephContext* cct;

public:
  PerfCountersDeleter() noexcept : cct(nullptr) {}
  PerfCountersDeleter(CephContext* cct) noexcept : cct(cct) {}
  void operator()(PerfCounters* p) noexcept {
    if (cct)
      cct->get_perfcounters_collection()->remove(p);
    delete p;
  }
};

using PerfCountersRef = std::unique_ptr<PerfCounters, PerfCountersDeleter>;

#endif
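PerfCountersRef is a std::unique_ptr whose deleter both unregisters the counters and frees them, which is what lets the throttles drop their manual remove()/delete destructor code. The general unique_ptr-with-custom-deleter shape, sketched here against a stand-in registry rather than the real PerfCountersCollection:

#include <memory>
#include <set>

struct Counters {};                     // stand-in for PerfCounters

std::set<Counters*> registry;           // stand-in for the collection

struct CountersDeleter {
  void operator()(Counters* p) const noexcept {
    registry.erase(p);                  // unregister first...
    delete p;                           // ...then free
  }
};

using CountersRef = std::unique_ptr<Counters, CountersDeleter>;

CountersRef make_registered() {
  CountersRef ref{new Counters};
  registry.insert(ref.get());
  return ref;                           // later destruction unregisters + deletes
}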
@@ -17,6 +17,8 @@

#include <utility>

#include "common/backport14.h"

template <typename F>
struct scope_guard {
  F f;
@@ -25,9 +27,12 @@ struct scope_guard {
  scope_guard(scope_guard &&) = default;
  scope_guard & operator=(const scope_guard &) = delete;
  scope_guard & operator=(scope_guard &&) = default;
  scope_guard(const F& f) : f(f) {}
  scope_guard(F &&f) : f(std::move(f)) {}
  template<typename... Args>
  scope_guard(ceph::in_place_t, Args&& ...args) : f(std::forward<Args>(args)...) {}
  ~scope_guard() {
    f();
    std::move(f)(); // Support at-most-once functions
  }
};

@@ -36,4 +41,9 @@ scope_guard<F> make_scope_guard(F &&f) {
  return scope_guard<F>(std::forward<F>(f));
}

template<typename F, typename... Args>
scope_guard<F> make_scope_guard(ceph::in_place_type_t<F>, Args&& ...args) {
  return { ceph::in_place, std::forward<Args>(args)... };
}

#endif
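The new in_place constructor builds the guarded callable directly from its constructor arguments, and calling std::move(f)() in the destructor also admits callables that are only invocable on an rvalue (at-most-once functions). A hedged usage sketch; the closer functor and the file handling are illustrative only:

#include <cstdio>

#include "common/backport14.h"
#include "include/scope_guard.h"

struct closer {
  std::FILE* f;
  explicit closer(std::FILE* f) : f(f) {}
  void operator()() { if (f) std::fclose(f); }
};

void copy_header(const char* path) {
  std::FILE* f = std::fopen(path, "rb");
  // Construct the closer in place inside the guard, straight from its argument.
  auto g = make_scope_guard(ceph::in_place_type_t<closer>{}, f);
  // ... read from f; the guard closes it on every exit path ...
}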
@@ -2328,7 +2328,7 @@ bool Locker::check_inode_max_size(CInode *in, bool force_wrlock,
    update_size = false;
  }

  calc_new_client_ranges(in, max(new_max_size, size), &new_ranges, &max_increased);
  calc_new_client_ranges(in, ceph::max(new_max_size, size), &new_ranges, &max_increased);

  if (max_increased || latest->client_ranges != new_ranges)
    update_max = true;
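These call sites now spell out ceph::max (exported from common/backport14.h, as shown earlier) instead of relying on an unqualified max, so the intended helper is chosen regardless of using-directives or ADL. A tiny sketch, assuming ceph::max behaves like std::max for same-typed arguments:

#include <cstdint>

#include "common/backport14.h"

uint64_t pick_larger(uint64_t a, uint64_t b) {
  // Explicitly qualified so the intended helper is chosen, not whatever
  // unqualified max() happens to be visible here.
  return ceph::max(a, b);
}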
@@ -599,7 +599,7 @@ bool PurgeQueue::drain(
    max_purge_ops = 0xffff;
  }

  drain_initial = max(bytes_remaining, drain_initial);
  drain_initial = ceph::max(bytes_remaining, drain_initial);

  *progress = drain_initial - bytes_remaining;
  *progress_total = drain_initial;
@@ -728,7 +728,7 @@ int64_t BlueStore::GarbageCollector::estimate(

    // update gc_start_offset/gc_end_offset if needed
    gc_start_offset = min(gc_start_offset, (uint64_t)it->e.blob_start());
    gc_end_offset = max(gc_end_offset, (uint64_t)it->e.blob_end());
    gc_end_offset = ceph::max(gc_end_offset, (uint64_t)it->e.blob_end());

    auto o = it->e.logical_offset;
    auto l = it->e.length;
@@ -22,6 +22,7 @@
#include <stdio.h>
#include <signal.h>
#include "gtest/gtest.h"
#include "common/backport14.h"
#include "common/Mutex.h"
#include "common/Thread.h"
#include "common/Throttle.h"
@@ -42,20 +43,16 @@ protected:
  public:
    Throttle &throttle;
    int64_t count;
    bool waited;
    bool waited = false;

    Thread_get(Throttle& _throttle, int64_t _count) :
      throttle(_throttle),
      count(_count),
      waited(false)
    {
    }
      throttle(_throttle), count(_count) {}

    void *entry() override {
      usleep(5);
      waited = throttle.get(count);
      throttle.put(count);
      return NULL;
      return nullptr;
    }
  };
};
@@ -216,7 +213,6 @@ TEST_F(ThrottleTest, wait) {
  } while(!waited);
}


TEST_F(ThrottleTest, destructor) {
  EXPECT_DEATH({
      int64_t throttle_max = 10;
@@ -241,7 +241,7 @@ int update_osdmap(ObjectStore& fs, OSDSuperblock& sb, MonitorDBStore& ms)
  unsigned nadded = 0;

  OSDMap osdmap;
  for (auto e = max(last_committed+1, sb.oldest_map);
  for (auto e = ceph::max(last_committed+1, sb.oldest_map);
       e <= sb.newest_map; e++) {
    bool have_crc = false;
    uint32_t crc = -1;