PGStateUtils: remove PG*, move PGStateHistory into PeeringState
I don't think there's any need to worry about the PG locking from
PGStateHistory.  NamedState::begin/exit and dump_pgstate_history are the
only users, so the only calls should be under the peering event handler,
the asok command, or the PG constructor and destructor.  The first two
already have the lock.  The last should be safe as well, as long as the
state machine states are constructed and destructed after and before the
PGStateHistory instance.

As such, this patch removes most of that state, leaving the epoch
generation as an interface implemented by PG.  The snap trimming state
machine was already excluded, so this patch leaves it disabled.

Signed-off-by: Samuel Just <sjust@redhat.com>
commit d96c9072b7 (parent 1e4166a049)
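
The heart of the change is visible in the PGStateUtils.h hunks below: epoch
lookup becomes a one-method EpochSource interface that PG implements, so
PGStateHistory no longer needs a PG pointer or any lock juggling. Here is a
minimal standalone sketch of that shape (StateHistory and FakePG are
illustrative stand-ins, not ceph code; only EpochSource and
get_osdmap_epoch come from the patch itself):

// Sketch of the decoupling this patch introduces; compiles on its own.
#include <iostream>

using epoch_t = unsigned int;

// The narrow interface the patch adds: the only thing the history
// still needs from PG is the current OSDMap epoch.
struct EpochSource {
  virtual epoch_t get_osdmap_epoch() const = 0;
  virtual ~EpochSource() {}
};

// Stand-in for PGStateHistory: consumes EpochSource, never touches PG or
// its lock. Callers (peering event handler, asok command, PG ctor/dtor)
// are assumed to hold the PG lock already, per the commit message.
class StateHistory {
  const EpochSource *es;
public:
  explicit StateHistory(const EpochSource *es) : es(es) {}
  void exit_state(const char *state) {
    std::cout << state << " closed at epoch " << es->get_osdmap_epoch() << "\n";
  }
};

// Stand-in for PG, the one real implementor of EpochSource.
struct FakePG : EpochSource {
  epoch_t epoch = 42;  // fixed epoch, purely for illustration
  epoch_t get_osdmap_epoch() const override { return epoch; }
};

int main() {
  FakePG pg;
  StateHistory hist(&pg);
  hist.exit_state("Started/Primary");
}

Because the history only sees the interface, any epoch source works in a
test, which is exactly what removing the PG* buys.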

src/osd/PG.cc

@@ -323,7 +323,6 @@ PG::PG(OSDService *o, OSDMapRef curmap,

 PG::~PG()
 {
-  pgstate_history.set_pg_in_destructor();
 #ifdef PG_DEBUG_REFS
   osd->remove_pgid(info.pgid, this);
 #endif
@@ -2382,7 +2381,8 @@ bool PG::set_force_recovery(bool b)
       did = true;
     }
   if (did) {
-    dout(20) << __func__ << " state " << pgstate_history.get_current_state() << dendl;
+    dout(20) << __func__ << " state " << recovery_state.get_current_state()
+             << dendl;
     osd->local_reserver.update_priority(info.pgid, get_recovery_priority());
   }
   return did;
@@ -2408,7 +2408,8 @@ bool PG::set_force_backfill(bool b)
       did = true;
     }
   if (did) {
-    dout(20) << __func__ << " state " << pgstate_history.get_current_state() << dendl;
+    dout(20) << __func__ << " state " << recovery_state.get_current_state()
+             << dendl;
     osd->local_reserver.update_priority(info.pgid, get_backfill_priority());
   }
   return did;
@@ -7034,7 +7035,7 @@ ostream& operator<<(ostream& out, const PG::BackfillInterval& bi)

 void PG::dump_pgstate_history(Formatter *f)
 {
   lock();
-  pgstate_history.dump(f);
+  recovery_state.dump_history(f);
   unlock();
 }

src/osd/PG.h

@@ -196,7 +196,7 @@ struct PGPool {
  *
  */

-class PG : public DoutPrefixProvider {
+class PG : public DoutPrefixProvider, public EpochSource {
   friend class NamedState;
   friend class PeeringState;
 public:
@@ -222,7 +222,7 @@ public:
     ceph_assert(osdmap_ref);
     return osdmap_ref;
   }
-  epoch_t get_osdmap_epoch() const {
+  epoch_t get_osdmap_epoch() const override {
     return osdmap_ref->get_epoch();
   }

@@ -942,8 +942,6 @@ public:
   bool dne() { return info.dne(); }

 protected:
-  PGStateHistory pgstate_history;
-
   /*
    * peer_info -- projected (updates _before_ replicas ack)
    * peer_missing -- committed (updates _after_ replicas ack)

src/osd/PGStateUtils.cc

@@ -2,62 +2,36 @@
 // vim: ts=8 sw=2 smarttab

 #include "PGStateUtils.h"
-#include "PG.h"
 #include "common/Clock.h"

 /*------NamedState----*/
-NamedState::NamedState(
-  PG *pg_, const char *state_name_)
-  : state_name(state_name_), enter_time(ceph_clock_now()), pg(pg_) {
-  pg->pgstate_history.enter(pg, enter_time, state_name);
-}
-
-NamedState::~NamedState() {
-  pg->pgstate_history.exit(state_name);
-}
+NamedState::NamedState(PGStateHistory *pgsh, const char *state_name)
+  : pgsh(pgsh), state_name(state_name) {
+  if(pgsh) {
+    pgsh->enter(ceph_clock_now(), state_name);
+  }
+}
+
+NamedState::~NamedState() {
+  if(pgsh) {
+    pgsh->exit(state_name);
+  }
+}

 /*---------PGStateHistory---------*/
-void PGStateHistory::enter(PG* pg, const utime_t entime, const char* state)
+void PGStateHistory::enter(const utime_t entime, const char* state)
 {
-  // Ignore trimming state machine for now
-  if (::strstr(state, "Trimming") != NULL) {
-    return;
-  } else if (pi != nullptr) {
-    pi->enter_state(entime, state);
-  } else {
-    // Store current state since we can't reliably take the PG lock here
-    if ( tmppi == nullptr) {
-      tmppi = std::unique_ptr<PGStateInstance>(new PGStateInstance);
-    }
-
-    thispg = pg;
-    tmppi->enter_state(entime, state);
-  }
+  if (pi == nullptr) {
+    pi = std::unique_ptr<PGStateInstance>(new PGStateInstance);
+  }
+  pi->enter_state(entime, state);
 }

 void PGStateHistory::exit(const char* state) {
-  // Ignore trimming state machine for now
-  // Do nothing if PG is being destroyed!
-  if (::strstr(state, "Trimming") != NULL || pg_in_destructor) {
-    return;
-  } else {
-    bool ilocked = false;
-    if(!thispg->is_locked()) {
-      thispg->lock();
-      ilocked = true;
-    }
-    if (pi == nullptr) {
-      buffer.push_back(std::unique_ptr<PGStateInstance>(tmppi.release()));
-      pi = buffer.back().get();
-      pi->setepoch(thispg->get_osdmap_epoch());
-    }
-
-    pi->exit_state(ceph_clock_now());
-    if (::strcmp(state, "Reset") == 0) {
-      this->reset();
-    }
-    if(ilocked) {
-      thispg->unlock();
-    }
-  }
+  pi->setepoch(es->get_osdmap_epoch());
+  pi->exit_state(ceph_clock_now());
+  if (pi->empty()) {
+    reset();
+  }
 }

src/osd/PGStateUtils.h

@@ -4,19 +4,28 @@
 #pragma once

 #include "include/utime.h"
 #include "common/Formatter.h"

 #include <stack>
 #include <vector>
 #include <boost/circular_buffer.hpp>

 class PG;
+class PGStateHistory;
+
+struct EpochSource {
+  virtual epoch_t get_osdmap_epoch() const = 0;
+  virtual ~EpochSource() {}
+};

 struct NamedState {
+  PGStateHistory *pgsh;
   const char *state_name;
   utime_t enter_time;
-  PG* pg;
   const char *get_state_name() { return state_name; }
-  NamedState(PG *pg_, const char *state_name_);
+  NamedState(
+    PGStateHistory *pgsh,
+    const char *state_name_);
   virtual ~NamedState();
 };

@@ -41,6 +50,10 @@ struct PGStateInstance {
     embedded_states.pop();
   }

+  bool empty() const {
+    return embedded_states.empty();
+  }
+
   epoch_t this_epoch;
   utime_t enter_time;
   std::vector<state_history_entry> state_history;
@@ -48,32 +61,27 @@
 };

 class PGStateHistory {
   // Member access protected with the PG lock
 public:
-  PGStateHistory() : buffer(10) {}
+  PGStateHistory(EpochSource *es) : buffer(10), es(es) {}

-  void enter(PG* pg, const utime_t entime, const char* state);
+  void enter(const utime_t entime, const char* state);

   void exit(const char* state);

   void reset() {
+    buffer.push_back(std::move(pi));
     pi = nullptr;
   }

-  void set_pg_in_destructor() { pg_in_destructor = true; }
-
   void dump(Formatter* f) const;

-  string get_current_state() {
+  const char *get_current_state() const {
     if (pi == nullptr) return "unknown";
     return std::get<1>(pi->embedded_states.top());
   }

 private:
-  bool pg_in_destructor = false;
-  PG* thispg = nullptr;
-  std::unique_ptr<PGStateInstance> tmppi;
-  PGStateInstance* pi = nullptr;
+  std::unique_ptr<PGStateInstance> pi;
   boost::circular_buffer<std::unique_ptr<PGStateInstance>> buffer;

+  EpochSource *es;
 };

src/osd/PeeringState.cc

@@ -11,6 +11,14 @@
 #include "messages/MRecoveryReserve.h"
 #include "messages/MOSDScrubReserve.h"

+PeeringState::PeeringState(CephContext *cct, spg_t spgid, PG *pg)
+  : state_history(pg),
+    machine(this, cct, spgid, pg, &state_history), cct(cct),
+    spgid(spgid), pg(pg), orig_ctx(0) {
+  machine.initiate();
+}
+
+
 void PeeringState::PeeringMachine::send_query(
   pg_shard_t to, const pg_query_t &query) {
   ceph_assert(state->rctx);
@@ -31,7 +39,7 @@ void PeeringState::PeeringMachine::send_query(
 /*------Crashed-------*/
 PeeringState::Crashed::Crashed(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Crashed")
+    NamedState(context< PeeringMachine >().state_history, "Crashed")
 {
   context< PeeringMachine >().log_enter(state_name);
   ceph_abort_msg("we got a bad state machine event");
@@ -41,7 +49,7 @@ PeeringState::Crashed::Crashed(my_context ctx)
 /*------Initial-------*/
 PeeringState::Initial::Initial(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Initial")
+    NamedState(context< PeeringMachine >().state_history, "Initial")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -82,7 +90,7 @@ void PeeringState::Initial::exit()
 /*------Started-------*/
 PeeringState::Started::Started(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started")
+    NamedState(context< PeeringMachine >().state_history, "Started")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -136,7 +144,7 @@ void PeeringState::Started::exit()
 /*--------Reset---------*/
 PeeringState::Reset::Reset(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Reset")
+    NamedState(context< PeeringMachine >().state_history, "Reset")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -220,7 +228,7 @@ void PeeringState::Reset::exit()
 /*-------Start---------*/
 PeeringState::Start::Start(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Start")
+    NamedState(context< PeeringMachine >().state_history, "Start")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -245,7 +253,7 @@ void PeeringState::Start::exit()
 /*---------Primary--------*/
 PeeringState::Primary::Primary(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -350,7 +358,7 @@ void PeeringState::Primary::exit()
 /*---------Peering--------*/
 PeeringState::Peering::Peering(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering"),
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering"),
     history_les_bound(false)
 {
   context< PeeringMachine >().log_enter(state_name);
@@ -446,7 +454,7 @@ void PeeringState::Peering::exit()
 /*------Backfilling-------*/
 PeeringState::Backfilling::Backfilling(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Backfilling")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Backfilling")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -560,7 +568,7 @@ void PeeringState::Backfilling::exit()

 PeeringState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitRemoteBackfillReserved"),
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitRemoteBackfillReserved"),
     backfill_osd_it(context< Active >().remote_shards_to_reserve_backfill.begin())
 {
   context< PeeringMachine >().log_enter(state_name);
@@ -658,7 +666,7 @@ PeeringState::WaitRemoteBackfillReserved::react(const RemoteReservationRevoked &
 /*--WaitLocalBackfillReserved--*/
 PeeringState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitLocalBackfillReserved")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitLocalBackfillReserved")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -686,7 +694,7 @@ void PeeringState::WaitLocalBackfillReserved::exit()
 /*----NotBackfilling------*/
 PeeringState::NotBackfilling::NotBackfilling(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/NotBackfilling")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/NotBackfilling")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -718,7 +726,7 @@ void PeeringState::NotBackfilling::exit()
 /*----NotRecovering------*/
 PeeringState::NotRecovering::NotRecovering(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/NotRecovering")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/NotRecovering")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -737,7 +745,7 @@ void PeeringState::NotRecovering::exit()
 /*---RepNotRecovering----*/
 PeeringState::RepNotRecovering::RepNotRecovering(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepNotRecovering")
+    NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepNotRecovering")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -762,7 +770,7 @@ void PeeringState::RepNotRecovering::exit()
 /*---RepWaitRecoveryReserved--*/
 PeeringState::RepWaitRecoveryReserved::RepWaitRecoveryReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepWaitRecoveryReserved")
+    NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepWaitRecoveryReserved")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -802,7 +810,7 @@ void PeeringState::RepWaitRecoveryReserved::exit()
 /*-RepWaitBackfillReserved*/
 PeeringState::RepWaitBackfillReserved::RepWaitBackfillReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepWaitBackfillReserved")
+    NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepWaitBackfillReserved")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -980,7 +988,7 @@ PeeringState::RepWaitBackfillReserved::react(
 /*---RepRecovering-------*/
 PeeringState::RepRecovering::RepRecovering(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepRecovering")
+    NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepRecovering")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -1043,7 +1051,7 @@ void PeeringState::RepRecovering::exit()
 /*------Activating--------*/
 PeeringState::Activating::Activating(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Activating")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Activating")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -1058,7 +1066,7 @@ void PeeringState::Activating::exit()

 PeeringState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitLocalRecoveryReserved")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitLocalRecoveryReserved")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -1103,7 +1111,7 @@ void PeeringState::WaitLocalRecoveryReserved::exit()

 PeeringState::WaitRemoteRecoveryReserved::WaitRemoteRecoveryReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitRemoteRecoveryReserved"),
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitRemoteRecoveryReserved"),
     remote_recovery_reservation_it(context< Active >().remote_shards_to_reserve_recovery.begin())
 {
   context< PeeringMachine >().log_enter(state_name);
@@ -1144,7 +1152,7 @@ void PeeringState::WaitRemoteRecoveryReserved::exit()

 PeeringState::Recovering::Recovering(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Recovering")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Recovering")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -1244,7 +1252,7 @@ void PeeringState::Recovering::exit()

 PeeringState::Recovered::Recovered(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Recovered")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Recovered")
 {
   pg_shard_t auth_log_shard;

@@ -1287,7 +1295,7 @@ void PeeringState::Recovered::exit()

 PeeringState::Clean::Clean(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Clean")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Clean")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -1330,7 +1338,7 @@ set<pg_shard_t> unique_osd_shard_set(const pg_shard_t & skip, const T &in)
 /*---------Active---------*/
 PeeringState::Active::Active(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Active"),
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active"),
     remote_shards_to_reserve_recovery(
       unique_osd_shard_set(
        context< PeeringMachine >().pg->pg_whoami,
@@ -1775,7 +1783,7 @@ void PeeringState::Active::exit()
 /*------ReplicaActive-----*/
 PeeringState::ReplicaActive::ReplicaActive(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive")
+    NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -1871,7 +1879,7 @@ void PeeringState::ReplicaActive::exit()
 /*-------Stray---*/
 PeeringState::Stray::Stray(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Stray")
+    NamedState(context< PeeringMachine >().state_history, "Started/Stray")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -1973,7 +1981,7 @@ void PeeringState::Stray::exit()
 /*--------ToDelete----------*/
 PeeringState::ToDelete::ToDelete(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ToDelete")
+    NamedState(context< PeeringMachine >().state_history, "Started/ToDelete")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -1993,7 +2001,7 @@ void PeeringState::ToDelete::exit()
 /*----WaitDeleteReserved----*/
 PeeringState::WaitDeleteReserved::WaitDeleteReserved(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg,
+    NamedState(context< PeeringMachine >().state_history,
               "Started/ToDelete/WaitDeleteReseved")
 {
   context< PeeringMachine >().log_enter(state_name);
@@ -2031,7 +2039,7 @@ void PeeringState::WaitDeleteReserved::exit()
 /*----Deleting-----*/
 PeeringState::Deleting::Deleting(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/ToDelete/Deleting")
+    NamedState(context< PeeringMachine >().state_history, "Started/ToDelete/Deleting")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -2060,7 +2068,7 @@ void PeeringState::Deleting::exit()
 /*--------GetInfo---------*/
 PeeringState::GetInfo::GetInfo(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/GetInfo")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/GetInfo")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -2205,7 +2213,8 @@ void PeeringState::GetInfo::exit()
 PeeringState::GetLog::GetLog(my_context ctx)
   : my_base(ctx),
     NamedState(
-      context< PeeringMachine >().pg, "Started/Primary/Peering/GetLog"),
+      context< PeeringMachine >().state_history,
+      "Started/Primary/Peering/GetLog"),
     msg(0)
 {
   context< PeeringMachine >().log_enter(state_name);
@@ -2333,7 +2342,7 @@ void PeeringState::GetLog::exit()
 /*------WaitActingChange--------*/
 PeeringState::WaitActingChange::WaitActingChange(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/WaitActingChange")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/WaitActingChange")
 {
   context< PeeringMachine >().log_enter(state_name);
 }
@@ -2393,7 +2402,7 @@ void PeeringState::WaitActingChange::exit()
 /*------Down--------*/
 PeeringState::Down::Down(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/Down")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/Down")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -2455,7 +2464,7 @@ boost::statechart::result PeeringState::Down::react(const MNotifyRec& infoevt)
 /*------Incomplete--------*/
 PeeringState::Incomplete::Incomplete(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/Incomplete")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/Incomplete")
 {
   context< PeeringMachine >().log_enter(state_name);
   PG *pg = context< PeeringMachine >().pg;
@@ -2522,7 +2531,7 @@ void PeeringState::Incomplete::exit()
 /*------GetMissing--------*/
 PeeringState::GetMissing::GetMissing(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/GetMissing")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/GetMissing")
 {
   context< PeeringMachine >().log_enter(state_name);

@@ -2666,7 +2675,7 @@ void PeeringState::GetMissing::exit()
 /*------WaitUpThru--------*/
 PeeringState::WaitUpThru::WaitUpThru(my_context ctx)
   : my_base(ctx),
-    NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/WaitUpThru")
+    NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/WaitUpThru")
 {
   context< PeeringMachine >().log_enter(state_name);
 }

src/osd/PeeringState.h

@@ -211,6 +211,7 @@ public:
   class PeeringMachine : public boost::statechart::state_machine< PeeringMachine, Initial > {
     PeeringState *state;
   public:
+    PGStateHistory *state_history;
     CephContext *cct;
     spg_t spgid;
     PG *pg;
@@ -226,8 +227,14 @@ public:
     void log_enter(const char *state_name);
     void log_exit(const char *state_name, utime_t duration);

-    PeeringMachine(PeeringState *state, CephContext *cct, spg_t spgid, PG *pg) :
-      state(state), cct(cct), spgid(spgid), pg(pg), event_count(0) {}
+    PeeringMachine(
+      PeeringState *state, CephContext *cct,
+      spg_t spgid, PG *pg,
+      PGStateHistory *state_history) :
+      state(state),
+      state_history(state_history),
+      cct(cct), spgid(spgid),
+      pg(pg), event_count(0) {}

     /* Accessor functions for state methods */
     ObjectStore::Transaction* get_cur_transaction() {
@@ -948,6 +955,7 @@ public:
     void exit();
   };

+  PGStateHistory state_history;
   PeeringMachine machine;
   CephContext* cct;
   spg_t spgid;
@@ -967,10 +975,7 @@ public:
   boost::optional<PeeringCtx> rctx;

 public:
-  explicit PeeringState(CephContext *cct, spg_t spgid, PG *pg)
-    : machine(this, cct, spgid, pg), cct(cct), spgid(spgid), pg(pg), orig_ctx(0) {
-    machine.initiate();
-  }
+  PeeringState(CephContext *cct, spg_t spgid, PG *pg);

   void handle_event(const boost::statechart::event_base &evt,
                    PeeringCtx *rctx) {
@@ -986,4 +991,11 @@ public:
     end_handle();
   }

+  void dump_history(Formatter *f) const {
+    state_history.dump(f);
+  }
+
+  const char *get_current_state() const {
+    return state_history.get_current_state();
+  }
 };

src/osd/PrimaryLogPG.cc

@@ -15286,7 +15286,7 @@ void PrimaryLogPG::SnapTrimmer::log_exit(const char *state_name, utime_t enter_t
 /* NotTrimming */
 PrimaryLogPG::NotTrimming::NotTrimming(my_context ctx)
   : my_base(ctx),
-    NamedState(context< SnapTrimmer >().pg, "NotTrimming")
+    NamedState(nullptr, "NotTrimming")
 {
   context< SnapTrimmer >().log_enter(state_name);
 }
@@ -15339,7 +15339,7 @@ boost::statechart::result PrimaryLogPG::WaitReservation::react(const SnapTrimRes
 /* AwaitAsyncWork */
 PrimaryLogPG::AwaitAsyncWork::AwaitAsyncWork(my_context ctx)
   : my_base(ctx),
-    NamedState(context< SnapTrimmer >().pg, "Trimming/AwaitAsyncWork")
+    NamedState(nullptr, "Trimming/AwaitAsyncWork")
 {
   auto *pg = context< SnapTrimmer >().pg;
   context< SnapTrimmer >().log_enter(state_name);

src/osd/PrimaryLogPG.h

@@ -1601,7 +1601,7 @@ private:

     explicit Trimming(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming") {
+        NamedState(nullptr, "Trimming") {
       context< SnapTrimmer >().log_enter(state_name);
       ceph_assert(context< SnapTrimmer >().can_trim());
      ceph_assert(in_flight.empty());
@@ -1626,7 +1626,7 @@ private:
     Context *wakeup = nullptr;
     explicit WaitTrimTimer(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming/WaitTrimTimer") {
+        NamedState(nullptr, "Trimming/WaitTrimTimer") {
       context< SnapTrimmer >().log_enter(state_name);
       ceph_assert(context<Trimming>().in_flight.empty());
      struct OnTimer : Context {
@@ -1676,7 +1676,7 @@ private:
     > reactions;
     explicit WaitRWLock(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRWLock") {
+        NamedState(nullptr, "Trimming/WaitRWLock") {
       context< SnapTrimmer >().log_enter(state_name);
       ceph_assert(context<Trimming>().in_flight.empty());
     }
@@ -1699,7 +1699,7 @@ private:
     > reactions;
     explicit WaitRepops(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRepops") {
+        NamedState(nullptr, "Trimming/WaitRepops") {
       context< SnapTrimmer >().log_enter(state_name);
       ceph_assert(!context<Trimming>().in_flight.empty());
     }
@@ -1753,7 +1753,7 @@ private:

     explicit WaitReservation(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming/WaitReservation") {
+        NamedState(nullptr, "Trimming/WaitReservation") {
       context< SnapTrimmer >().log_enter(state_name);
       ceph_assert(context<Trimming>().in_flight.empty());
       auto *pg = context< SnapTrimmer >().pg;
@@ -1786,7 +1786,7 @@ private:
     > reactions;
     explicit WaitScrub(my_context ctx)
       : my_base(ctx),
-        NamedState(context< SnapTrimmer >().pg, "Trimming/WaitScrub") {
+        NamedState(nullptr, "Trimming/WaitScrub") {
       context< SnapTrimmer >().log_enter(state_name);
     }
     void exit() {
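
For the repetitive NamedState changes above, here is a self-contained
sketch of the resulting pattern (StateHistory and the state names are
stand-ins, not ceph code): each state records enter on construction and
exit on destruction through the history pointer handed to NamedState, and
a nullptr history, as the snap-trimmer states now pass, disables recording.

// Minimal sketch of the NamedState wiring; compiles on its own.
#include <iostream>
#include <string>
#include <vector>

class StateHistory {  // stand-in for PGStateHistory
  std::vector<std::string> events;
public:
  void enter(const char *state) { events.push_back(std::string("enter ") + state); }
  void exit(const char *state)  { events.push_back(std::string("exit ") + state); }
  void dump() const { for (const auto &e : events) std::cout << e << "\n"; }
};

struct NamedState {
  StateHistory *hist;
  const char *state_name;
  NamedState(StateHistory *hist, const char *state_name)
    : hist(hist), state_name(state_name) {
    if (hist) hist->enter(state_name);  // nullptr => recording disabled
  }
  virtual ~NamedState() {
    if (hist) hist->exit(state_name);
  }
};

int main() {
  StateHistory history;
  {
    NamedState started(&history, "Started");    // recorded
    NamedState trimming(nullptr, "Trimming");   // snap-trimmer style: skipped
  }
  history.dump();  // prints enter/exit for "Started" only
}

This is why the patch can drop the ::strstr(state, "Trimming") checks: the
trimming states simply opt out at the constructor instead.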