Merge PR #27696 into master

* refs/pull/27696/head:
	osd: make use of pg history and past_intervals in pg_create2 messages
	mon/OSDMonitor: track history and past_intervals for creating pgs
	osd/osd_types: make PastIntervals pi_compact_rep print participants
	osd/osd_types: take bare const OSDMap * to check_new_interval
	osd/osd_types: add pg_history_t ctor that takes creation epoch+stamp

Reviewed-by: Neha Ojha <nojha@redhat.com>
Sage Weil 2019-04-24 08:15:19 -05:00
commit 08f4768bec
9 changed files with 347 additions and 100 deletions


@ -12,11 +12,12 @@
class MOSDPGCreate2 : public Message {
public:
static constexpr int HEAD_VERSION = 1;
static constexpr int HEAD_VERSION = 2;
static constexpr int COMPAT_VERSION = 1;
epoch_t epoch = 0;
map<spg_t,pair<epoch_t,utime_t>> pgs;
map<spg_t,pair<pg_history_t,PastIntervals>> pg_extra;
MOSDPGCreate2()
: Message{MSG_OSD_PG_CREATE2, HEAD_VERSION, COMPAT_VERSION} {}
@ -38,12 +39,16 @@ public:
using ceph::encode;
encode(epoch, payload);
encode(pgs, payload);
encode(pg_extra, payload);
}
void decode_payload() override {
auto p = payload.cbegin();
using ceph::decode;
decode(epoch, p);
decode(pgs, p);
if (header.version >= 2) {
decode(pg_extra, p);
}
}
private:
template<class T, typename... Args>
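
The hunk above bumps HEAD_VERSION to 2 while leaving COMPAT_VERSION at 1: the new pg_extra map is always appended on encode, and decoders only read it when the sender's header version says it is present. Below is a minimal, self-contained sketch of that rule using invented stand-in types (Buf, ToyCreateMsg) rather than Ceph's bufferlist and Message classes.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct Buf {  // toy stand-in for ceph::bufferlist
  std::vector<uint8_t> data;
  size_t off = 0;
  void put_u32(uint32_t v) {
    const uint8_t *p = reinterpret_cast<const uint8_t *>(&v);
    data.insert(data.end(), p, p + sizeof(v));
  }
  uint32_t get_u32() {
    uint32_t v;
    std::memcpy(&v, data.data() + off, sizeof(v));
    off += sizeof(v);
    return v;
  }
};

constexpr uint32_t HEAD_VERSION = 2;    // bumped: the extra field is now encoded
constexpr uint32_t COMPAT_VERSION = 1;  // v1 peers can still parse the prefix

struct ToyCreateMsg {
  uint32_t epoch = 0;
  uint32_t pg_extra_count = 0;  // stands in for the new pg_extra map

  void encode(Buf &bl) const {
    bl.put_u32(epoch);           // v1 layout comes first
    bl.put_u32(pg_extra_count);  // new v2 field, always appended
  }
  void decode(Buf &bl, uint32_t sender_version) {
    epoch = bl.get_u32();
    if (sender_version >= 2)     // mirrors "if (header.version >= 2)" above
      pg_extra_count = bl.get_u32();
  }
};

int main() {
  ToyCreateMsg in, out;
  in.epoch = 42;
  in.pg_extra_count = 3;
  Buf bl;
  in.encode(bl);
  out.decode(bl, HEAD_VERSION);
  std::cout << out.epoch << " " << out.pg_extra_count << std::endl;  // prints "42 3"
}

A version-1 peer simply stops reading after the fields it knows, which is why COMPAT_VERSION can stay at 1.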


@ -14,10 +14,88 @@
struct creating_pgs_t {
epoch_t last_scan_epoch = 0;
/// pgs we are currently creating
std::map<pg_t, std::pair<epoch_t, utime_t> > pgs;
struct pg_create_info {
epoch_t create_epoch;
utime_t create_stamp;
struct create_info {
// NOTE: pre-octopus instances of this class will have a
// zeroed-out history
vector<int> up;
int up_primary = -1;
vector<int> acting;
int acting_primary = -1;
pg_history_t history;
PastIntervals past_intervals;
void encode(bufferlist& bl, uint64_t features) const {
using ceph::encode;
if (!HAVE_FEATURE(features, SERVER_OCTOPUS)) {
// was pair<epoch_t,utime_t> prior to octopus
encode(create_epoch, bl);
encode(create_stamp, bl);
return;
}
ENCODE_START(1, 1, bl);
encode(create_epoch, bl);
encode(create_stamp, bl);
encode(up, bl);
encode(up_primary, bl);
encode(acting, bl);
encode(acting_primary, bl);
encode(history, bl);
encode(past_intervals, bl);
ENCODE_FINISH(bl);
}
void decode_legacy(bufferlist::const_iterator& p) {
using ceph::decode;
decode(create_epoch, p);
decode(create_stamp, p);
}
void decode(bufferlist::const_iterator& p) {
using ceph::decode;
DECODE_START(1, p);
decode(create_epoch, p);
decode(create_stamp, p);
decode(up, p);
decode(up_primary, p);
decode(acting, p);
decode(acting_primary, p);
decode(history, p);
decode(past_intervals, p);
DECODE_FINISH(p);
}
void dump(Formatter *f) const {
f->dump_unsigned("create_epoch", create_epoch);
f->dump_stream("create_stamp") << create_stamp;
f->open_array_section("up");
for (auto& i : up) {
f->dump_unsigned("osd", i);
}
f->close_section();
f->dump_int("up_primary", up_primary);
f->open_array_section("acting");
for (auto& i : acting) {
f->dump_unsigned("osd", i);
}
f->close_section();
f->dump_int("acting_primary", up_primary);
f->dump_object("pg_history", history);
f->dump_object("past_intervals", past_intervals);
}
pg_create_info() {}
pg_create_info(epoch_t e, utime_t t)
: create_epoch(e),
create_stamp(t) {
// NOTE: we don't initialize the other fields here; see
// OSDMonitor::update_pending_pgs()
}
};
/// pgs we are currently creating
std::map<pg_t, pg_create_info> pgs;
struct pool_create_info {
epoch_t created;
utime_t modified;
uint64_t start = 0;
@ -42,7 +120,7 @@ struct creating_pgs_t {
};
/// queue of pgs we still need to create (poolid -> <created, set of ps>)
map<int64_t,create_info> queue;
map<int64_t,pool_create_info> queue;
/// pools that exist in the osdmap for which at least one pg has been created
std::set<int64_t> created_pools;
@ -76,18 +154,34 @@ struct creating_pgs_t {
queue.erase(removed_pool);
return total - pgs.size();
}
void encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
void encode(bufferlist& bl, uint64_t features) const {
unsigned v = 3;
if (!HAVE_FEATURE(features, SERVER_OCTOPUS)) {
v = 2;
}
ENCODE_START(v, 1, bl);
encode(last_scan_epoch, bl);
encode(pgs, bl);
encode(pgs, bl, features);
encode(created_pools, bl);
encode(queue, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(2, bl);
DECODE_START(3, bl);
decode(last_scan_epoch, bl);
decode(pgs, bl);
if (struct_v >= 3) {
decode(pgs, bl);
} else {
// legacy pg encoding
pgs.clear();
uint32_t num;
decode(num, bl);
while (num--) {
pg_t pgid;
decode(pgid, bl);
pgs[pgid].decode_legacy(bl);
}
}
decode(created_pools, bl);
if (struct_v >= 2)
decode(queue, bl);
@ -99,8 +193,7 @@ struct creating_pgs_t {
for (auto& pg : pgs) {
f->open_object_section("pg");
f->dump_stream("pgid") << pg.first;
f->dump_unsigned("epoch", pg.second.first);
f->dump_stream("ctime") << pg.second.second;
f->dump_object("pg_create_info", pg.second);
f->close_section();
}
f->close_section();
@ -124,16 +217,17 @@ struct creating_pgs_t {
static void generate_test_instances(list<creating_pgs_t*>& o) {
auto c = new creating_pgs_t;
c->last_scan_epoch = 17;
c->pgs.emplace(pg_t{42, 2}, make_pair(31, utime_t{891, 113}));
c->pgs.emplace(pg_t{44, 2}, make_pair(31, utime_t{891, 113}));
c->pgs.emplace(pg_t{42, 2}, pg_create_info(31, utime_t{891, 113}));
c->pgs.emplace(pg_t{44, 2}, pg_create_info(31, utime_t{891, 113}));
c->created_pools = {0, 1};
o.push_back(c);
c = new creating_pgs_t;
c->last_scan_epoch = 18;
c->pgs.emplace(pg_t{42, 3}, make_pair(31, utime_t{891, 113}));
c->pgs.emplace(pg_t{42, 3}, pg_create_info(31, utime_t{891, 113}));
c->created_pools = {};
o.push_back(c);
}
};
WRITE_CLASS_ENCODER(creating_pgs_t::create_info);
WRITE_CLASS_ENCODER(creating_pgs_t);
WRITE_CLASS_ENCODER_FEATURES(creating_pgs_t::pg_create_info)
WRITE_CLASS_ENCODER(creating_pgs_t::pool_create_info)
WRITE_CLASS_ENCODER_FEATURES(creating_pgs_t)
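
Two compatibility mechanisms appear in the creating_pgs_t changes above: encode() picks struct version 3 or 2 depending on whether the peer has the SERVER_OCTOPUS feature, and decode() falls back to re-reading the pgs map element by element when struct_v < 3, because the old encoding stored a bare (epoch, stamp) pair per pg rather than a self-describing pg_create_info. The toy below (invented Cursor and Info types, plain integers instead of real encodings) illustrates only that legacy-decode fallback.

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct Cursor {  // pretend input stream of already-decoded integers
  const std::vector<uint32_t> *words;
  size_t i;
  uint32_t next() { return (*words)[i++]; }
};

struct Info {  // stand-in for pg_create_info
  uint32_t create_epoch = 0;
  uint32_t create_stamp = 0;
  void decode_legacy(Cursor &c) {  // old on-wire form: bare (epoch, stamp)
    create_epoch = c.next();
    create_stamp = c.next();
  }
};

int main() {
  // pretend payload: struct_v, entry count, then (pgid, epoch, stamp) triples
  std::vector<uint32_t> words = {2, 2, 42, 31, 891, 44, 31, 891};
  Cursor c{&words, 0};

  uint32_t struct_v = c.next();
  std::map<uint32_t, Info> pgs;
  if (struct_v >= 3) {
    // new encoding: each value decodes itself (omitted in this toy)
  } else {
    // legacy encoding: re-read the map by hand, like the struct_v < 3 branch above
    uint32_t num = c.next();
    while (num--) {
      uint32_t pgid = c.next();
      pgs[pgid].decode_legacy(c);
    }
  }
  for (const auto &p : pgs)
    std::cout << "pg " << p.first << " created at e" << p.second.create_epoch << std::endl;
}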


@ -843,8 +843,10 @@ OSDMonitor::update_pending_pgs(const OSDMap::Incremental& inc,
const pg_t pgid{ps, static_cast<uint64_t>(poolid)};
// NOTE: use the *current* epoch as the PG creation epoch so that the
// OSD does not have to generate a long set of PastIntervals.
pending_creatings.pgs.emplace(pgid, make_pair(inc.epoch,
p->second.modified));
pending_creatings.pgs.emplace(
pgid,
creating_pgs_t::pg_create_info(inc.epoch,
p->second.modified));
dout(10) << __func__ << " adding " << pgid << dendl;
}
p->second.start = end;
@ -859,6 +861,89 @@ OSDMonitor::update_pending_pgs(const OSDMap::Incremental& inc,
}
dout(10) << __func__ << " queue remaining: " << pending_creatings.queue.size()
<< " pools" << dendl;
if (mon->monmap->min_mon_release >= CEPH_RELEASE_OCTOPUS) {
// walk creating pgs' history and past_intervals forward
for (auto& i : pending_creatings.pgs) {
// this mirrors PG::start_peering_interval()
pg_t pgid = i.first;
// this is a bit imprecise, but sufficient?
struct min_size_predicate_t : public IsPGRecoverablePredicate {
const pg_pool_t *pi;
bool operator()(const set<pg_shard_t> &have) const {
return have.size() >= pi->min_size;
}
explicit min_size_predicate_t(const pg_pool_t *i) : pi(i) {}
} min_size_predicate(nextmap.get_pg_pool(pgid.pool()));
vector<int> up, acting;
int up_primary, acting_primary;
nextmap.pg_to_up_acting_osds(
pgid, &up, &up_primary, &acting, &acting_primary);
if (i.second.history.epoch_created == 0) {
// new pg entry, set it up
i.second.up = up;
i.second.acting = acting;
i.second.up_primary = up_primary;
i.second.acting_primary = acting_primary;
i.second.history = pg_history_t(i.second.create_epoch,
i.second.create_stamp);
dout(10) << __func__ << " pg " << pgid << " just added, "
<< " up " << i.second.up
<< " p " << i.second.up_primary
<< " acting " << i.second.acting
<< " p " << i.second.acting_primary
<< " history " << i.second.history
<< " past_intervals " << i.second.past_intervals
<< dendl;
} else {
std::stringstream debug;
if (PastIntervals::check_new_interval(
i.second.acting_primary, acting_primary,
i.second.acting, acting,
i.second.up_primary, up_primary,
i.second.up, up,
i.second.history.same_interval_since,
i.second.history.last_epoch_clean,
&nextmap,
&osdmap,
pgid,
&min_size_predicate,
&i.second.past_intervals,
&debug)) {
epoch_t e = inc.epoch;
i.second.history.same_interval_since = e;
if (i.second.up != up) {
i.second.history.same_up_since = e;
}
if (i.second.acting_primary != acting_primary) {
i.second.history.same_primary_since = e;
}
if (pgid.is_split(
osdmap.get_pg_num(pgid.pool()),
nextmap.get_pg_num(pgid.pool()),
nullptr)) {
i.second.history.last_epoch_split = e;
}
dout(10) << __func__ << " pg " << pgid << " new interval,"
<< " up " << i.second.up << " -> " << up
<< " p " << i.second.up_primary << " -> " << up_primary
<< " acting " << i.second.acting << " -> " << acting
<< " p " << i.second.acting_primary << " -> "
<< acting_primary
<< " history " << i.second.history
<< " past_intervals " << i.second.past_intervals
<< dendl;
dout(20) << " debug: " << debug.str() << dendl;
i.second.up = up;
i.second.acting = acting;
i.second.up_primary = up_primary;
i.second.acting_primary = acting_primary;
}
}
}
}
dout(10) << __func__
<< " " << (pending_creatings.pgs.size() - total)
<< "/" << pending_creatings.pgs.size()
@ -1102,7 +1187,13 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
// process the pool flag removal below in the same osdmap epoch.
auto pending_creatings = update_pending_pgs(pending_inc, tmp);
bufferlist creatings_bl;
encode(pending_creatings, creatings_bl);
uint64_t features = CEPH_FEATURES_ALL;
if (mon->monmap->min_mon_release < CEPH_RELEASE_OCTOPUS) {
dout(20) << __func__ << " encoding pending pgs without octopus features"
<< dendl;
features &= ~CEPH_FEATURE_SERVER_OCTOPUS;
}
encode(pending_creatings, creatings_bl, features);
t->put(OSD_PG_CREATING_PREFIX, "creating", creatings_bl);
// remove any old (or incompat) POOL_CREATING flags
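
encode_pending() above masks CEPH_FEATURE_SERVER_OCTOPUS out of the feature bits when the monitor quorum still allows pre-octopus members, so the creating-pgs blob stays readable by older mons. A hedged sketch of that gate, with an invented helper name and a placeholder feature bit rather than the real CEPH_FEATURE_SERVER_OCTOPUS value:

#include <cstdint>
#include <iostream>

constexpr uint64_t FEATURE_SERVER_OCTOPUS = 1ull << 57;  // placeholder bit, not the real value
constexpr uint64_t FEATURES_ALL = ~0ull;

uint64_t creating_pgs_encode_features(bool quorum_all_octopus) {
  uint64_t features = FEATURES_ALL;
  if (!quorum_all_octopus)
    features &= ~FEATURE_SERVER_OCTOPUS;  // mirrors the min_mon_release check above
  return features;
}

int main() {
  std::cout << std::boolalpha
            << bool(creating_pgs_encode_features(true) & FEATURE_SERVER_OCTOPUS) << std::endl   // true
            << bool(creating_pgs_encode_features(false) & FEATURE_SERVER_OCTOPUS) << std::endl; // false
}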
@ -4240,7 +4331,7 @@ void OSDMonitor::update_creating_pgs()
<< dendl;
continue;
}
auto mapped = pg.second.first;
auto mapped = pg.second.create_epoch;
dout(20) << __func__ << " looking up " << pgid << "@" << mapped << dendl;
spg_t spgid(pgid);
mapping.get_primary_and_shard(pgid, &acting_primary, &spgid);
@ -4313,16 +4404,23 @@ epoch_t OSDMonitor::send_pg_creates(int osd, Connection *con, epoch_t next) cons
oldm = new MOSDPGCreate(creating_pgs_epoch);
}
oldm->mkpg.emplace(pg.pgid,
pg_create_t{create->second.first, pg.pgid, 0});
oldm->ctimes.emplace(pg.pgid, create->second.second);
pg_create_t{create->second.create_epoch, pg.pgid, 0});
oldm->ctimes.emplace(pg.pgid, create->second.create_stamp);
} else {
if (!m) {
m = new MOSDPGCreate2(creating_pgs_epoch);
}
m->pgs.emplace(pg, create->second);
m->pgs.emplace(pg, make_pair(create->second.create_epoch,
create->second.create_stamp));
if (create->second.history.epoch_created) {
dout(20) << __func__ << " " << pg << " " << create->second.history
<< " " << create->second.past_intervals << dendl;
m->pg_extra.emplace(pg, make_pair(create->second.history,
create->second.past_intervals));
}
}
dout(20) << __func__ << " will create " << pg
<< " at " << create->second.first << dendl;
<< " at " << create->second.create_epoch << dendl;
}
}
if (m) {
@ -12599,9 +12697,10 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
bool creating_now;
{
std::lock_guard<std::mutex> l(creating_pgs_lock);
auto emplaced = creating_pgs.pgs.emplace(pgid,
make_pair(osdmap.get_epoch(),
ceph_clock_now()));
auto emplaced = creating_pgs.pgs.emplace(
pgid,
creating_pgs_t::pg_create_info(osdmap.get_epoch(),
ceph_clock_now()));
creating_now = emplaced.second;
}
if (creating_now) {


@ -4705,14 +4705,7 @@ void OSD::build_initial_pg_history(
PastIntervals *pi)
{
dout(10) << __func__ << " " << pgid << " created " << created << dendl;
h->epoch_created = created;
h->epoch_pool_created = created;
h->same_interval_since = created;
h->same_up_since = created;
h->same_primary_since = created;
h->last_scrub_stamp = created_stamp;
h->last_deep_scrub_stamp = created_stamp;
h->last_clean_scrub_stamp = created_stamp;
*h = pg_history_t(created, created_stamp);
OSDMapRef lastmap = service.get_map(created);
int up_primary, acting_primary;
@ -4746,8 +4739,8 @@ void OSD::build_initial_pg_history(
up, new_up,
h->same_interval_since,
h->last_epoch_clean,
osdmap,
lastmap,
osdmap.get(),
lastmap.get(),
pgid.pgid,
&min_size_predicate,
pi,
@ -9007,6 +9000,9 @@ void OSD::split_pgs(
*/
void OSD::handle_pg_create(OpRequestRef op)
{
// NOTE: this can be removed in P release (mimic is the last version to
// send MOSDPGCreate messages).
const MOSDPGCreate *m = static_cast<const MOSDPGCreate*>(op->get_req());
ceph_assert(m->get_type() == MSG_OSD_PG_CREATE);
@ -9270,33 +9266,55 @@ void OSD::handle_fast_pg_create(MOSDPGCreate2 *m)
spg_t pgid = p.first;
epoch_t created = p.second.first;
utime_t created_stamp = p.second.second;
dout(20) << __func__ << " " << pgid << " e" << created
<< "@" << created_stamp << dendl;
pg_history_t h;
h.epoch_created = created;
h.epoch_pool_created = created;
h.same_up_since = created;
h.same_interval_since = created;
h.same_primary_since = created;
h.last_scrub_stamp = created_stamp;
h.last_deep_scrub_stamp = created_stamp;
h.last_clean_scrub_stamp = created_stamp;
enqueue_peering_evt(
pgid,
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
m->epoch,
m->epoch,
NullEvt(),
true,
new PGCreateInfo(
pgid,
created,
h,
PastIntervals(),
true)
)));
auto q = m->pg_extra.find(pgid);
if (q == m->pg_extra.end()) {
dout(20) << __func__ << " " << pgid << " e" << created
<< "@" << created_stamp
<< " (no history or past_intervals)" << dendl;
// pre-octopus ... no pg history. this can be removed in Q release.
enqueue_peering_evt(
pgid,
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
m->epoch,
m->epoch,
NullEvt(),
true,
new PGCreateInfo(
pgid,
created,
pg_history_t(created, created_stamp),
PastIntervals(),
true)
)));
} else {
dout(20) << __func__ << " " << pgid << " e" << created
<< "@" << created_stamp
<< " history " << q->second.first
<< " pi " << q->second.second << dendl;
if (!q->second.second.empty() &&
m->epoch < q->second.second.get_bounds().second) {
clog->error() << "got pg_create on " << pgid << " epoch " << m->epoch
<< " and unmatched past_intervals " << q->second.second
<< " (history " << q->second.first << ")";
} else {
enqueue_peering_evt(
pgid,
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
m->epoch,
m->epoch,
NullEvt(),
true,
new PGCreateInfo(
pgid,
m->epoch,
q->second.first,
q->second.second,
true)
)));
}
}
}
{
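
handle_fast_pg_create() above now has two paths: without a pg_extra entry it synthesizes pg_history_t(created, created_stamp) as before, and with one it sanity-checks that the supplied past_intervals do not claim epochs newer than the message itself before enqueuing the peering event. A toy version of that check (invented Bounds type standing in for PastIntervals::get_bounds(), with the bound semantics simplified):

#include <iostream>

struct Bounds {  // stand-in for PastIntervals::get_bounds(): span of recorded epochs
  unsigned first;
  unsigned second;
};

// Accept the create only if the supplied intervals do not extend past the
// epoch of the message carrying them (mirrors the clog->error() branch above).
bool acceptable(unsigned msg_epoch, bool have_intervals, Bounds b) {
  if (have_intervals && msg_epoch < b.second)
    return false;  // bogus: intervals claim epochs newer than the message
  return true;     // ok to enqueue the peering event
}

int main() {
  std::cout << acceptable(40, true, {31, 38}) << std::endl;  // 1: intervals end before the message epoch
  std::cout << acceptable(35, true, {31, 38}) << std::endl;  // 0: rejected
  std::cout << acceptable(35, false, {0, 0}) << std::endl;   // 1: pre-octopus create, no intervals at all
}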


@ -6122,8 +6122,8 @@ bool PG::should_restart_peering(
newupprimary,
up,
newup,
osdmap,
lastmap,
osdmap.get(),
lastmap.get(),
info.pgid.pgid)) {
dout(20) << "new interval newup " << newup
<< " newacting " << newacting << dendl;
@ -6277,8 +6277,8 @@ void PG::start_peering_interval(
oldup, newup,
info.history.same_interval_since,
info.history.last_epoch_clean,
osdmap,
lastmap,
osdmap.get(),
lastmap.get(),
info.pgid.pgid,
recoverable.get(),
&past_intervals,


@ -3578,7 +3578,8 @@ public:
}
ostream &print(ostream &out) const override {
return out << "([" << first << "," << last
<< "] intervals=" << intervals << ")";
<< "] all_participants=" << all_participants
<< " intervals=" << intervals << ")";
}
void encode(ceph::buffer::list &bl) const override {
ENCODE_START(1, 1, bl);
@ -3774,8 +3775,8 @@ bool PastIntervals::is_new_interval(
int new_up_primary,
const vector<int> &old_up,
const vector<int> &new_up,
OSDMapRef osdmap,
OSDMapRef lastmap,
const OSDMap *osdmap,
const OSDMap *lastmap,
pg_t pgid)
{
const pg_pool_t *plast = lastmap->get_pg_pool(pgid.pool());
@ -3821,8 +3822,8 @@ bool PastIntervals::check_new_interval(
const vector<int> &new_up,
epoch_t same_interval_since,
epoch_t last_epoch_clean,
OSDMapRef osdmap,
OSDMapRef lastmap,
const OSDMap *osdmap,
const OSDMap *lastmap,
pg_t pgid,
IsPGRecoverablePredicate *could_have_gone_active,
PastIntervals *past_intervals,


@ -2571,16 +2571,16 @@ WRITE_CLASS_ENCODER(pg_hit_set_history_t)
* history they need to worry about.
*/
struct pg_history_t {
epoch_t epoch_created; // epoch in which *pg* was created (pool or pg)
epoch_t epoch_pool_created; // epoch in which *pool* was created
epoch_t epoch_created = 0; // epoch in which *pg* was created (pool or pg)
epoch_t epoch_pool_created = 0; // epoch in which *pool* was created
// (note: may be pg creation epoch for
// pre-luminous clusters)
epoch_t last_epoch_started; // lower bound on last epoch started (anywhere, not necessarily locally)
epoch_t last_interval_started; // first epoch of last_epoch_started interval
epoch_t last_epoch_clean; // lower bound on last epoch the PG was completely clean.
epoch_t last_interval_clean; // first epoch of last_epoch_clean interval
epoch_t last_epoch_split; // as parent or child
epoch_t last_epoch_marked_full; // pool or cluster
epoch_t last_epoch_started = 0; // lower bound on last epoch started (anywhere, not necessarily locally)
epoch_t last_interval_started = 0; // first epoch of last_epoch_started interval
epoch_t last_epoch_clean = 0; // lower bound on last epoch the PG was completely clean.
epoch_t last_interval_clean = 0; // first epoch of last_epoch_clean interval
epoch_t last_epoch_split = 0; // as parent or child
epoch_t last_epoch_marked_full = 0; // pool or cluster
/**
* In the event of a map discontinuity, same_*_since may reflect the first
@ -2589,9 +2589,9 @@ struct pg_history_t {
* must have been a clean interval between e and now and that we cannot be
* in the active set during the interval containing e.
*/
epoch_t same_up_since; // same acting set since
epoch_t same_interval_since; // same acting AND up set since
epoch_t same_primary_since; // same primary at least back through this epoch.
epoch_t same_up_since = 0; // same up set since
epoch_t same_interval_since = 0; // same acting AND up set since
epoch_t same_primary_since = 0; // same primary at least back through this epoch.
eversion_t last_scrub;
eversion_t last_deep_scrub;
@ -2619,16 +2619,16 @@ struct pg_history_t {
l.last_clean_scrub_stamp == r.last_clean_scrub_stamp;
}
pg_history_t()
: epoch_created(0),
epoch_pool_created(0),
last_epoch_started(0),
last_interval_started(0),
last_epoch_clean(0),
last_interval_clean(0),
last_epoch_split(0),
last_epoch_marked_full(0),
same_up_since(0), same_interval_since(0), same_primary_since(0) {}
pg_history_t() {}
pg_history_t(epoch_t created, utime_t stamp)
: epoch_created(created),
epoch_pool_created(created),
same_up_since(created),
same_interval_since(created),
same_primary_since(created),
last_scrub_stamp(stamp),
last_deep_scrub_stamp(stamp),
last_clean_scrub_stamp(stamp) {}
bool merge(const pg_history_t &other) {
// Here, we only update the fields which cannot be calculated from the OSDmap.
@ -3137,8 +3137,8 @@ public:
int new_up_primary, ///< [in] up primary of osdmap
const std::vector<int> &old_up, ///< [in] up as of lastmap
const std::vector<int> &new_up, ///< [in] up as of osdmap
std::shared_ptr<const OSDMap> osdmap, ///< [in] current map
std::shared_ptr<const OSDMap> lastmap, ///< [in] last map
const OSDMap *osdmap, ///< [in] current map
const OSDMap *lastmap, ///< [in] last map
pg_t pgid ///< [in] pgid for pg
);
@ -3146,6 +3146,24 @@ public:
* Integrates a new map into *past_intervals, returns true
* if an interval was closed out.
*/
static bool check_new_interval(
int old_acting_primary, ///< [in] primary as of lastmap
int new_acting_primary, ///< [in] primary as of osdmap
const std::vector<int> &old_acting, ///< [in] acting as of lastmap
const std::vector<int> &new_acting, ///< [in] acting as of osdmap
int old_up_primary, ///< [in] up primary of lastmap
int new_up_primary, ///< [in] up primary of osdmap
const std::vector<int> &old_up, ///< [in] up as of lastmap
const std::vector<int> &new_up, ///< [in] up as of osdmap
epoch_t same_interval_since, ///< [in] as of osdmap
epoch_t last_epoch_clean, ///< [in] current
const OSDMap *osdmap, ///< [in] current map
const OSDMap *lastmap, ///< [in] last map
pg_t pgid, ///< [in] pgid for pg
IsPGRecoverablePredicate *could_have_gone_active, ///< [in] predicate whether the pg can be active
PastIntervals *past_intervals, ///< [out] intervals
std::ostream *out = 0 ///< [out] debug ostream
);
static bool check_new_interval(
int old_acting_primary, ///< [in] primary as of lastmap
int new_acting_primary, ///< [in] primary as of osdmap
@ -3163,7 +3181,19 @@ public:
IsPGRecoverablePredicate *could_have_gone_active, ///< [in] predicate whether the pg can be active
PastIntervals *past_intervals, ///< [out] intervals
std::ostream *out = 0 ///< [out] debug ostream
);
) {
return check_new_interval(
old_acting_primary, new_acting_primary,
old_acting, new_acting,
old_up_primary, new_up_primary,
old_up, new_up,
same_interval_since, last_epoch_clean,
osdmap.get(), lastmap.get(),
pgid,
could_have_gone_active,
past_intervals,
out);
}
friend std::ostream& operator<<(std::ostream& out, const PastIntervals &i);
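
The signature change in this header, taking bare const OSDMap * instead of std::shared_ptr<const OSDMap>, lets the monitor (which does not hold an OSDMapRef in this path) and the OSD (which does) share one entry point, with an inline forwarding overload kept for existing shared_ptr call sites. A minimal sketch of that pattern with an invented Map type:

#include <iostream>
#include <memory>

struct Map { int epoch; };  // stand-in for OSDMap

// primary entry point takes bare const pointers, as in the header above
bool is_new_interval(const Map *newmap, const Map *lastmap) {
  return newmap->epoch != lastmap->epoch;  // placeholder for the real checks
}

// thin forwarding overload for callers that still hold shared_ptrs, mirroring
// the inline check_new_interval() wrapper added above
bool is_new_interval(std::shared_ptr<const Map> newmap,
                     std::shared_ptr<const Map> lastmap) {
  return is_new_interval(newmap.get(), lastmap.get());
}

int main() {
  Map held_by_value{10};                                    // monitor-style caller
  auto held_shared = std::make_shared<const Map>(Map{11});  // OSD-style caller
  std::cout << is_new_interval(&held_by_value, held_shared.get()) << std::endl;  // 1
  std::cout << is_new_interval(held_shared, held_shared) << std::endl;           // 0
}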


@ -188,7 +188,7 @@ TYPE(LevelDBStoreStats)
TYPE(ScrubResult)
#include "mon/CreatingPGs.h"
TYPE(creating_pgs_t)
TYPE_FEATUREFUL(creating_pgs_t)
#include "mgr/ServiceMap.h"
TYPE_FEATUREFUL(ServiceMap)


@ -572,7 +572,7 @@ static int update_creating_pgs(MonitorDBStore& st)
creating.last_scan_epoch = last_osdmap_epoch;
bufferlist newbl;
::encode(creating, newbl);
encode(creating, newbl, CEPH_FEATURES_ALL);
auto t = make_shared<MonitorDBStore::Transaction>();
t->put("osd_pg_creating", "creating", newbl);