rgw_admin: show oldest metadata change not applied

in the output of 'radosgw-admin sync status'

Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
This commit is contained in:
Yehuda Sadeh 2016-03-09 14:07:16 -08:00
parent b15f1ec195
commit 03f8e624d9
3 changed files with 135 additions and 56 deletions

View File

@ -1574,6 +1574,20 @@ static int read_current_period_id(RGWRados* store, const std::string& realm_id,
return 0;
}
// Append the accumulated contents of ss (if any) to the status list l,
// then reset ss so it can be reused for the next line of output.
void flush_ss(std::stringstream& ss, std::list<std::string>& l)
{
  std::string line = ss.str();
  if (!line.empty()) {
    l.push_back(line);
  }
  ss.str(std::string());
}

// Flush any pending text in ss into l and hand the (now empty) stream
// back, so callers can write `push_ss(ss, l) << "next status line";`.
std::stringstream& push_ss(std::stringstream& ss, std::list<std::string>& l)
{
  flush_ss(ss, l);
  return ss;
}
static void get_md_sync_status(list<string>& status)
{
RGWMetaSyncStatusManager sync(store, store->get_async_rados());
@ -1631,19 +1645,13 @@ static void get_md_sync_status(list<string>& status)
}
stringstream ss;
ss << "full sync: " << num_full << "/" << total_shards << " shards";
status.push_back(ss.str());
ss.str("");
push_ss(ss, status) << "full sync: " << num_full << "/" << total_shards << " shards";
if (num_full > 0) {
ss << "full sync: " << full_total - full_complete << " entries to sync";
status.push_back(ss.str());
ss.str("");
push_ss(ss, status) << "full sync: " << full_total - full_complete << " entries to sync";
}
ss << "incremental sync: " << num_inc << "/" << total_shards << " shards";
status.push_back(ss.str());
ss.str("");
push_ss(ss, status) << "incremental sync: " << num_inc << "/" << total_shards << " shards";
rgw_mdlog_info log_info;
ret = sync.read_log_info(&log_info);
@ -1661,7 +1669,7 @@ static void get_md_sync_status(list<string>& status)
return;
}
vector<int> shards_behind;
map<int, string> shards_behind;
if (sync_status.sync_info.period != master_period) {
status.push_back(string("master is on a different period: master_period=" + master_period + " local_period=" + sync_status.sync_info.period));
@ -1677,7 +1685,7 @@ static void get_md_sync_status(list<string>& status)
}
auto master_marker = iter->second.marker;
if (master_marker > local_iter.second.marker) {
shards_behind.push_back(shard_id);
shards_behind[shard_id] = local_iter.second.marker;
}
}
}
@ -1686,11 +1694,34 @@ static void get_md_sync_status(list<string>& status)
if (total_behind == 0) {
status.push_back("metadata is caught up with master");
} else {
ss << "metadata is behind on " << total_behind << " shards";
status.push_back(ss.str());
ss.str("");
push_ss(ss, status) << "metadata is behind on " << total_behind << " shards";
map<int, rgw_mdlog_shard_data> master_pos;
ret = sync.read_master_log_shards_next(sync_status.sync_info.period, shards_behind, &master_pos);
if (ret < 0) {
derr << "ERROR: failed to fetch master next positions (" << cpp_strerror(-ret) << ")" << dendl;
} else {
utime_t oldest;
for (auto iter : master_pos) {
rgw_mdlog_shard_data& shard_data = iter.second;
if (!shard_data.entries.empty()) {
rgw_mdlog_entry& entry = shard_data.entries.front();
if (oldest.is_zero()) {
oldest = entry.timestamp;
} else if (!entry.timestamp.is_zero() && entry.timestamp < oldest) {
oldest = entry.timestamp;
}
}
}
if (!oldest.is_zero()) {
push_ss(ss, status) << "oldest change not applied: " << oldest;
}
}
}
flush_ss(ss, status);
}
static void tab_dump(const string& header, int width, const list<string>& entries)
@ -1709,7 +1740,7 @@ static void sync_status(Formatter *formatter)
RGWZoneGroup zonegroup = store->get_zonegroup();
RGWZone& zone = store->get_zone();
int width = 20;
int width = 15;
cout << std::setw(width) << "zonegroup" << std::setw(1) << " " << zonegroup.get_id() << " (" << zonegroup.get_name() << ")" << std::endl;
cout << std::setw(width) << "zone" << std::setw(1) << " " << zone.id << " (" << zone.name << ")" << std::endl;
@ -1722,7 +1753,7 @@ static void sync_status(Formatter *formatter)
get_md_sync_status(md_status);
}
tab_dump("metadata sync", 20, md_status);
tab_dump("metadata sync", width, md_status);
}
int main(int argc, char **argv)

View File

@ -122,38 +122,6 @@ void rgw_mdlog_info::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
// A single metadata-log (mdlog) entry.
struct rgw_mdlog_entry {
  string id;
  string section;
  string name;
  utime_t timestamp;
  RGWMetadataLogData log_data;

  // Parse this entry from its JSON (REST) representation.
  void decode_json(JSONObj *obj);

  // Populate this entry from a cls_log_entry read from the log objclass.
  // Returns false when the opaque data payload cannot be decoded as
  // RGWMetadataLogData; note the id/section/name/timestamp fields are
  // copied first and remain set even on a failed decode.
  bool convert_from(cls_log_entry& le) {
    id = le.id;
    section = le.section;
    name = le.name;
    timestamp = le.timestamp;
    try {
      bufferlist::iterator iter = le.data.begin();
      ::decode(log_data, iter);
    } catch (buffer::error& err) {
      return false;
    }
    return true;
  }
};
// Result of listing one mdlog shard: the returned entries plus paging state.
struct rgw_mdlog_shard_data {
  string marker;     // presumably the marker to resume the listing from -- verify against the REST handler
  bool truncated;    // NOTE(review): uninitialized until decode_json() runs -- confirm all readers go through decode_json
  vector<rgw_mdlog_entry> entries;

  // Parse from the JSON (REST) response.
  void decode_json(JSONObj *obj);
};
void rgw_mdlog_entry::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("id", id, obj);
@ -225,7 +193,6 @@ public:
class RGWReadRemoteMDLogInfoCR : public RGWShardCollectCR {
RGWMetaSyncEnv *sync_env;
RGWMetadataLog *mdlog;
const std::string& period;
int num_shards;
@ -235,15 +202,40 @@ class RGWReadRemoteMDLogInfoCR : public RGWShardCollectCR {
#define READ_MDLOG_MAX_CONCURRENT 10
public:
RGWReadRemoteMDLogInfoCR(RGWMetaSyncEnv *_sync_env, RGWMetadataLog* mdlog,
RGWReadRemoteMDLogInfoCR(RGWMetaSyncEnv *_sync_env,
const std::string& period, int _num_shards,
map<int, RGWMetadataLogInfo> *_mdlog_info) : RGWShardCollectCR(_sync_env->cct, READ_MDLOG_MAX_CONCURRENT),
sync_env(_sync_env), mdlog(mdlog),
sync_env(_sync_env),
period(period), num_shards(_num_shards),
mdlog_info(_mdlog_info), shard_id(0) {}
bool spawn_next();
};
// Collection coroutine that lists mdlog entries on the metadata master
// for a set of shards, each shard starting at a caller-supplied marker.
// Concurrency is bounded by READ_MDLOG_MAX_CONCURRENT and managed by the
// RGWShardCollectCR base class; results land in *result keyed by shard id.
class RGWListRemoteMDLogCR : public RGWShardCollectCR {
  RGWMetaSyncEnv *sync_env;
  const std::string& period;       // NOTE: reference member -- the caller's
                                   // period string must outlive this CR
  map<int, string> shards;         // shard id -> start marker (taken over
                                   // from the caller; see constructor)
  int max_entries_per_shard;       // cap on entries listed per shard
  map<int, rgw_mdlog_shard_data> *result;  // output: shard id -> listed data
  map<int, string>::iterator iter;         // next shard to dispatch
  // Identical redefinition of the value defined earlier in this file; benign.
#define READ_MDLOG_MAX_CONCURRENT 10
public:
  // Note: _shards is consumed -- its contents are swapped into this CR,
  // leaving the caller's map empty on return.
  RGWListRemoteMDLogCR(RGWMetaSyncEnv *_sync_env,
                       const std::string& period, map<int, string>& _shards,
                       int _max_entries_per_shard,
                       map<int, rgw_mdlog_shard_data> *_result) : RGWShardCollectCR(_sync_env->cct, READ_MDLOG_MAX_CONCURRENT),
                                                                  sync_env(_sync_env), period(period),
                                                                  max_entries_per_shard(_max_entries_per_shard),
                                                                  result(_result) {
    shards.swap(_shards);
    iter = shards.begin();
  }
  bool spawn_next();
};
RGWRemoteMetaLog::~RGWRemoteMetaLog()
{
delete error_logger;
@ -279,9 +271,16 @@ int RGWRemoteMetaLog::read_master_log_shards_info(string *master_period, map<int
*master_period = log_info.period;
RGWObjectCtx obj_ctx(store, NULL);
auto mdlog = store->meta_mgr->get_log(log_info.period);
return run(new RGWReadRemoteMDLogInfoCR(&sync_env, mdlog, log_info.period, log_info.num_shards, shards_info));
return run(new RGWReadRemoteMDLogInfoCR(&sync_env, log_info.period, log_info.num_shards, shards_info));
}
// Fetch, from the metadata master, the next mdlog entry past the given
// marker for each listed shard (at most one entry per shard).
//
// period        - the sync period whose mdlog is listed
// shard_markers - per-shard markers to list from; taken by value on
//                 purpose: RGWListRemoteMDLogCR swaps the map out of this
//                 local copy, so the caller's map is left untouched
// result        - filled with the listed data, keyed by shard id
//
// Returns 0 immediately (no-op) when this zone is itself the metadata
// master; otherwise returns the result of running the listing coroutine.
int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
  if (!store->is_meta_master()) {
    return run(new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result));
  }
  return 0;
}
int RGWRemoteMetaLog::init()
@ -570,7 +569,7 @@ public:
return 0;
}
int handle_response() {
int request_complete() {
int ret = http_op->wait(result);
if (ret < 0 && ret != -ENOENT) {
ldout(sync_env->store->ctx(), 0) << "ERROR: failed to list remote mdlog shard, ret=" << ret << dendl;
@ -581,9 +580,22 @@ public:
};
// Spawn one remote shard-info request per call until every shard has been
// dispatched; the RGWShardCollectCR base class drives the iteration and
// bounds concurrency.
//
// Fix: the body contained two consecutive return statements
// (`return (shard_id < num_shards); return true;`) -- the second was
// unreachable dead code. With the guard at the top of the function,
// returning true unconditionally after a successful spawn is correct:
// the next call simply hits the guard and terminates the iteration.
bool RGWReadRemoteMDLogInfoCR::spawn_next() {
  // All shards dispatched: tell the collector we are done.
  if (shard_id >= num_shards) {
    return false;
  }
  // Fetch this shard's remote mdlog info into the caller-provided map slot.
  spawn(new RGWReadRemoteMDLogShardInfoCR(sync_env, period, shard_id, &(*mdlog_info)[shard_id]), false);
  shard_id++;
  return true;
}
bool RGWListRemoteMDLogCR::spawn_next() {
if (iter == shards.end()) {
return false;
}
spawn(new RGWListRemoteMDLogShardCR(sync_env, period, iter->first, iter->second, max_entries_per_shard, &(*result)[iter->first]), false);
++iter;
return true;
}
class RGWInitSyncStatusCoroutine : public RGWCoroutine {

View File

@ -21,6 +21,38 @@ struct rgw_mdlog_info {
};
// One entry of the metadata log (mdlog), exposed here so callers of
// read_master_log_shards_next() can consume listed entries.
struct rgw_mdlog_entry {
  string id;
  string section;
  string name;
  utime_t timestamp;
  RGWMetadataLogData log_data;

  // Parse this entry from its JSON (REST) representation.
  void decode_json(JSONObj *obj);

  // Populate this entry from a cls_log_entry read from the log objclass.
  // Returns false when the opaque data payload cannot be decoded as
  // RGWMetadataLogData; note the id/section/name/timestamp fields are
  // copied first and remain set even on a failed decode.
  bool convert_from(cls_log_entry& le) {
    id = le.id;
    section = le.section;
    name = le.name;
    timestamp = le.timestamp;
    try {
      bufferlist::iterator iter = le.data.begin();
      ::decode(log_data, iter);
    } catch (buffer::error& err) {
      return false;
    }
    return true;
  }
};
// Result of listing one mdlog shard: the entries returned plus paging state.
struct rgw_mdlog_shard_data {
  string marker;     // presumably the marker to resume the listing from -- verify against the REST handler
  bool truncated;    // NOTE(review): uninitialized until decode_json() runs -- confirm all readers go through decode_json
  vector<rgw_mdlog_entry> entries;

  // Parse from the JSON (REST) response.
  void decode_json(JSONObj *obj);
};
class RGWAsyncRadosProcessor;
class RGWMetaSyncStatusManager;
class RGWMetaSyncCR;
@ -177,6 +209,7 @@ public:
int read_log_info(rgw_mdlog_info *log_info);
int read_master_log_shards_info(string *master_period, map<int, RGWMetadataLogInfo> *shards_info);
int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result);
int read_sync_status();
int init_sync_status();
int run_sync();
@ -234,6 +267,9 @@ public:
// Thin forwarding wrapper: read the master's current period and per-shard
// mdlog info via the underlying remote-metalog instance.
int read_master_log_shards_info(string *master_period, map<int, RGWMetadataLogInfo> *shards_info) {
  return master_log.read_master_log_shards_info(master_period, shards_info);
}
// Thin forwarding wrapper: list the next mdlog entries past the given
// per-shard markers via the underlying remote-metalog instance.
int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result) {
  return master_log.read_master_log_shards_next(period, shard_markers, result);
}
// Run metadata sync (delegates to master_log.run_sync()).
int run() { return master_log.run_sync(); }