Merge pull request #19985 from Liuchang0812/osd-pool-stats-in-mgr

mon, mgr: move "osd pool stats" command to mgr and mgr python module

Reviewed-by: John Spray <john.spray@redhat.com>
Reviewed-by: Kefu Chai <kchai@redhat.com>
Committed by Kefu Chai on 2018-04-23 22:52:22 +08:00 (via GitHub)
commit 2c41c2f160
5 changed files with 134 additions and 109 deletions

src/mgr/ActivePyModules.cc

@@ -308,6 +308,21 @@ PyObject *ActivePyModules::get_python(const std::string &what)
pg_map.dump_osd_stats(&f);
});
return f.get();
} else if (what == "osd_pool_stats") {
int64_t poolid = -ENOENT;
string pool_name;
PyFormatter f;
cluster_state.with_pgmap([&](const PGMap& pg_map) {
return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
f.open_array_section("pool_stats");
for (auto &p : osdmap.get_pools()) {
poolid = p.first;
pg_map.dump_pool_stats_and_io_rate(poolid, osdmap, &f, nullptr);
}
f.close_section();
});
});
return f.get();
} else if (what == "health" || what == "mon_status") {
PyFormatter f;
bufferlist json;
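With "osd_pool_stats" exposed through ActivePyModules::get_python(), a mgr Python module can read the same per-pool recovery and client IO data directly. A minimal sketch, assuming the standard MgrModule interface (the module class and log message are illustrative, not part of this change):

    from mgr_module import MgrModule

    class PoolStatsModule(MgrModule):
        def serve(self):
            # Returns the structure built above: a "pool_stats" array with one
            # entry per pool, produced by PGMap::dump_pool_stats_and_io_rate().
            data = self.get('osd_pool_stats')
            for pool in data.get('pool_stats', []):
                self.log.info('pool %s (id %d)', pool['pool_name'], pool['pool_id'])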

src/mgr/DaemonServer.cc

@@ -1089,6 +1089,54 @@ bool DaemonServer::handle_command(MCommand *m)
});
cmdctx->reply(r, "");
return true;
} else if (prefix == "osd pool stats") {
string pool_name;
cmd_getval(g_ceph_context, cmdctx->cmdmap, "pool_name", pool_name);
int64_t poolid = -ENOENT;
bool one_pool = false;
r = cluster_state.with_pgmap([&](const PGMap& pg_map) {
return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
if (!pool_name.empty()) {
poolid = osdmap.lookup_pg_pool_name(pool_name);
if (poolid < 0) {
assert(poolid == -ENOENT);
ss << "unrecognized pool '" << pool_name << "'";
return -ENOENT;
}
one_pool = true;
}
stringstream rs;
if (f)
f->open_array_section("pool_stats");
else {
if (osdmap.get_pools().empty()) {
ss << "there are no pools!";
goto stats_out;
}
}
for (auto &p : osdmap.get_pools()) {
if (!one_pool) {
poolid = p.first;
}
pg_map.dump_pool_stats_and_io_rate(poolid, osdmap, f.get(), &rs);
if (one_pool) {
break;
}
}
stats_out:
if (f) {
f->close_section();
f->flush(cmdctx->odata);
} else {
cmdctx->odata.append(rs.str());
}
return 0;
});
});
if (r != -EOPNOTSUPP) {
cmdctx->reply(r, ss);
return true;
}
} else if (prefix == "osd safe-to-destroy") {
vector<string> ids;
cmd_getval(g_ceph_context, cmdctx->cmdmap, "ids", ids);
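Without a formatter, the handler replies with the plain text assembled by PGMap::dump_pool_stats_and_io_rate() (added later in this change). Roughly, with illustrative pool names and the rate summaries elided:

    pool rbd id 1
      nothing is going on

    pool cephfs_data id 2
      recovery io <recovery rate summary>
      client io <client IO rate summary>

Recovery progress lines and a cache tier io line are appended in the same way when a pool is recovering or acts as a cache tier.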

src/mgr/MgrCommands.h

@@ -75,7 +75,7 @@ COMMAND("osd blocked-by", \
"print histogram of which OSDs are blocking their peers", \
"osd", "r", "cli,rest")
COMMAND("osd pool stats " \
"name=name,type=CephString,req=false",
"name=pool_name,type=CephPoolname,req=false",
"obtain stats from all pools, or from specified pool",
"osd", "r", "cli,rest")
COMMAND("osd reweight-by-utilization " \
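The only change to the command table is the pool argument: previously name of type CephString, now pool_name of type CephPoolname, matching what the new DaemonServer handler reads via cmd_getval(). Invocation is unchanged; for example (the pool name rbd is illustrative):

    ceph osd pool stats
    ceph osd pool stats rbd
    ceph osd pool stats rbd --format json-pretty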

src/mon/PGMap.cc

@@ -2114,6 +2114,65 @@ void PGMap::dump_filtered_pg_stats(ostream& ss, set<pg_t>& pgs) const
ss << tab;
}
void PGMap::dump_pool_stats_and_io_rate(int64_t poolid, const OSDMap &osd_map,
Formatter *f,
stringstream *rs) const {
string pool_name = osd_map.get_pool_name(poolid);
if (f) {
f->open_object_section("pool");
f->dump_string("pool_name", pool_name.c_str());
f->dump_int("pool_id", poolid);
f->open_object_section("recovery");
}
list<string> sl;
stringstream tss;
pool_recovery_summary(f, &sl, poolid);
if (!f && !sl.empty()) {
for (auto &p : sl)
tss << " " << p << "\n";
}
if (f) {
f->close_section(); // object section recovery
f->open_object_section("recovery_rate");
}
ostringstream rss;
pool_recovery_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " recovery io " << rss.str() << "\n";
if (f) {
f->close_section(); // object section recovery_rate
f->open_object_section("client_io_rate");
}
rss.clear();
rss.str("");
pool_client_io_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " client io " << rss.str() << "\n";
// dump cache tier IO rate for cache pool
const pg_pool_t *pool = osd_map.get_pg_pool(poolid);
if (pool->is_tier()) {
if (f) {
f->close_section(); // object section client_io_rate
f->open_object_section("cache_io_rate");
}
rss.clear();
rss.str("");
pool_cache_io_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " cache tier io " << rss.str() << "\n";
}
if (f) {
f->close_section(); // object section cache_io_rate
f->close_section(); // object section pool
} else {
*rs << "pool " << pool_name << " id " << poolid << "\n";
if (!tss.str().empty())
*rs << tss.str() << "\n";
else
*rs << " nothing is going on\n\n";
}
}
void PGMap::get_health_checks(
CephContext *cct,
const OSDMap& osdmap,
@@ -3185,114 +3244,6 @@ int process_pg_map_command(
return 0;
}
if (prefix == "osd pool stats") {
string pool_name;
cmd_getval(g_ceph_context, cmdmap, "name", pool_name);
int64_t poolid = -ENOENT;
bool one_pool = false;
if (!pool_name.empty()) {
poolid = osdmap.lookup_pg_pool_name(pool_name);
if (poolid < 0) {
assert(poolid == -ENOENT);
*ss << "unrecognized pool '" << pool_name << "'";
return -ENOENT;
}
one_pool = true;
}
stringstream rs;
if (f)
f->open_array_section("pool_stats");
else {
if (osdmap.get_pools().empty()) {
*ss << "there are no pools!";
goto stats_out;
}
}
for (auto& p : osdmap.get_pools()) {
if (!one_pool)
poolid = p.first;
pool_name = osdmap.get_pool_name(poolid);
if (f) {
f->open_object_section("pool");
f->dump_string("pool_name", pool_name.c_str());
f->dump_int("pool_id", poolid);
f->open_object_section("recovery");
}
list<string> sl;
stringstream tss;
pg_map.pool_recovery_summary(f, &sl, poolid);
if (!f && !sl.empty()) {
for (auto& p : sl)
tss << " " << p << "\n";
}
if (f) {
f->close_section();
f->open_object_section("recovery_rate");
}
ostringstream rss;
pg_map.pool_recovery_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " recovery io " << rss.str() << "\n";
if (f) {
f->close_section();
f->open_object_section("client_io_rate");
}
rss.clear();
rss.str("");
pg_map.pool_client_io_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " client io " << rss.str() << "\n";
// dump cache tier IO rate for cache pool
const pg_pool_t *pool = osdmap.get_pg_pool(poolid);
if (pool->is_tier()) {
if (f) {
f->close_section();
f->open_object_section("cache_io_rate");
}
rss.clear();
rss.str("");
pg_map.pool_cache_io_rate_summary(f, &rss, poolid);
if (!f && !rss.str().empty())
tss << " cache tier io " << rss.str() << "\n";
}
if (f) {
f->close_section();
f->close_section();
} else {
rs << "pool " << pool_name << " id " << poolid << "\n";
if (!tss.str().empty())
rs << tss.str() << "\n";
else
rs << " nothing is going on\n\n";
}
if (one_pool)
break;
}
stats_out:
if (f) {
f->close_section();
f->flush(ds);
odata->append(ds);
} else {
odata->append(rs.str());
}
return 0;
}
return -EOPNOTSUPP;
}
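For the formatted path, each element of the pool_stats array opened by the callers in DaemonServer.cc and ActivePyModules.cc is one object per pool, shaped as sketched below (values illustrative; the contents of the nested sections come from the pool_*_summary() helpers and are omitted here, and cache_io_rate appears only for cache-tier pools):

    {
      "pool_name": "rbd",
      "pool_id": 1,
      "recovery": { ... },
      "recovery_rate": { ... },
      "client_io_rate": { ... }
    }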

src/mon/PGMap.h

@@ -152,6 +152,10 @@ public:
*/
int64_t get_pool_free_space(const OSDMap &osd_map, int64_t poolid) const;
/**
* Dump pool usage and io ops/bytes, used by "ceph df" command
*/
virtual void dump_pool_stats_full(const OSDMap &osd_map, stringstream *ss,
Formatter *f, bool verbose) const;
void dump_fs_stats(stringstream *ss, Formatter *f, bool verbose) const;
@@ -405,6 +409,13 @@ public:
PGMapDigest::dump_pool_stats_full(osd_map, ss, f, verbose);
}
/*
* Dump client io rate, recovery io rate, cache io rate and recovery information.
* this function is used by "ceph osd pool stats" command
*/
void dump_pool_stats_and_io_rate(int64_t poolid, const OSDMap &osd_map, Formatter *f,
stringstream *ss) const;
void dump_pg_stats_plain(
ostream& ss,
const mempool::pgmap::unordered_map<pg_t, pg_stat_t>& pg_stats,