Merge pull request #14883 from tchaikov/wip-mgr-misc

mgr: Misc. bug fixes

Reviewed-by: Sage Weil <sage@redhat.com>
Reviewed-by: Kefu Chai <kchai@redhat.com>
This commit is contained in:
Kefu Chai 2017-05-01 09:15:18 +08:00 committed by GitHub
commit 61a87c2c31
8 changed files with 203 additions and 135 deletions

View File

@@ -142,7 +142,7 @@ class Finisher {
/// Construct a named Finisher that logs its queue length. /// Construct a named Finisher that logs its queue length.
Finisher(CephContext *cct_, string name, string tn) : Finisher(CephContext *cct_, string name, string tn) :
cct(cct_), finisher_lock("Finisher::finisher_lock"), cct(cct_), finisher_lock("Finisher::" + name),
finisher_stop(false), finisher_running(false), finisher_stop(false), finisher_running(false),
thread_name(tn), logger(0), thread_name(tn), logger(0),
finisher_thread(this) { finisher_thread(this) {

View File

@@ -30,6 +30,7 @@
#define dout_prefix *_dout << "mgr.server " << __func__ << " " #define dout_prefix *_dout << "mgr.server " << __func__ << " "
DaemonServer::DaemonServer(MonClient *monc_, DaemonServer::DaemonServer(MonClient *monc_,
Finisher &finisher_,
DaemonStateIndex &daemon_state_, DaemonStateIndex &daemon_state_,
ClusterState &cluster_state_, ClusterState &cluster_state_,
PyModules &py_modules_, PyModules &py_modules_,
@@ -54,6 +55,7 @@ DaemonServer::DaemonServer(MonClient *monc_,
g_conf->mgr_mon_messages)), g_conf->mgr_mon_messages)),
msgr(nullptr), msgr(nullptr),
monc(monc_), monc(monc_),
finisher(finisher_),
daemon_state(daemon_state_), daemon_state(daemon_state_),
cluster_state(cluster_state_), cluster_state(cluster_state_),
py_modules(py_modules_), py_modules(py_modules_),
@@ -371,40 +373,82 @@ bool DaemonServer::_allowed_command(
return capable; return capable;
} }
class ReplyOnFinish : public Context {
DaemonServer* mgr;
MCommand *m;
bufferlist odata;
public:
bufferlist from_mon;
string outs;
ReplyOnFinish(DaemonServer* mgr, MCommand *m, bufferlist&& odata)
: mgr(mgr), m(m), odata(std::move(odata))
{}
void finish(int r) override {
odata.claim_append(from_mon);
mgr->_reply(m, r, outs, odata);
}
};
bool DaemonServer::handle_command(MCommand *m) bool DaemonServer::handle_command(MCommand *m)
{ {
int r = 0; int r = 0;
std::stringstream ss; std::stringstream ss;
bufferlist odata;
std::string prefix; std::string prefix;
assert(lock.is_locked_by_me()); assert(lock.is_locked_by_me());
cmdmap_t cmdmap; /**
* The working data for processing an MCommand. This lives in
* a class to enable passing it into other threads for processing
* outside of the thread/locks that called handle_command.
*/
class CommandContext
{
public:
MCommand *m;
bufferlist odata;
cmdmap_t cmdmap;
// TODO background the call into python land so that we don't CommandContext(MCommand *m_)
// block a messenger thread on python code. : m(m_)
{
}
ConnectionRef con = m->get_connection(); ~CommandContext()
MgrSessionRef session(static_cast<MgrSession*>(con->get_priv())); {
m->put();
}
void reply(int r, const std::stringstream &ss)
{
reply(r, ss.str());
}
void reply(int r, const std::string &rs)
{
// Let the connection drop as soon as we've sent our response
ConnectionRef con = m->get_connection();
if (con) {
con->mark_disposable();
}
dout(1) << "do_command r=" << r << " " << rs << dendl;
if (con) {
MCommandReply *reply = new MCommandReply(r, rs);
reply->set_tid(m->get_tid());
reply->set_data(odata);
con->send_message(reply);
}
}
};
/**
* A context for receiving a bufferlist/error string from a background
* function and then calling back to a CommandContext when it's done
*/
class ReplyOnFinish : public Context {
std::shared_ptr<CommandContext> cmdctx;
public:
bufferlist from_mon;
string outs;
ReplyOnFinish(std::shared_ptr<CommandContext> cmdctx_)
: cmdctx(cmdctx_)
{}
void finish(int r) override {
cmdctx->odata.claim_append(from_mon);
cmdctx->reply(r, outs);
}
};
std::shared_ptr<CommandContext> cmdctx = std::make_shared<CommandContext>(m);
MgrSessionRef session(static_cast<MgrSession*>(m->get_connection()->get_priv()));
if (!session) { if (!session) {
return true; return true;
} }
@@ -412,23 +456,23 @@ bool DaemonServer::handle_command(MCommand *m)
if (session->inst.name == entity_name_t()) if (session->inst.name == entity_name_t())
session->inst.name = m->get_source(); session->inst.name = m->get_source();
string format; std::string format;
boost::scoped_ptr<Formatter> f; boost::scoped_ptr<Formatter> f;
const MgrCommand *mgr_cmd;
map<string,string> param_str_map; map<string,string> param_str_map;
if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) { if (!cmdmap_from_json(m->cmd, &(cmdctx->cmdmap), ss)) {
return _reply(m, -EINVAL, ss.str(), odata); cmdctx->reply(-EINVAL, ss);
return true;
} }
{ {
cmd_getval(g_ceph_context, cmdmap, "format", format, string("plain")); cmd_getval(g_ceph_context, cmdctx->cmdmap, "format", format, string("plain"));
f.reset(Formatter::create(format)); f.reset(Formatter::create(format));
} }
dout(4) << "decoded " << cmdmap.size() << dendl; cmd_getval(cct, cmdctx->cmdmap, "prefix", prefix);
cmd_getval(cct, cmdmap, "prefix", prefix);
dout(4) << "decoded " << cmdctx->cmdmap.size() << dendl;
dout(4) << "prefix=" << prefix << dendl; dout(4) << "prefix=" << prefix << dendl;
if (prefix == "get_command_descriptions") { if (prefix == "get_command_descriptions") {
@@ -460,26 +504,36 @@ bool DaemonServer::handle_command(MCommand *m)
} }
#endif #endif
f.close_section(); // command_descriptions f.close_section(); // command_descriptions
f.flush(odata); f.flush(cmdctx->odata);
return _reply(m, r, ss.str(), odata); cmdctx->reply(0, ss);
return true;
} }
// lookup command // lookup command
mgr_cmd = _get_mgrcommand(prefix, mgr_commands, const MgrCommand *mgr_cmd = _get_mgrcommand(prefix, mgr_commands,
ARRAY_SIZE(mgr_commands)); ARRAY_SIZE(mgr_commands));
_generate_command_map(cmdmap, param_str_map); _generate_command_map(cmdctx->cmdmap, param_str_map);
if (!mgr_cmd) { if (!mgr_cmd) {
return _reply(m, -EINVAL, "command not supported", odata); MgrCommand py_command = {"", "", "py", "rw", "cli"};
} if (!_allowed_command(session.get(), py_command.module, prefix, cmdctx->cmdmap,
param_str_map, &py_command)) {
// validate user's permissions for requested command dout(1) << " access denied" << dendl;
if (!_allowed_command(session.get(), mgr_cmd->module, prefix, cmdmap, ss << "access denied";
param_str_map, mgr_cmd)) { cmdctx->reply(-EACCES, ss);
dout(1) << __func__ << " access denied" << dendl; return true;
audit_clog->info() << "from='" << session->inst << "' " }
<< "entity='" << session->entity_name << "' " } else {
<< "cmd=" << m->cmd << ": access denied"; // validate user's permissions for requested command
return _reply(m, -EACCES, "access denied", odata); if (!_allowed_command(session.get(), mgr_cmd->module, prefix, cmdctx->cmdmap,
param_str_map, mgr_cmd)) {
dout(1) << " access denied" << dendl;
audit_clog->info() << "from='" << session->inst << "' "
<< "entity='" << session->entity_name << "' "
<< "cmd=" << m->cmd << ": access denied";
ss << "access denied";
cmdctx->reply(-EACCES, ss);
return true;
}
} }
audit_clog->debug() audit_clog->debug()
@@ -496,10 +550,11 @@ bool DaemonServer::handle_command(MCommand *m)
string scrubop = prefix.substr(3, string::npos); string scrubop = prefix.substr(3, string::npos);
pg_t pgid; pg_t pgid;
string pgidstr; string pgidstr;
cmd_getval(g_ceph_context, cmdmap, "pgid", pgidstr); cmd_getval(g_ceph_context, cmdctx->cmdmap, "pgid", pgidstr);
if (!pgid.parse(pgidstr.c_str())) { if (!pgid.parse(pgidstr.c_str())) {
ss << "invalid pgid '" << pgidstr << "'"; ss << "invalid pgid '" << pgidstr << "'";
return _reply(m, -EINVAL, ss.str(), odata); cmdctx->reply(-EINVAL, ss);
return true;
} }
bool pg_exists = false; bool pg_exists = false;
cluster_state.with_osdmap([&](const OSDMap& osdmap) { cluster_state.with_osdmap([&](const OSDMap& osdmap) {
@@ -507,7 +562,8 @@ bool DaemonServer::handle_command(MCommand *m)
}); });
if (!pg_exists) { if (!pg_exists) {
ss << "pg " << pgid << " dne"; ss << "pg " << pgid << " dne";
return _reply(m, -ENOENT, ss.str(), odata); cmdctx->reply(-ENOENT, ss);
return true;
} }
int acting_primary = -1; int acting_primary = -1;
entity_inst_t inst; entity_inst_t inst;
@@ -519,7 +575,8 @@ bool DaemonServer::handle_command(MCommand *m)
}); });
if (acting_primary == -1) { if (acting_primary == -1) {
ss << "pg " << pgid << " has no primary osd"; ss << "pg " << pgid << " has no primary osd";
return _reply(m, -EAGAIN, ss.str(), odata); cmdctx->reply(-EAGAIN, ss);
return true;
} }
vector<pg_t> pgs = { pgid }; vector<pg_t> pgs = { pgid };
msgr->send_message(new MOSDScrub(monc->get_fsid(), msgr->send_message(new MOSDScrub(monc->get_fsid(),
@@ -529,7 +586,8 @@ bool DaemonServer::handle_command(MCommand *m)
inst); inst);
ss << "instructing pg " << pgid << " on osd." << acting_primary ss << "instructing pg " << pgid << " on osd." << acting_primary
<< " (" << inst << ") to " << scrubop; << " (" << inst << ") to " << scrubop;
return _reply(m, 0, ss.str(), odata); cmdctx->reply(0, ss);
return true;
} else if (prefix == "osd reweight-by-pg" || } else if (prefix == "osd reweight-by-pg" ||
prefix == "osd reweight-by-utilization" || prefix == "osd reweight-by-utilization" ||
prefix == "osd test-reweight-by-pg" || prefix == "osd test-reweight-by-pg" ||
@@ -540,10 +598,10 @@ bool DaemonServer::handle_command(MCommand *m)
prefix == "osd test-reweight-by-pg" || prefix == "osd test-reweight-by-pg" ||
prefix == "osd test-reweight-by-utilization"; prefix == "osd test-reweight-by-utilization";
int64_t oload; int64_t oload;
cmd_getval(g_ceph_context, cmdmap, "oload", oload, int64_t(120)); cmd_getval(g_ceph_context, cmdctx->cmdmap, "oload", oload, int64_t(120));
set<int64_t> pools; set<int64_t> pools;
vector<string> poolnames; vector<string> poolnames;
cmd_getval(g_ceph_context, cmdmap, "pools", poolnames); cmd_getval(g_ceph_context, cmdctx->cmdmap, "pools", poolnames);
cluster_state.with_osdmap([&](const OSDMap& osdmap) { cluster_state.with_osdmap([&](const OSDMap& osdmap) {
for (const auto& poolname : poolnames) { for (const auto& poolname : poolnames) {
int64_t pool = osdmap.lookup_pg_pool_name(poolname); int64_t pool = osdmap.lookup_pg_pool_name(poolname);
@@ -555,22 +613,25 @@ bool DaemonServer::handle_command(MCommand *m)
} }
}); });
if (r) { if (r) {
return _reply(m, r, ss.str(), odata); cmdctx->reply(r, ss);
return true;
} }
double max_change = g_conf->mon_reweight_max_change; double max_change = g_conf->mon_reweight_max_change;
cmd_getval(g_ceph_context, cmdmap, "max_change", max_change); cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_change", max_change);
if (max_change <= 0.0) { if (max_change <= 0.0) {
ss << "max_change " << max_change << " must be positive"; ss << "max_change " << max_change << " must be positive";
return _reply(m, -EINVAL, ss.str(), odata); cmdctx->reply(-EINVAL, ss);
return true;
} }
int64_t max_osds = g_conf->mon_reweight_max_osds; int64_t max_osds = g_conf->mon_reweight_max_osds;
cmd_getval(g_ceph_context, cmdmap, "max_osds", max_osds); cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_osds", max_osds);
if (max_osds <= 0) { if (max_osds <= 0) {
ss << "max_osds " << max_osds << " must be positive"; ss << "max_osds " << max_osds << " must be positive";
return _reply(m, -EINVAL, ss.str(), odata); cmdctx->reply(-EINVAL, ss);
return true;
} }
string no_increasing; string no_increasing;
cmd_getval(g_ceph_context, cmdmap, "no_increasing", no_increasing); cmd_getval(g_ceph_context, cmdctx->cmdmap, "no_increasing", no_increasing);
string out_str; string out_str;
mempool::osdmap::map<int32_t, uint32_t> new_weights; mempool::osdmap::map<int32_t, uint32_t> new_weights;
r = cluster_state.with_pgmap([&](const PGMap& pgmap) { r = cluster_state.with_pgmap([&](const PGMap& pgmap) {
@@ -589,16 +650,19 @@ bool DaemonServer::handle_command(MCommand *m)
if (r >= 0) { if (r >= 0) {
dout(10) << "reweight::by_utilization: finished with " << out_str << dendl; dout(10) << "reweight::by_utilization: finished with " << out_str << dendl;
} }
if (f) if (f) {
f->flush(odata); f->flush(cmdctx->odata);
else } else {
odata.append(out_str); cmdctx->odata.append(out_str);
}
if (r < 0) { if (r < 0) {
ss << "FAILED reweight-by-pg"; ss << "FAILED reweight-by-pg";
return _reply(m, r, ss.str(), odata); cmdctx->reply(r, ss);
return true;
} else if (r == 0 || dry_run) { } else if (r == 0 || dry_run) {
ss << "no change"; ss << "no change";
return _reply(m, r, ss.str(), odata); cmdctx->reply(r, ss);
return true;
} else { } else {
json_spirit::Object json_object; json_spirit::Object json_object;
for (const auto& osd_weight : new_weights) { for (const auto& osd_weight : new_weights) {
@@ -613,7 +677,7 @@ bool DaemonServer::handle_command(MCommand *m)
"\"prefix\": \"osd reweightn\", " "\"prefix\": \"osd reweightn\", "
"\"weights\": \"" + s + "\"" "\"weights\": \"" + s + "\""
"}"; "}";
auto on_finish = new ReplyOnFinish(this, m, std::move(odata)); auto on_finish = new ReplyOnFinish(cmdctx);
monc->start_mon_command({cmd}, {}, monc->start_mon_command({cmd}, {},
&on_finish->from_mon, &on_finish->outs, on_finish); &on_finish->from_mon, &on_finish->outs, on_finish);
return true; return true;
@@ -621,63 +685,46 @@ bool DaemonServer::handle_command(MCommand *m)
} else { } else {
r = cluster_state.with_pgmap([&](const PGMap& pg_map) { r = cluster_state.with_pgmap([&](const PGMap& pg_map) {
return cluster_state.with_osdmap([&](const OSDMap& osdmap) { return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
return process_pg_map_command(prefix, cmdmap, pg_map, osdmap, return process_pg_map_command(prefix, cmdctx->cmdmap, pg_map, osdmap,
f.get(), &ss, &odata); f.get(), &ss, &cmdctx->odata);
}); });
}); });
}
if (r != -EOPNOTSUPP) if (r != -EOPNOTSUPP) {
return _reply(m, r, ss.str(), odata); cmdctx->reply(r, ss);
// fall back to registered python handlers return true;
else {
// Let's find you a handler!
MgrPyModule *handler = nullptr;
auto py_commands = py_modules.get_commands();
for (const auto &pyc : py_commands) {
auto pyc_prefix = cmddesc_get_prefix(pyc.cmdstring);
dout(1) << "pyc_prefix: '" << pyc_prefix << "'" << dendl;
if (pyc_prefix == prefix) {
handler = pyc.handler;
break;
}
} }
if (handler == nullptr) {
ss << "No handler found for '" << prefix << "'";
dout(4) << "No handler found for '" << prefix << "'" << dendl;
return _reply(m, -EINVAL, ss.str(), odata);
}
// FIXME: go run this python part in another thread, not inline
// with a ms_dispatch, so that the python part can block if it
// wants to.
dout(4) << "passing through " << cmdmap.size() << dendl;
stringstream ds;
r = handler->handle_command(cmdmap, &ds, &ss);
odata.append(ds);
return _reply(m, 0, ss.str(), odata);
} }
}
bool DaemonServer::_reply(MCommand* m, // None of the special native commands,
int ret, MgrPyModule *handler = nullptr;
const std::string& s, auto py_commands = py_modules.get_commands();
const bufferlist& payload) for (const auto &pyc : py_commands) {
{ auto pyc_prefix = cmddesc_get_prefix(pyc.cmdstring);
dout(1) << __func__ << " r=" << ret << " " << s << dendl; dout(1) << "pyc_prefix: '" << pyc_prefix << "'" << dendl;
auto con = m->get_connection(); if (pyc_prefix == prefix) {
if (!con) { handler = pyc.handler;
dout(10) << __func__ << " connection dropped for command" << dendl; break;
m->put(); }
}
if (handler == nullptr) {
ss << "No handler found for '" << prefix << "'";
dout(4) << "No handler found for '" << prefix << "'" << dendl;
cmdctx->reply(-EINVAL, ss);
return true;
} else {
// Okay, now we have a handler to call, but we must not call it
// in this thread, because the python handlers can do anything,
// including blocking, and including calling back into mgr.
dout(4) << "passing through " << cmdctx->cmdmap.size() << dendl;
finisher.queue(new FunctionContext([cmdctx, handler](int r_) {
std::stringstream ds;
std::stringstream ss;
int r = handler->handle_command(cmdctx->cmdmap, &ds, &ss);
cmdctx->odata.append(ds);
cmdctx->reply(r, ss);
}));
return true; return true;
} }
// Let the connection drop as soon as we've sent our response
con->mark_disposable();
auto response = new MCommandReply(ret, s);
response->set_tid(m->get_tid());
response->set_data(payload);
con->send_message(response);
m->put();
return true;
} }

View File

@@ -54,6 +54,7 @@ protected:
Messenger *msgr; Messenger *msgr;
MonClient *monc; MonClient *monc;
Finisher &finisher;
DaemonStateIndex &daemon_state; DaemonStateIndex &daemon_state;
ClusterState &cluster_state; ClusterState &cluster_state;
PyModules &py_modules; PyModules &py_modules;
@@ -85,6 +86,7 @@ public:
entity_addr_t get_myaddr() const; entity_addr_t get_myaddr() const;
DaemonServer(MonClient *monc_, DaemonServer(MonClient *monc_,
Finisher &finisher_,
DaemonStateIndex &daemon_state_, DaemonStateIndex &daemon_state_,
ClusterState &cluster_state_, ClusterState &cluster_state_,
PyModules &py_modules_, PyModules &py_modules_,

View File

@@ -48,7 +48,8 @@ Mgr::Mgr(MonClient *monc_, Messenger *clientm_, Objecter *objecter_,
finisher(g_ceph_context, "Mgr", "mgr-fin"), finisher(g_ceph_context, "Mgr", "mgr-fin"),
py_modules(daemon_state, cluster_state, *monc, finisher), py_modules(daemon_state, cluster_state, *monc, finisher),
cluster_state(monc, nullptr), cluster_state(monc, nullptr),
server(monc, daemon_state, cluster_state, py_modules, clog_, audit_clog_), server(monc, finisher, daemon_state, cluster_state, py_modules,
clog_, audit_clog_),
initialized(false), initialized(false),
initializing(false) initializing(false)
{ {
@@ -353,17 +354,20 @@ void Mgr::load_config()
void Mgr::shutdown() void Mgr::shutdown()
{ {
// FIXME: pre-empt init() if it is currently running, so that it will
// give up the lock for us.
Mutex::Locker l(lock);
finisher.queue(new FunctionContext([&](int) { finisher.queue(new FunctionContext([&](int) {
// First stop the server so that we're not taking any more incoming {
// requests Mutex::Locker l(lock);
server.shutdown(); monc->sub_unwant("log-info");
monc->sub_unwant("mgrdigest");
monc->sub_unwant("fsmap");
// First stop the server so that we're not taking any more incoming
// requests
server.shutdown();
}
// after the messenger is stopped, signal modules to shutdown via finisher // after the messenger is stopped, signal modules to shutdown via finisher
py_modules.shutdown(); py_modules.shutdown();
})); }));
// Then stop the finisher to ensure its enqueued contexts aren't going // Then stop the finisher to ensure its enqueued contexts aren't going
// to touch references to the things we're about to tear down // to touch references to the things we're about to tear down
finisher.wait_for_empty(); finisher.wait_for_empty();
@@ -445,6 +449,8 @@ void Mgr::handle_log(MLog *m)
for (const auto &e : m->entries) { for (const auto &e : m->entries) {
py_modules.notify_all(e); py_modules.notify_all(e);
} }
m->put();
} }
bool Mgr::ms_dispatch(Message *m) bool Mgr::ms_dispatch(Message *m)
@@ -468,11 +474,12 @@ bool Mgr::ms_dispatch(Message *m)
ceph_abort(); ceph_abort();
py_modules.notify_all("mon_map", ""); py_modules.notify_all("mon_map", "");
m->put();
break; break;
case CEPH_MSG_FS_MAP: case CEPH_MSG_FS_MAP:
py_modules.notify_all("fs_map", ""); py_modules.notify_all("fs_map", "");
handle_fs_map((MFSMap*)m); handle_fs_map((MFSMap*)m);
m->put(); return false; // I shall let this pass through for Client
break; break;
case CEPH_MSG_OSD_MAP: case CEPH_MSG_OSD_MAP:
handle_osd_map(); handle_osd_map();
@@ -486,7 +493,6 @@ bool Mgr::ms_dispatch(Message *m)
break; break;
case MSG_LOG: case MSG_LOG:
handle_log(static_cast<MLog *>(m)); handle_log(static_cast<MLog *>(m));
m->put();
break; break;
default: default:
@@ -570,6 +576,7 @@ void Mgr::handle_mgr_digest(MMgrDigest* m)
// the pgmap might have changed since last time we were here. // the pgmap might have changed since last time we were here.
py_modules.notify_all("pg_summary", ""); py_modules.notify_all("pg_summary", "");
dout(10) << "done." << dendl; dout(10) << "done." << dendl;
m->put(); m->put();
} }

View File

@@ -242,6 +242,8 @@ void MgrStandby::handle_mgr_map(MMgrMap* mmap)
active_mgr.reset(); active_mgr.reset();
} }
} }
mmap->put();
} }
bool MgrStandby::ms_dispatch(Message *m) bool MgrStandby::ms_dispatch(Message *m)
@@ -256,13 +258,14 @@ bool MgrStandby::ms_dispatch(Message *m)
default: default:
if (active_mgr) { if (active_mgr) {
return active_mgr->ms_dispatch(m); lock.Unlock();
active_mgr->ms_dispatch(m);
lock.Lock();
} else { } else {
return false; return false;
} }
} }
m->put();
return true; return true;
} }

View File

@@ -126,7 +126,12 @@ public:
dump(f); dump(f);
} else { } else {
if (get_active_gid() != 0) { if (get_active_gid() != 0) {
*ss << "active: " << get_active_name() << " "; *ss << "active: " << get_active_name();
if (!available) {
// If the daemon hasn't gone active yet, indicate that.
*ss << "(starting)";
}
*ss << " ";
} else { } else {
*ss << "no daemons active "; *ss << "no daemons active ";
} }

View File

@@ -263,7 +263,9 @@ void MgrMonitor::check_sub(Subscription *sub)
} }
} else { } else {
assert(sub->type == "mgrdigest"); assert(sub->type == "mgrdigest");
send_digests(); if (digest_callback == nullptr) {
send_digests();
}
} }
} }
@@ -536,7 +538,9 @@ bool MgrMonitor::prepare_command(MonOpRequestRef op)
void MgrMonitor::init() void MgrMonitor::init()
{ {
send_digests(); // To get it to schedule its own event if (digest_callback == nullptr) {
send_digests(); // To get it to schedule its own event
}
} }
void MgrMonitor::on_shutdown() void MgrMonitor::on_shutdown()

View File

@@ -657,7 +657,7 @@ EOF
EOF EOF
fi fi
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn" prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow' ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
if [ "$standby" -eq 1 ]; then if [ "$standby" -eq 1 ]; then
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \ prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
"$CEPH_DEV_DIR/mds.${name}s/keyring" "$CEPH_DEV_DIR/mds.${name}s/keyring"