Mirror of https://github.com/ceph/ceph, synced 2025-02-23 02:57:21 +00:00
Merge pull request #41589 from tchaikov/wip-crimson-start-up-error

crimson: handle startup failures properly

Reviewed-by: Radoslaw Zarzynski <rzarzyns@redhat.com>

commit 1df55c2378
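Taken together, the hunks below move crimson-osd startup from app.run_deprecated() with seastar::engine().at_exit() cleanups to app.run() driving a seastar::async() thread: each startup step is waited on with .get(), teardown is tied to scope with seastar::defer(), and any exception is caught and turned into a non-zero exit status. A minimal sketch of that overall shape follows; it is not taken from the patch, it assumes a Seastar build environment, and start_service/stop_service are hypothetical stand-ins for the real steps (sharded_conf(), the messengers, OSD::start(), ...).

#include <iostream>

#include <seastar/core/app-template.hh>
#include <seastar/core/future.hh>
#include <seastar/core/thread.hh>
#include <seastar/util/defer.hh>

// Hypothetical stand-ins for the real startup/teardown steps
// (sharded_conf(), messengers, OSD::start(), ...).
seastar::future<> start_service() { return seastar::make_ready_future<>(); }
seastar::future<> stop_service()  { return seastar::make_ready_future<>(); }

int main(int argc, char** argv) {
  seastar::app_template app;
  return app.run(argc, argv, [] {
    // seastar::async() runs the lambda in a seastar thread, so futures can be
    // waited on with .get() and failures surface as ordinary C++ exceptions.
    return seastar::async([] {
      try {
        start_service().get();
        // Scope-based cleanup: runs whether we return normally or a later
        // startup step throws.
        auto stop = seastar::defer([] { stop_service().get(); });
        return 0;
      } catch (...) {
        std::cerr << "FATAL: startup failed\n";
        return 1;
      }
    });
  });
}

The integer returned from inside seastar::async() propagates through app.run() as the process exit status, which is what lets the actual patch return EXIT_FAILURE from its catch block instead of calling seastar::engine().exit().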
@@ -67,6 +67,10 @@ AlienStore::AlienStore(const std::string& path, const ConfigValues& values)
  cct = std::make_unique<CephContext>(CEPH_ENTITY_TYPE_OSD);
  g_ceph_context = cct.get();
  cct->_conf.set_config_values(values);
}

seastar::future<> AlienStore::start()
{
  store = std::make_unique<BlueStore>(cct.get(), path);
  std::vector<uint64_t> cpu_cores = _parse_cpu_cores();
  // cores except the first "N_CORES_FOR_SEASTAR" ones will
@@ -86,15 +90,15 @@ AlienStore::AlienStore(const std::string& path, const ConfigValues& values)
  const auto num_threads =
    get_conf<uint64_t>("crimson_alien_op_num_threads");
  tp = std::make_unique<crimson::os::ThreadPool>(num_threads, 128, cpu_cores);
}

seastar::future<> AlienStore::start()
{
  return tp->start();
}

seastar::future<> AlienStore::stop()
{
  if (!tp) {
    // not really started yet
    return seastar::now();
  }
  return tp->submit([this] {
    for (auto [cid, ch]: coll_map) {
      static_cast<AlienCollection*>(ch.get())->collection.reset();
@@ -110,6 +114,7 @@ AlienStore::~AlienStore() = default;
seastar::future<> AlienStore::mount()
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([this] {
    return store->mount();
  }).then([] (int r) {
@@ -121,6 +126,10 @@ seastar::future<> AlienStore::mount()
seastar::future<> AlienStore::umount()
{
  logger().info("{}", __func__);
  if (!tp) {
    // not really started yet
    return seastar::now();
  }
  return transaction_gate.close().then([this] {
    return tp->submit([this] {
      return store->umount();
@@ -135,6 +144,7 @@ seastar::future<> AlienStore::mkfs(uuid_d osd_fsid)
{
  logger().debug("{}", __func__);
  store->set_fsid(osd_fsid);
  assert(tp);
  return tp->submit([this] {
    return store->mkfs();
  }).then([] (int r) {
@@ -150,6 +160,7 @@ AlienStore::list_objects(CollectionRef ch,
                         uint64_t limit) const
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(std::vector<ghobject_t>(), ghobject_t(),
                          [=] (auto &objects, auto &next) {
    objects.reserve(limit);
@@ -171,6 +182,7 @@ AlienStore::list_objects(CollectionRef ch,
seastar::future<CollectionRef> AlienStore::create_new_collection(const coll_t& cid)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([this, cid] {
    return store->create_new_collection(cid);
  }).then([this, cid] (ObjectStore::CollectionHandle c) {
@@ -194,6 +206,7 @@ seastar::future<CollectionRef> AlienStore::create_new_collection(const coll_t& c
seastar::future<CollectionRef> AlienStore::open_collection(const coll_t& cid)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([this, cid] {
    return store->open_collection(cid);
  }).then([this] (ObjectStore::CollectionHandle c) {
@@ -216,6 +229,7 @@ seastar::future<CollectionRef> AlienStore::open_collection(const coll_t& cid)
seastar::future<std::vector<coll_t>> AlienStore::list_collections()
{
  logger().debug("{}", __func__);
  assert(tp);

  return seastar::do_with(std::vector<coll_t>{}, [=] (auto &ls) {
    return tp->submit([this, &ls] {
@@ -235,6 +249,7 @@ AlienStore::read(CollectionRef ch,
                 uint32_t op_flags)
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(ceph::bufferlist{}, [=] (auto &bl) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &bl] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -259,6 +274,7 @@ AlienStore::readv(CollectionRef ch,
                  uint32_t op_flags)
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(ceph::bufferlist{},
                          [this, ch, oid, &m, op_flags](auto& bl) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
@@ -284,6 +300,7 @@ AlienStore::get_attr(CollectionRef ch,
                     std::string_view name) const
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(ceph::bufferlist{}, [=] (auto &value) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &value] {
      auto c =static_cast<AlienCollection*>(ch.get());
@@ -307,6 +324,7 @@ AlienStore::get_attrs(CollectionRef ch,
                      const ghobject_t& oid)
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(attrs_t{}, [=] (auto &aset) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &aset] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -328,6 +346,7 @@ auto AlienStore::omap_get_values(CollectionRef ch,
  -> read_errorator::future<omap_values_t>
{
  logger().debug("{}", __func__);
  assert(tp);
  return seastar::do_with(omap_values_t{}, [=] (auto &values) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &values] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -351,6 +370,7 @@ auto AlienStore::omap_get_values(CollectionRef ch,
  -> read_errorator::future<std::tuple<bool, omap_values_t>>
{
  logger().debug("{} with_start", __func__);
  assert(tp);
  return seastar::do_with(omap_values_t{}, [=] (auto &values) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &values] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -386,6 +406,7 @@ seastar::future<> AlienStore::do_transaction(CollectionRef ch,
  return alien_coll->with_lock([this, ch, id, &txn, &done] {
    Context *crimson_wrapper =
      ceph::os::Transaction::collect_all_contexts(txn);
    assert(tp);
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
      [this, ch, id, crimson_wrapper, &txn, &done] {
      txn.register_on_commit(new OnCommit(id, done, crimson_wrapper, txn));
@@ -403,6 +424,7 @@ seastar::future<> AlienStore::do_transaction(CollectionRef ch,
seastar::future<> AlienStore::inject_data_error(const ghobject_t& o)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([=] {
    return store->inject_data_error(o);
  });
@@ -411,6 +433,7 @@ seastar::future<> AlienStore::inject_data_error(const ghobject_t& o)
seastar::future<> AlienStore::inject_mdata_error(const ghobject_t& o)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([=] {
    return store->inject_mdata_error(o);
  });
@@ -420,6 +443,7 @@ seastar::future<> AlienStore::write_meta(const std::string& key,
                                         const std::string& value)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([=] {
    return store->write_meta(key, value);
  }).then([] (int r) {
@@ -432,6 +456,7 @@ seastar::future<std::tuple<int, std::string>>
AlienStore::read_meta(const std::string& key)
{
  logger().debug("{}", __func__);
  assert(tp);
  return tp->submit([this, key] {
    std::string value;
    int r = store->read_meta(key, &value);
@@ -458,6 +483,7 @@ uuid_d AlienStore::get_fsid() const
seastar::future<store_statfs_t> AlienStore::stat() const
{
  logger().info("{}", __func__);
  assert(tp);
  return seastar::do_with(store_statfs_t{}, [this] (store_statfs_t &st) {
    return tp->submit([this, &st] {
      return store->statfs(&st, nullptr);
@@ -478,6 +504,7 @@ seastar::future<struct stat> AlienStore::stat(
  CollectionRef ch,
  const ghobject_t& oid)
{
  assert(tp);
  return seastar::do_with((struct stat){}, [this, ch, oid](auto& st) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [this, ch, oid, &st] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -491,6 +518,7 @@ auto AlienStore::omap_get_header(CollectionRef ch,
                                 const ghobject_t& oid)
  -> read_errorator::future<ceph::bufferlist>
{
  assert(tp);
  return seastar::do_with(ceph::bufferlist(), [=](auto& bl) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &bl] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -515,6 +543,7 @@ seastar::future<std::map<uint64_t, uint64_t>> AlienStore::fiemap(
  uint64_t off,
  uint64_t len)
{
  assert(tp);
  return seastar::do_with(std::map<uint64_t, uint64_t>(), [=](auto& destmap) {
    return tp->submit(ch->get_cid().hash_to_shard(tp->size()), [=, &destmap] {
      auto c = static_cast<AlienCollection*>(ch.get());
@@ -530,6 +559,7 @@ seastar::future<FuturizedStore::OmapIteratorRef> AlienStore::get_omap_iterator(
  CollectionRef ch,
  const ghobject_t& oid)
{
  assert(tp);
  return tp->submit(ch->get_cid().hash_to_shard(tp->size()),
    [this, ch, oid] {
    auto c = static_cast<AlienCollection*>(ch.get());
@@ -545,6 +575,7 @@ seastar::future<FuturizedStore::OmapIteratorRef> AlienStore::get_omap_iterator(
// needs further optimization.
seastar::future<> AlienStore::AlienOmapIterator::seek_to_first()
{
  assert(store->tp);
  return store->tp->submit(ch->get_cid().hash_to_shard(store->tp->size()),
    [this] {
    return iter->seek_to_first();
@@ -557,6 +588,7 @@ seastar::future<> AlienStore::AlienOmapIterator::seek_to_first()
seastar::future<> AlienStore::AlienOmapIterator::upper_bound(
  const std::string& after)
{
  assert(store->tp);
  return store->tp->submit(ch->get_cid().hash_to_shard(store->tp->size()),
    [this, after] {
    return iter->upper_bound(after);
@@ -569,6 +601,7 @@ seastar::future<> AlienStore::AlienOmapIterator::upper_bound(
seastar::future<> AlienStore::AlienOmapIterator::lower_bound(
  const std::string& to)
{
  assert(store->tp);
  return store->tp->submit(ch->get_cid().hash_to_shard(store->tp->size()),
    [this, to] {
    return iter->lower_bound(to);
@@ -580,6 +613,7 @@ seastar::future<> AlienStore::AlienOmapIterator::lower_bound(

seastar::future<> AlienStore::AlienOmapIterator::next()
{
  assert(store->tp);
  return store->tp->submit(ch->get_cid().hash_to_shard(store->tp->size()),
    [this] {
    return iter->next();
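The AlienStore hunks above appear to follow one lifecycle pattern: the heavyweight members (the BlueStore instance and the alien ThreadPool) are created in start() rather than in the constructor, every operation asserts that the pool exists, and stop()/umount() return early when the store was never started, so a failed startup can still shut down cleanly. A small sketch of that shape, illustrative only and not part of the patch; Service, Worker, and do_something are hypothetical names:

#include <cassert>
#include <memory>

#include <seastar/core/future.hh>

// Hypothetical stand-in for the expensive resource (BlueStore + ThreadPool in
// the real code).
struct Worker {
  seastar::future<> start()  { return seastar::make_ready_future<>(); }
  seastar::future<> stop()   { return seastar::make_ready_future<>(); }
  seastar::future<> submit() { return seastar::make_ready_future<>(); }
};

class Service {
  std::unique_ptr<Worker> worker;
public:
  // The constructor stays cheap and has no interesting failure modes.
  Service() = default;

  seastar::future<> start() {
    // Expensive construction happens here, so a failure is reported through
    // the returned future instead of leaving a half-built object behind.
    worker = std::make_unique<Worker>();
    return worker->start();
  }

  seastar::future<> stop() {
    if (!worker) {
      // not really started yet, nothing to tear down
      // (the patch expresses this as `return seastar::now();`)
      return seastar::make_ready_future<>();
    }
    return worker->stop();
  }

  seastar::future<> do_something() {
    assert(worker);  // operations require a successful start()
    return worker->submit();
  }
};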
@@ -10,6 +10,7 @@
#include <seastar/core/app-template.hh>
#include <seastar/core/print.hh>
#include <seastar/core/thread.hh>
#include <seastar/util/defer.hh>
#include <seastar/util/std-compat.hh>

#include "auth/KeyRing.h"
@@ -142,32 +143,35 @@ seastar::future<> fetch_config()
                             const AuthCapsInfo& caps)
    {}
  };
  auto auth_handler = std::make_unique<DummyAuthHandler>();
  auto msgr = crimson::net::Messenger::create(entity_name_t::CLIENT(),
                                              "temp_mon_client",
                                              get_nonce());
  configure_crc_handling(*msgr);
  auto monc = std::make_unique<crimson::mon::Client>(*msgr, *auth_handler);
  msgr->set_auth_client(monc.get());
  return msgr->start({monc.get()}).then([monc=monc.get()] {
    return monc->start();
  }).then([monc=monc.get()] {
    monc->sub_want("config", 0, 0);
    return monc->renew_subs();
  }).then([monc=monc.get()] {
  return seastar::async([] {
    auto auth_handler = std::make_unique<DummyAuthHandler>();
    auto msgr = crimson::net::Messenger::create(entity_name_t::CLIENT(),
                                                "temp_mon_client",
                                                get_nonce());
    configure_crc_handling(*msgr);
    crimson::mon::Client monc{*msgr, *auth_handler};
    msgr->set_auth_client(&monc);
    msgr->start({&monc}).get();
    auto stop_msgr = seastar::defer([&] {
      // stop msgr here also, in case monc fails to start.
      msgr->stop();
      msgr->shutdown().get();
    });
    monc.start().handle_exception([] (auto ep) {
      seastar::fprint(std::cerr, "FATAL: unable to connect to cluster: {}\n", ep);
      return seastar::make_exception_future<>(ep);
    }).get();
    auto stop_monc = seastar::defer([&] {
      // unregister me from msgr first.
      msgr->stop();
      monc.stop().get();
    });
    monc.sub_want("config", 0, 0);
    monc.renew_subs().get();
    // wait for monmap and config
    return monc->wait_for_config();
  }).then([monc=monc.get()] {
    return local_conf().set_val("fsid", monc->get_fsid().to_string());
  }).then([monc=monc.get(), msgr=msgr.get()] {
    msgr->stop();
    return monc->stop();
  }).then([msgr=msgr.get()] {
    return msgr->shutdown();
  }).then([msgr=std::move(msgr),
           auth_handler=std::move(auth_handler),
           monc=std::move(monc)]
  {});
    monc.wait_for_config().get();
    local_conf().set_val("fsid", monc.get_fsid().to_string()).get();
  });
}

int main(int argc, char* argv[])
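In the fetch_config() hunk above, the .then() continuation chain is rewritten as a seastar::async() thread: each step is waited on with .get(), and a seastar::defer() guard is registered as soon as a resource is up, so that if a later step throws (for example the monitor client failing to connect) the messenger is still stopped and shut down. A compact illustration of that ordering, not from the patch; Resource, fetch_config_like, and the simulated failure are hypothetical:

#include <iostream>
#include <stdexcept>

#include <seastar/core/future.hh>
#include <seastar/core/thread.hh>
#include <seastar/util/defer.hh>

// Hypothetical resources standing in for the messenger and monitor client.
struct Resource {
  const char* name;
  bool fail_on_start = false;
  seastar::future<> start() {
    if (fail_on_start) {
      return seastar::make_exception_future<>(std::runtime_error("cannot connect"));
    }
    return seastar::make_ready_future<>();
  }
  void stop() { std::cout << "stopped " << name << "\n"; }
};

seastar::future<> fetch_config_like() {
  return seastar::async([] {
    Resource msgr{"msgr"};
    Resource monc{"monc", true};  // simulate monc failing to start

    msgr.start().get();
    // Registered right after msgr is up: runs even if monc.start() throws.
    auto stop_msgr = seastar::defer([&] { msgr.stop(); });

    monc.start().get();           // throws in this example
    auto stop_monc = seastar::defer([&] { monc.stop(); });

    // ... fetch config, set fsid, ...
  });
}

The caller sees a failed future, and "stopped msgr" is still printed during unwinding, which mirrors why the patch comments "stop msgr here also, in case monc fails to start."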
@@ -198,75 +202,81 @@ int main(int argc, char* argv[])
  using crimson::common::sharded_conf;
  using crimson::common::sharded_perf_coll;
  try {
    return app.run_deprecated(app_args.size(), const_cast<char**>(app_args.data()),
    return app.run(app_args.size(), const_cast<char**>(app_args.data()),
      [&, &ceph_args=ceph_args] {
      auto& config = app.configuration();
      return seastar::async([&] {
        FatalSignal fatal_signal;
        if (config.count("debug")) {
          seastar::global_logger_registry().set_all_loggers_level(
            seastar::log_level::debug
          );
        }
        sharded_conf().start(init_params.name, cluster_name).get();
        seastar::engine().at_exit([] {
          return sharded_conf().stop();
        });
        sharded_perf_coll().start().get();
        seastar::engine().at_exit([] {
          return sharded_perf_coll().stop();
        });
        local_conf().parse_config_files(conf_file_list).get();
        local_conf().parse_argv(ceph_args).get();
        if (const auto ret = pidfile_write(local_conf()->pid_file);
            ret == -EACCES || ret == -EAGAIN) {
          ceph_abort_msg(
            "likely there is another crimson-osd instance with the same id");
        } else if (ret < 0) {
          ceph_abort_msg(fmt::format("pidfile_write failed with {} {}",
                                     ret, cpp_strerror(-ret)));
        }
        // just ignore SIGHUP, we don't reread settings. keep in mind signals
        // handled by S* must be blocked for alien threads (see AlienStore).
        seastar::engine().handle_signal(SIGHUP, [] {});
        const int whoami = std::stoi(local_conf()->name.get_id());
        const auto nonce = get_nonce();
        crimson::net::MessengerRef cluster_msgr, client_msgr;
        crimson::net::MessengerRef hb_front_msgr, hb_back_msgr;
        for (auto [msgr, name] : {make_pair(std::ref(cluster_msgr), "cluster"s),
                                  make_pair(std::ref(client_msgr), "client"s),
                                  make_pair(std::ref(hb_front_msgr), "hb_front"s),
                                  make_pair(std::ref(hb_back_msgr), "hb_back"s)}) {
          msgr = crimson::net::Messenger::create(entity_name_t::OSD(whoami), name,
                                                 nonce);
          configure_crc_handling(*msgr);
        }
        osd.start_single(whoami, nonce,
                         cluster_msgr, client_msgr,
                         hb_front_msgr, hb_back_msgr).get();
        if (config.count("mkkey")) {
          make_keyring().handle_exception([](std::exception_ptr) {
            seastar::engine().exit(1);
          }).get();
        }
        if (config.count("no-mon-config") == 0) {
          fetch_config().get();
        }
        if (config.count("mkfs")) {
          osd.invoke_on(
            0,
            &crimson::osd::OSD::mkfs,
            local_conf().get_val<uuid_d>("osd_uuid"),
            local_conf().get_val<uuid_d>("fsid")).get();
        }
        seastar::engine().at_exit([&] {
          return osd.stop();
        });
        if (config.count("mkkey") || config.count("mkfs")) {
          seastar::engine().exit(0);
        } else {
          osd.invoke_on(0, &crimson::osd::OSD::start).get();
        try {
          FatalSignal fatal_signal;
          if (config.count("debug")) {
            seastar::global_logger_registry().set_all_loggers_level(
              seastar::log_level::debug
            );
          }
          sharded_conf().start(init_params.name, cluster_name).get();
          auto stop_conf = seastar::defer([] {
            sharded_conf().stop().get();
          });
          sharded_perf_coll().start().get();
          auto stop_perf_coll = seastar::defer([] {
            sharded_perf_coll().stop().get();
          });
          local_conf().parse_config_files(conf_file_list).get();
          local_conf().parse_argv(ceph_args).get();
          if (const auto ret = pidfile_write(local_conf()->pid_file);
              ret == -EACCES || ret == -EAGAIN) {
            ceph_abort_msg(
              "likely there is another crimson-osd instance with the same id");
          } else if (ret < 0) {
            ceph_abort_msg(fmt::format("pidfile_write failed with {} {}",
                                       ret, cpp_strerror(-ret)));
          }
          // just ignore SIGHUP, we don't reread settings. keep in mind signals
          // handled by S* must be blocked for alien threads (see AlienStore).
          seastar::engine().handle_signal(SIGHUP, [] {});
          const int whoami = std::stoi(local_conf()->name.get_id());
          const auto nonce = get_nonce();
          crimson::net::MessengerRef cluster_msgr, client_msgr;
          crimson::net::MessengerRef hb_front_msgr, hb_back_msgr;
          for (auto [msgr, name] : {make_pair(std::ref(cluster_msgr), "cluster"s),
                                    make_pair(std::ref(client_msgr), "client"s),
                                    make_pair(std::ref(hb_front_msgr), "hb_front"s),
                                    make_pair(std::ref(hb_back_msgr), "hb_back"s)}) {
            msgr = crimson::net::Messenger::create(entity_name_t::OSD(whoami),
                                                   name,
                                                   nonce);
            configure_crc_handling(*msgr);
          }
          osd.start_single(whoami, nonce,
                           cluster_msgr, client_msgr,
                           hb_front_msgr, hb_back_msgr).get();
          auto stop_osd = seastar::defer([&] {
            osd.stop().get();
          });
          if (config.count("mkkey")) {
            make_keyring().get();
          }
          if (config.count("no-mon-config") == 0) {
            fetch_config().get();
          }
          if (config.count("mkfs")) {
            osd.invoke_on(
              0,
              &crimson::osd::OSD::mkfs,
              local_conf().get_val<uuid_d>("osd_uuid"),
              local_conf().get_val<uuid_d>("fsid")).get();
          }
          if (config.count("mkkey") || config.count("mkfs")) {
            return EXIT_SUCCESS;
          } else {
            osd.invoke_on(0, &crimson::osd::OSD::start).get();
          }
        } catch (...) {
          seastar::fprint(std::cerr, "FATAL: startup failed: %s\n", std::current_exception());
          return EXIT_FAILURE;
        }
        seastar::fprint(std::cout, "crimson shutdown complete");
        return EXIT_SUCCESS;
      });
    });
  } catch (...) {
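The main() hunk above replaces seastar::engine().at_exit() registrations with seastar::defer() guards held in the startup scope, and wraps the whole sequence in a try/catch that returns EXIT_FAILURE. Because the guards are ordinary scoped objects, they run in reverse order of creation and also run during stack unwinding, so a startup that fails halfway still tears down what it had managed to start. A tiny sketch of that reverse-order behaviour, with made-up subsystem names, not part of the patch:

#include <iostream>

#include <seastar/core/future.hh>
#include <seastar/core/thread.hh>
#include <seastar/util/defer.hh>

// Illustrative only: shows why the patch stacks defer() guards instead of
// registering engine().at_exit() callbacks during startup.
seastar::future<> startup_like() {
  return seastar::async([] {
    std::cout << "start conf\n";
    auto stop_conf = seastar::defer([] { std::cout << "stop conf\n"; });

    std::cout << "start perf collection\n";
    auto stop_perf = seastar::defer([] { std::cout << "stop perf collection\n"; });

    std::cout << "start osd\n";
    auto stop_osd = seastar::defer([] { std::cout << "stop osd\n"; });

    // When this scope ends -- normally, or because a later startup step
    // threw -- the guards run in reverse order: osd, perf collection, conf.
  });
}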
@@ -1,3 +1,6 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "common/ceph_argparse.h"
#include "common/ceph_time.h"
#include "messages/MPing.h"
@@ -187,7 +187,6 @@ RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0