Merge pull request #28071 from tchaikov/wip-crimson-perf-regres

crimson,osd: performance fixes

Reviewed-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
This commit is contained in:
Kefu Chai 2019-05-14 00:00:00 +08:00 committed by GitHub
commit 5a79b4c822
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 20 additions and 28 deletions

View File

@ -1,6 +1,6 @@
add_library(crimson::cflags INTERFACE IMPORTED)
set_target_properties(crimson::cflags PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "WITH_SEASTAR=1"
INTERFACE_COMPILE_DEFINITIONS "WITH_SEASTAR=1;BOOST_SP_DISABLE_THREADS"
INTERFACE_LINK_LIBRARIES Seastar::seastar)
set(crimson_common_srcs

View File

@ -1015,18 +1015,14 @@ seastar::future<Ref<MOSDOpReply>> PG::do_osd_ops(Ref<MOSDOp> m)
// TODO: issue requests in parallel if they don't write,
// with writes being basically a synchronization barrier
return seastar::do_for_each(std::begin(m->ops), std::end(m->ops),
[m,&txn,this,os](OSDOp& osd_op) {
return do_osd_op(*os, osd_op, txn);
}).then([m,&txn,this,os=std::move(os)] {
// XXX: the entire lambda can be scheduled conditionally
// XXX: I'm not sure txn.empty() is what we want here
return !txn.empty() ? backend->store_object_state(os, *m, txn)
: seastar::now();
[m,&txn,this,pos=os.get()](OSDOp& osd_op) {
return do_osd_op(*pos, osd_op, txn);
}).then([&txn,m,this,os=std::move(os)]() mutable {
// XXX: the entire lambda could be scheduled conditionally. ::if_then()?
return txn.empty() ? seastar::now()
: backend->mutate_object(std::move(os), std::move(txn), *m);
});
}).then([&] {
return txn.empty() ? seastar::now()
: backend->submit_transaction(std::move(txn));
}).then([=] {
}).then([m,this] {
auto reply = make_message<MOSDOpReply>(m.get(), 0, get_osdmap_epoch(),
0, false);
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);

View File

@ -152,12 +152,12 @@ PGBackend::_load_ss(const hobject_t& oid)
}
seastar::future<>
PGBackend::store_object_state(
//const hobject_t& oid,
const cached_os_t os,
const MOSDOp& m,
ceph::os::Transaction& txn)
PGBackend::mutate_object(
cached_os_t&& os,
ceph::os::Transaction&& txn,
const MOSDOp& m)
{
logger().trace("mutate_object: num_ops={}", txn.get_num_ops());
if (os->exists) {
#if 0
os.oi.version = ctx->at_version;
@ -179,7 +179,7 @@ PGBackend::store_object_state(
// reset cached ObjectState without enforcing eviction
os->oi = object_info_t(os->oi.soid);
}
return seastar::now();
return store->do_transaction(coll, std::move(txn));
}
seastar::future<>
@ -271,9 +271,3 @@ seastar::future<> PGBackend::writefull(
}
return seastar::now();
}
seastar::future<> PGBackend::submit_transaction(ceph::os::Transaction&& txn)
{
logger().trace("submit_transaction: num_ops={}", txn.get_num_ops());
return store->do_transaction(coll, std::move(txn));
}

View File

@ -33,9 +33,6 @@ public:
ceph::os::CyanStore* store,
const ec_profile_t& ec_profile);
using cached_os_t = boost::local_shared_ptr<ObjectState>;
seastar::future<> store_object_state(const cached_os_t os,
const MOSDOp& m,
ceph::os::Transaction& txn);
seastar::future<cached_os_t> get_object_state(const hobject_t& oid);
seastar::future<> evict_object_state(const hobject_t& oid);
seastar::future<bufferlist> read(const object_info_t& oi,
@ -48,7 +45,10 @@ public:
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans);
seastar::future<> submit_transaction(ceph::os::Transaction&& txn);
seastar::future<> mutate_object(
cached_os_t&& os,
ceph::os::Transaction&& txn,
const MOSDOp& m);
protected:
const shard_id_t shard;

View File

@ -35,6 +35,8 @@ struct ObjectState {
ObjectState(const object_info_t &oi_, bool exists_)
: oi(oi_), exists(exists_) {}
ObjectState(object_info_t &&oi_, bool exists_)
: oi(std::move(oi_)), exists(exists_) {}
};
typedef std::shared_ptr<ObjectContext> ObjectContextRef;