Merge pull request #18405 from dillaman/wip-21849

librbd: default to sparse-reads for any IO operation over 64K

Reviewed-by: Mykola Golub <to.my.trociny@gmail.com>
This commit is contained in:
Mykola Golub 2017-10-24 11:02:49 +03:00 committed by GitHub
commit f2f893aeb1
12 changed files with 306 additions and 69 deletions

View File

@ -5257,6 +5257,18 @@ static std::vector<Option> get_rbd_options() {
.set_default(false)
.set_description("localize parent requests to closest OSD"),
Option("rbd_sparse_read_threshold_bytes", Option::TYPE_UINT,
Option::LEVEL_ADVANCED)
.set_default(64_K)
.set_description("threshold for issuing a sparse-read")
.set_long_description("minimum number of sequential bytes to read against "
"an object before issuing a sparse-read request to "
"the cluster. 0 implies it must be a full object read "
"to issue a sparse-read, 1 implies always use "
"sparse-read, and any value larger than the maximum "
"object size will disable sparse-read for all "
"requests"),
Option("rbd_readahead_trigger_requests", Option::TYPE_INT, Option::LEVEL_ADVANCED)
.set_default(10)
.set_description("number of sequential requests necessary to trigger readahead"),

View File

@ -980,6 +980,7 @@ struct C_InvalidateCache : public Context {
"rbd_localize_snap_reads", false)(
"rbd_balance_parent_reads", false)(
"rbd_localize_parent_reads", false)(
"rbd_sparse_read_threshold_bytes", false)(
"rbd_readahead_trigger_requests", false)(
"rbd_readahead_max_bytes", false)(
"rbd_readahead_disable_after_bytes", false)(
@ -1038,6 +1039,7 @@ struct C_InvalidateCache : public Context {
ASSIGN_OPTION(localize_snap_reads, bool);
ASSIGN_OPTION(balance_parent_reads, bool);
ASSIGN_OPTION(localize_parent_reads, bool);
ASSIGN_OPTION(sparse_read_threshold_bytes, uint64_t);
ASSIGN_OPTION(readahead_trigger_requests, int64_t);
ASSIGN_OPTION(readahead_max_bytes, int64_t);
ASSIGN_OPTION(readahead_disable_after_bytes, int64_t);
@ -1062,6 +1064,10 @@ struct C_InvalidateCache : public Context {
if (thread_safe) {
ASSIGN_OPTION(journal_pool, std::string);
}
if (sparse_read_threshold_bytes == 0) {
sparse_read_threshold_bytes = get_object_size();
}
}
ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {

View File

@ -52,8 +52,8 @@ namespace librbd {
namespace io {
class AioCompletion;
class AsyncOperation;
template <typename> class CopyupRequest;
template <typename> class ImageRequestWQ;
class CopyupRequest;
}
namespace journal { struct Policy; }
@ -140,7 +140,7 @@ namespace librbd {
Readahead readahead;
uint64_t total_bytes_read;
std::map<uint64_t, io::CopyupRequest*> copyup_list;
std::map<uint64_t, io::CopyupRequest<ImageCtx>*> copyup_list;
xlist<io::AsyncOperation*> async_ops;
xlist<AsyncRequest<>*> async_requests;
@ -176,6 +176,7 @@ namespace librbd {
bool localize_snap_reads;
bool balance_parent_reads;
bool localize_parent_reads;
uint64_t sparse_read_threshold_bytes;
uint32_t readahead_trigger_requests;
uint64_t readahead_max_bytes;
uint64_t readahead_disable_after_bytes;

View File

@ -80,11 +80,11 @@ private:
} // anonymous namespace
CopyupRequest::CopyupRequest(ImageCtx *ictx, const std::string &oid,
uint64_t objectno, Extents &&image_extents,
const ZTracer::Trace &parent_trace)
: m_ictx(ictx), m_oid(oid), m_object_no(objectno),
template <typename I>
CopyupRequest<I>::CopyupRequest(I *ictx, const std::string &oid,
uint64_t objectno, Extents &&image_extents,
const ZTracer::Trace &parent_trace)
: m_ictx(util::get_image_ctx(ictx)), m_oid(oid), m_object_no(objectno),
m_image_extents(image_extents),
m_trace(util::create_trace(*m_ictx, "copy-up", parent_trace)),
m_state(STATE_READ_FROM_PARENT)
@ -92,17 +92,20 @@ CopyupRequest::CopyupRequest(ImageCtx *ictx, const std::string &oid,
m_async_op.start_op(*m_ictx);
}
CopyupRequest::~CopyupRequest() {
template <typename I>
CopyupRequest<I>::~CopyupRequest() {
assert(m_pending_requests.empty());
m_async_op.finish_op();
}
void CopyupRequest::append_request(ObjectRequest<> *req) {
template <typename I>
void CopyupRequest<I>::append_request(ObjectRequest<I> *req) {
ldout(m_ictx->cct, 20) << req << dendl;
m_pending_requests.push_back(req);
}
void CopyupRequest::complete_requests(int r) {
template <typename I>
void CopyupRequest<I>::complete_requests(int r) {
while (!m_pending_requests.empty()) {
vector<ObjectRequest<> *>::iterator it = m_pending_requests.begin();
ObjectRequest<> *req = *it;
@ -112,7 +115,8 @@ void CopyupRequest::complete_requests(int r) {
}
}
bool CopyupRequest::send_copyup() {
template <typename I>
bool CopyupRequest<I>::send_copyup() {
bool add_copyup_op = !m_copyup_data.is_zero();
bool copy_on_read = m_pending_requests.empty();
if (!add_copyup_op && copy_on_read) {
@ -189,7 +193,8 @@ bool CopyupRequest::send_copyup() {
return false;
}
bool CopyupRequest::is_copyup_required() {
template <typename I>
bool CopyupRequest<I>::is_copyup_required() {
bool noop = true;
for (const ObjectRequest<> *req : m_pending_requests) {
if (!req->is_op_payload_empty()) {
@ -201,7 +206,8 @@ bool CopyupRequest::is_copyup_required() {
return (m_copyup_data.is_zero() && noop);
}
void CopyupRequest::send()
template <typename I>
void CopyupRequest<I>::send()
{
m_state = STATE_READ_FROM_PARENT;
AioCompletion *comp = AioCompletion::create_and_start(
@ -215,7 +221,8 @@ void CopyupRequest::send()
ReadResult{&m_copyup_data}, 0, m_trace);
}
void CopyupRequest::complete(int r)
template <typename I>
void CopyupRequest<I>::complete(int r)
{
if (should_complete(r)) {
complete_requests(r);
@ -223,7 +230,8 @@ void CopyupRequest::complete(int r)
}
}
bool CopyupRequest::should_complete(int r)
template <typename I>
bool CopyupRequest<I>::should_complete(int r)
{
CephContext *cct = m_ictx->cct;
ldout(cct, 20) << "oid " << m_oid
@ -277,17 +285,18 @@ bool CopyupRequest::should_complete(int r)
return (r < 0);
}
void CopyupRequest::remove_from_list()
template <typename I>
void CopyupRequest<I>::remove_from_list()
{
Mutex::Locker l(m_ictx->copyup_list_lock);
map<uint64_t, CopyupRequest*>::iterator it =
m_ictx->copyup_list.find(m_object_no);
auto it = m_ictx->copyup_list.find(m_object_no);
assert(it != m_ictx->copyup_list.end());
m_ictx->copyup_list.erase(it);
}
bool CopyupRequest::send_object_map_head() {
template <typename I>
bool CopyupRequest<I>::send_object_map_head() {
CephContext *cct = m_ictx->cct;
ldout(cct, 20) << dendl;
@ -346,7 +355,8 @@ bool CopyupRequest::send_object_map_head() {
return send_object_map();
}
bool CopyupRequest::send_object_map() {
template <typename I>
bool CopyupRequest<I>::send_object_map() {
// avoid possible recursive lock attempts
if (m_snap_ids.empty()) {
// no object map update required
@ -371,3 +381,5 @@ bool CopyupRequest::send_object_map() {
} // namespace io
} // namespace librbd
template class librbd::io::CopyupRequest<librbd::ImageCtx>;

View File

@ -26,13 +26,21 @@ namespace io {
struct AioCompletion;
template <typename I> class ObjectRequest;
template <typename ImageCtxT = librbd::ImageCtx>
class CopyupRequest {
public:
CopyupRequest(ImageCtx *ictx, const std::string &oid, uint64_t objectno,
static CopyupRequest* create(ImageCtxT *ictx, const std::string &oid,
uint64_t objectno, Extents &&image_extents,
const ZTracer::Trace &parent_trace) {
return new CopyupRequest(ictx, oid, objectno, std::move(image_extents),
parent_trace);
}
CopyupRequest(ImageCtxT *ictx, const std::string &oid, uint64_t objectno,
Extents &&image_extents, const ZTracer::Trace &parent_trace);
~CopyupRequest();
void append_request(ObjectRequest<ImageCtx> *req);
void append_request(ObjectRequest<ImageCtxT> *req);
void send();
@ -84,7 +92,7 @@ private:
State m_state;
ceph::bufferlist m_copyup_data;
std::vector<ObjectRequest<ImageCtx> *> m_pending_requests;
std::vector<ObjectRequest<ImageCtxT> *> m_pending_requests;
std::atomic<unsigned> m_pending_copyups { 0 };
AsyncOperation m_async_op;
@ -107,4 +115,6 @@ private:
} // namespace io
} // namespace librbd
extern template class librbd::io::CopyupRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IO_COPYUP_REQUEST_H

View File

@ -355,7 +355,7 @@ void ImageReadRequest<I>::send_request() {
aio_comp);
ObjectReadRequest<I> *req = ObjectReadRequest<I>::create(
&image_ctx, extent.oid.name, extent.objectno, extent.offset,
extent.length, extent.buffer_extents, snap_id, true, m_op_flags,
extent.length, extent.buffer_extents, snap_id, m_op_flags,
this->m_trace, req_comp);
req_comp->request = req;

View File

@ -31,6 +31,19 @@
namespace librbd {
namespace io {
namespace {
template <typename I>
inline bool is_copy_on_read(I *ictx, librados::snap_t snap_id) {
assert(ictx->snap_lock.is_locked());
return (ictx->clone_copy_on_read &&
!ictx->read_only && snap_id == CEPH_NOSNAP &&
(ictx->exclusive_lock == nullptr ||
ictx->exclusive_lock->is_lock_owner()));
}
} // anonymous namespace
template <typename I>
ObjectRequest<I>*
ObjectRequest<I>::create_remove(I *ictx, const std::string &oid,
@ -121,7 +134,7 @@ ObjectRequest<I>::create_compare_and_write(I *ictx, const std::string &oid,
}
template <typename I>
ObjectRequest<I>::ObjectRequest(ImageCtx *ictx, const std::string &oid,
ObjectRequest<I>::ObjectRequest(I *ictx, const std::string &oid,
uint64_t objectno, uint64_t off,
uint64_t len, librados::snap_t snap_id,
bool hide_enoent, const char *trace_name,
@ -186,35 +199,26 @@ bool ObjectRequest<I>::compute_parent_extents() {
return false;
}
static inline bool is_copy_on_read(ImageCtx *ictx, librados::snap_t snap_id) {
assert(ictx->snap_lock.is_locked());
return (ictx->clone_copy_on_read &&
!ictx->read_only && snap_id == CEPH_NOSNAP &&
(ictx->exclusive_lock == nullptr ||
ictx->exclusive_lock->is_lock_owner()));
}
/** read **/
template <typename I>
ObjectReadRequest<I>::ObjectReadRequest(I *ictx, const std::string &oid,
uint64_t objectno, uint64_t offset,
uint64_t len, Extents& be,
librados::snap_t snap_id, bool sparse,
int op_flags,
librados::snap_t snap_id, int op_flags,
const ZTracer::Trace &parent_trace,
Context *completion)
: ObjectRequest<I>(util::get_image_ctx(ictx), oid, objectno, offset, len,
snap_id, false, "read", parent_trace, completion),
m_buffer_extents(be), m_tried_parent(false), m_sparse(sparse),
m_op_flags(op_flags), m_state(LIBRBD_AIO_READ_FLAT) {
: ObjectRequest<I>(ictx, oid, objectno, offset, len, snap_id, false, "read",
parent_trace, completion),
m_buffer_extents(be), m_tried_parent(false), m_op_flags(op_flags),
m_state(LIBRBD_AIO_READ_FLAT) {
guard_read();
}
template <typename I>
void ObjectReadRequest<I>::guard_read()
{
ImageCtx *image_ctx = this->m_ictx;
I *image_ctx = this->m_ictx;
RWLock::RLocker snap_locker(image_ctx->snap_lock);
RWLock::RLocker parent_locker(image_ctx->parent_lock);
@ -227,7 +231,7 @@ void ObjectReadRequest<I>::guard_read()
template <typename I>
bool ObjectReadRequest<I>::should_complete(int r)
{
ImageCtx *image_ctx = this->m_ictx;
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << this->m_oid << " "
<< this->m_object_off << "~" << this->m_object_len
<< " r = " << r << dendl;
@ -302,7 +306,7 @@ bool ObjectReadRequest<I>::should_complete(int r)
template <typename I>
void ObjectReadRequest<I>::send() {
ImageCtx *image_ctx = this->m_ictx;
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << this->m_oid << " " << this->m_object_off
<< "~" << this->m_object_len
<< dendl;
@ -321,7 +325,7 @@ void ObjectReadRequest<I>::send() {
librados::ObjectReadOperation op;
int flags = image_ctx->get_read_flags(this->m_snap_id);
if (m_sparse) {
if (this->m_object_len >= image_ctx->sparse_read_threshold_bytes) {
op.sparse_read(this->m_object_off, this->m_object_len, &m_ext_map,
&m_read_data, nullptr);
} else {
@ -342,7 +346,7 @@ void ObjectReadRequest<I>::send() {
template <typename I>
void ObjectReadRequest<I>::send_copyup()
{
ImageCtx *image_ctx = this->m_ictx;
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << this->m_oid << " " << this->m_object_off
<< "~" << this->m_object_len << dendl;
@ -357,11 +361,10 @@ void ObjectReadRequest<I>::send_copyup()
}
Mutex::Locker copyup_locker(image_ctx->copyup_list_lock);
map<uint64_t, CopyupRequest*>::iterator it =
image_ctx->copyup_list.find(this->m_object_no);
auto it = image_ctx->copyup_list.find(this->m_object_no);
if (it == image_ctx->copyup_list.end()) {
// create and kick off a CopyupRequest
CopyupRequest *new_req = new CopyupRequest(
auto new_req = CopyupRequest<I>::create(
image_ctx, this->m_oid, this->m_object_no,
std::move(this->m_parent_extents), this->m_trace);
this->m_parent_extents.clear();
@ -374,15 +377,15 @@ void ObjectReadRequest<I>::send_copyup()
template <typename I>
void ObjectReadRequest<I>::read_from_parent(Extents&& parent_extents)
{
ImageCtx *image_ctx = this->m_ictx;
I *image_ctx = this->m_ictx;
AioCompletion *parent_completion = AioCompletion::create_and_start<
ObjectRequest<I> >(this, image_ctx, AIO_TYPE_READ);
ObjectRequest<I> >(this, util::get_image_ctx(image_ctx), AIO_TYPE_READ);
ldout(image_ctx->cct, 20) << "parent completion " << parent_completion
<< " extents " << parent_extents << dendl;
ImageRequest<>::aio_read(image_ctx->parent, parent_completion,
std::move(parent_extents),
ReadResult{&m_read_data}, 0, this->m_trace);
ImageRequest<I>::aio_read(image_ctx->parent, parent_completion,
std::move(parent_extents),
ReadResult{&m_read_data}, 0, this->m_trace);
}
/** write **/
@ -568,13 +571,10 @@ void AbstractObjectWriteRequest::send_copyup()
m_state = LIBRBD_AIO_WRITE_COPYUP;
m_ictx->copyup_list_lock.Lock();
map<uint64_t, CopyupRequest*>::iterator it =
m_ictx->copyup_list.find(m_object_no);
auto it = m_ictx->copyup_list.find(m_object_no);
if (it == m_ictx->copyup_list.end()) {
CopyupRequest *new_req = new CopyupRequest(m_ictx, m_oid,
m_object_no,
std::move(m_parent_extents),
this->m_trace);
auto new_req = CopyupRequest<>::create(
m_ictx, m_oid, m_object_no, std::move(m_parent_extents), this->m_trace);
m_parent_extents.clear();
// make sure to wait on this CopyupRequest

View File

@ -21,7 +21,7 @@ struct ImageCtx;
namespace io {
struct AioCompletion;
class CopyupRequest;
template <typename> class CopyupRequest;
class ObjectRemoveRequest;
class ObjectTruncateRequest;
class ObjectWriteRequest;
@ -97,7 +97,7 @@ public:
const ZTracer::Trace &parent_trace,
Context *completion);
ObjectRequest(ImageCtx *ictx, const std::string &oid,
ObjectRequest(ImageCtxT *ictx, const std::string &oid,
uint64_t objectno, uint64_t off, uint64_t len,
librados::snap_t snap_id, bool hide_enoent,
const char *trace_name, const ZTracer::Trace &parent_trace,
@ -129,7 +129,7 @@ public:
protected:
bool compute_parent_extents();
ImageCtx *m_ictx;
ImageCtxT *m_ictx;
std::string m_oid;
uint64_t m_object_no, m_object_off, m_object_len;
librados::snap_t m_snap_id;
@ -151,20 +151,19 @@ public:
static ObjectReadRequest* create(ImageCtxT *ictx, const std::string &oid,
uint64_t objectno, uint64_t offset,
uint64_t len, Extents &buffer_extents,
librados::snap_t snap_id, bool sparse,
int op_flags,
librados::snap_t snap_id, int op_flags,
const ZTracer::Trace &parent_trace,
Context *completion) {
return new ObjectReadRequest(ictx, oid, objectno, offset, len,
buffer_extents, snap_id, sparse, op_flags,
buffer_extents, snap_id, op_flags,
parent_trace, completion);
}
ObjectReadRequest(ImageCtxT *ictx, const std::string &oid,
uint64_t objectno, uint64_t offset, uint64_t len,
Extents& buffer_extents, librados::snap_t snap_id,
bool sparse, int op_flags,
const ZTracer::Trace &parent_trace, Context *completion);
int op_flags, const ZTracer::Trace &parent_trace,
Context *completion);
bool should_complete(int r) override;
void send() override;
@ -197,7 +196,6 @@ public:
private:
Extents m_buffer_extents;
bool m_tried_parent;
bool m_sparse;
int m_op_flags;
ceph::bufferlist m_read_data;
ExtentMap m_ext_map;

View File

@ -38,6 +38,7 @@ set(unittest_librbd_srcs
image/test_mock_RemoveRequest.cc
io/test_mock_ImageRequest.cc
io/test_mock_ImageRequestWQ.cc
io/test_mock_ObjectRequest.cc
journal/test_mock_OpenRequest.cc
journal/test_mock_PromoteRequest.cc
journal/test_mock_Replay.cc

View File

@ -136,8 +136,7 @@ struct ObjectReadRequest<librbd::MockTestImageCtx> : public ObjectRequest<librbd
const std::string &oid,
uint64_t objectno, uint64_t offset,
uint64_t len, Extents &buffer_extents,
librados::snap_t snap_id, bool sparse,
int op_flags,
librados::snap_t snap_id, int op_flags,
const ZTracer::Trace &parent_trace,
Context *completion) {
assert(s_instance != nullptr);

View File

@ -0,0 +1,189 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librbd/test_mock_fixture.h"
#include "test/librbd/test_support.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librbd/mock/MockObjectMap.h"
#include "test/librbd/mock/cache/MockImageCache.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "librbd/io/CopyupRequest.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ObjectRequest.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(ImageCtx &image_ctx) : MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
namespace util {
inline ImageCtx *get_image_ctx(MockImageCtx *image_ctx) {
return image_ctx->image_ctx;
}
} // namespace util
namespace io {
template <>
struct CopyupRequest<librbd::MockImageCtx> {
static CopyupRequest* create(librbd::MockImageCtx *ictx,
const std::string &oid, uint64_t objectno,
Extents &&image_extents,
const ZTracer::Trace &parent_trace) {
return nullptr;
}
MOCK_METHOD0(send, void());
};
template <>
struct ImageRequest<librbd::MockImageCtx> {
static void aio_read(librbd::MockImageCtx *ictx, AioCompletion *c,
Extents &&image_extents, ReadResult &&read_result,
int op_flags, const ZTracer::Trace &parent_trace) {
}
};
} // namespace io
} // namespace librbd
#include "librbd/io/ObjectRequest.cc"
namespace librbd {
namespace io {
using ::testing::_;
using ::testing::DoDefault;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::WithArg;
struct TestMockIoObjectRequest : public TestMockFixture {
typedef ObjectRequest<librbd::MockImageCtx> MockObjectRequest;
typedef ObjectReadRequest<librbd::MockImageCtx> MockObjectReadRequest;
void expect_object_may_exist(MockTestImageCtx &mock_image_ctx,
uint64_t object_no, bool exists) {
if (mock_image_ctx.object_map != nullptr) {
EXPECT_CALL(*mock_image_ctx.object_map, object_may_exist(object_no))
.WillOnce(Return(exists));
}
}
void expect_get_parent_overlap(MockTestImageCtx &mock_image_ctx,
librados::snap_t snap_id, uint64_t overlap,
int r) {
EXPECT_CALL(mock_image_ctx, get_parent_overlap(snap_id, _))
.WillOnce(WithArg<1>(Invoke([overlap, r](uint64_t *o) {
*o = overlap;
return r;
})));
}
void expect_prune_parent_extents(MockTestImageCtx &mock_image_ctx,
const MockObjectRequest::Extents& extents,
uint64_t overlap, uint64_t object_overlap) {
EXPECT_CALL(mock_image_ctx, prune_parent_extents(_, overlap))
.WillOnce(WithArg<0>(Invoke([extents, object_overlap](MockObjectRequest::Extents& e) {
e = extents;
return object_overlap;
})));
}
void expect_get_read_flags(MockTestImageCtx &mock_image_ctx,
librados::snap_t snap_id, int flags) {
EXPECT_CALL(mock_image_ctx, get_read_flags(snap_id))
.WillOnce(Return(flags));
}
void expect_read(MockTestImageCtx &mock_image_ctx,
const std::string& oid, uint64_t off, uint64_t len,
int r) {
auto& expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx),
read(oid, len, off, _));
if (r < 0) {
expect.WillOnce(Return(r));
} else {
expect.WillOnce(DoDefault());
}
}
void expect_sparse_read(MockTestImageCtx &mock_image_ctx,
const std::string& oid, uint64_t off, uint64_t len,
int r) {
auto& expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx),
sparse_read(oid, off, len, _, _));
if (r < 0) {
expect.WillOnce(Return(r));
} else {
expect.WillOnce(DoDefault());
}
}
};
TEST_F(TestMockIoObjectRequest, Read) {
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ictx->sparse_read_threshold_bytes = 8096;
MockTestImageCtx mock_image_ctx(*ictx);
MockObjectMap mock_object_map;
if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
mock_image_ctx.object_map = &mock_object_map;
}
InSequence seq;
expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 0, 0);
expect_prune_parent_extents(mock_image_ctx, {}, 0, 0);
expect_object_may_exist(mock_image_ctx, 0, true);
expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
expect_read(mock_image_ctx, "object0", 0, 4096, 0);
C_SaferCond ctx;
MockObjectReadRequest::Extents extents{{0, 4096}};
auto req = MockObjectReadRequest::create(
&mock_image_ctx, "object0", 0, 0, 4096, extents, CEPH_NOSNAP, 0, {}, &ctx);
req->send();
ASSERT_EQ(-ENOENT, ctx.wait());
}
TEST_F(TestMockIoObjectRequest, SparseReadThreshold) {
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ictx->sparse_read_threshold_bytes = ictx->get_object_size();
MockTestImageCtx mock_image_ctx(*ictx);
MockObjectMap mock_object_map;
if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
mock_image_ctx.object_map = &mock_object_map;
}
InSequence seq;
expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 0, 0);
expect_prune_parent_extents(mock_image_ctx, {}, 0, 0);
expect_object_may_exist(mock_image_ctx, 0, true);
expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
expect_sparse_read(mock_image_ctx, "object0", 0,
ictx->sparse_read_threshold_bytes, 0);
C_SaferCond ctx;
MockObjectReadRequest::Extents extents{
{0, ictx->sparse_read_threshold_bytes}};
auto req = MockObjectReadRequest::create(
&mock_image_ctx, "object0", 0, 0, ictx->sparse_read_threshold_bytes,
extents, CEPH_NOSNAP, 0, {}, &ctx);
req->send();
ASSERT_EQ(-ENOENT, ctx.wait());
}
} // namespace io
} // namespace librbd

View File

@ -66,6 +66,7 @@ struct MockImageCtx {
parent_lock(image_ctx.parent_lock),
object_map_lock(image_ctx.object_map_lock),
async_ops_lock(image_ctx.async_ops_lock),
copyup_list_lock(image_ctx.copyup_list_lock),
order(image_ctx.order),
size(image_ctx.size),
features(image_ctx.features),
@ -92,6 +93,7 @@ struct MockImageCtx {
concurrent_management_ops(image_ctx.concurrent_management_ops),
blacklist_on_break_lock(image_ctx.blacklist_on_break_lock),
blacklist_expire_seconds(image_ctx.blacklist_expire_seconds),
sparse_read_threshold_bytes(image_ctx.sparse_read_threshold_bytes),
journal_order(image_ctx.journal_order),
journal_splay_width(image_ctx.journal_splay_width),
journal_commit_age(image_ctx.journal_commit_age),
@ -148,6 +150,7 @@ struct MockImageCtx {
MOCK_CONST_METHOD0(get_current_size, uint64_t());
MOCK_CONST_METHOD1(get_image_size, uint64_t(librados::snap_t));
MOCK_CONST_METHOD1(get_object_count, uint64_t(librados::snap_t));
MOCK_CONST_METHOD1(get_read_flags, int(librados::snap_t));
MOCK_CONST_METHOD2(get_snap_id,
librados::snap_t(cls::rbd::SnapshotNamespace snap_namespace,
std::string in_snap_name));
@ -158,6 +161,8 @@ struct MockImageCtx {
ParentSpec *pspec));
MOCK_CONST_METHOD2(get_parent_overlap, int(librados::snap_t in_snap_id,
uint64_t *overlap));
MOCK_CONST_METHOD2(prune_parent_extents, uint64_t(vector<pair<uint64_t,uint64_t> >& ,
uint64_t));
MOCK_CONST_METHOD2(is_snap_protected, int(librados::snap_t in_snap_id,
bool *is_protected));
@ -247,6 +252,7 @@ struct MockImageCtx {
RWLock &parent_lock;
RWLock &object_map_lock;
Mutex &async_ops_lock;
Mutex &copyup_list_lock;
uint8_t order;
uint64_t size;
@ -268,6 +274,8 @@ struct MockImageCtx {
xlist<AsyncRequest<MockImageCtx>*> async_requests;
std::list<Context*> async_requests_waiters;
std::map<uint64_t, io::CopyupRequest<MockImageCtx>*> copyup_list;
io::MockImageRequestWQ *io_work_queue;
MockContextWQ *op_work_queue;
@ -292,6 +300,7 @@ struct MockImageCtx {
int concurrent_management_ops;
bool blacklist_on_break_lock;
uint32_t blacklist_expire_seconds;
uint64_t sparse_read_threshold_bytes;
uint8_t journal_order;
uint8_t journal_splay_width;
double journal_commit_age;