Merge branch 'next'

This commit is contained in:
Josh Durgin 2015-06-17 20:16:06 -07:00
commit 4a3f615068
13 changed files with 361 additions and 108 deletions

View File

@ -299,8 +299,8 @@ namespace librbd {
bool AbstractWrite::should_complete(int r)
{
ldout(m_ictx->cct, 20) << "write " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len
ldout(m_ictx->cct, 20) << get_write_type() << " " << this << " " << m_oid
<< " " << m_object_off << "~" << m_object_len
<< " should_complete: r = " << r << dendl;
bool finished = true;
@ -355,12 +355,13 @@ namespace librbd {
case LIBRBD_AIO_WRITE_COPYUP:
ldout(m_ictx->cct, 20) << "WRITE_COPYUP" << dendl;
m_state = LIBRBD_AIO_WRITE_GUARD;
if (r < 0) {
return should_complete(r);
m_state = LIBRBD_AIO_WRITE_ERROR;
complete(r);
finished = false;
} else {
finished = send_post();
}
finished = send_post();
break;
case LIBRBD_AIO_WRITE_FLAT:
@ -385,42 +386,50 @@ namespace librbd {
void AbstractWrite::send() {
assert(m_ictx->owner_lock.is_locked());
ldout(m_ictx->cct, 20) << "send " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
ldout(m_ictx->cct, 20) << "send " << get_write_type() << " " << this <<" "
<< m_oid << " " << m_object_off << "~"
<< m_object_len << dendl;
send_pre();
}
void AbstractWrite::send_pre() {
assert(m_ictx->owner_lock.is_locked());
RWLock::RLocker snap_lock(m_ictx->snap_lock);
if (!m_ictx->object_map.enabled()) {
send_write();
return;
bool write = false;
{
RWLock::RLocker snap_lock(m_ictx->snap_lock);
if (!m_ictx->object_map.enabled()) {
write = true;
} else {
// should have been flushed prior to releasing lock
assert(m_ictx->image_watcher->is_lock_owner());
ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
m_state = LIBRBD_AIO_WRITE_PRE;
uint8_t new_state;
boost::optional<uint8_t> current_state;
pre_object_map_update(&new_state);
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
if (m_ictx->object_map[m_object_no] != new_state) {
FunctionContext *ctx = new FunctionContext(
boost::bind(&AioRequest::complete, this, _1));
bool updated = m_ictx->object_map.aio_update(m_object_no, new_state,
current_state, ctx);
assert(updated);
} else {
write = true;
}
}
}
// should have been flushed prior to releasing lock
assert(m_ictx->image_watcher->is_lock_owner());
ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
m_state = LIBRBD_AIO_WRITE_PRE;
uint8_t new_state;
boost::optional<uint8_t> current_state;
pre_object_map_update(&new_state);
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
if (m_ictx->object_map[m_object_no] == new_state) {
// avoid possible recursive lock attempts
if (write) {
// no object map update required
send_write();
return;
}
FunctionContext *ctx = new FunctionContext(
boost::bind(&AioRequest::complete, this, _1));
bool updated = m_ictx->object_map.aio_update(m_object_no, new_state,
current_state, ctx);
assert(updated);
}
bool AbstractWrite::send_post() {
@ -501,4 +510,12 @@ namespace librbd {
wr->write(m_object_off, m_write_data);
wr->set_op_flags2(m_op_flags);
}
void AioRemove::guard_write() {
  // A removal only needs the write guard when snapshots exist, since a
  // deep copyup may then be required before the object is deleted.
  RWLock::RLocker snap_locker(m_ictx->snap_lock);
  if (m_ictx->snaps.empty()) {
    // no snapshots -> no copyup possible: skip the guard entirely
    return;
  }
  AbstractWrite::guard_write();
}
}

View File

@ -179,6 +179,7 @@ namespace librbd {
std::vector<librados::snap_t> m_snaps;
virtual void add_write_ops(librados::ObjectWriteOperation *wr) = 0;
virtual const char* get_write_type() const = 0;
virtual void guard_write();
virtual void pre_object_map_update(uint8_t *new_state) = 0;
virtual bool post_object_map_update() {
@ -208,6 +209,11 @@ namespace librbd {
}
protected:
virtual void add_write_ops(librados::ObjectWriteOperation *wr);
virtual const char* get_write_type() const {
return "write";
}
virtual void pre_object_map_update(uint8_t *new_state) {
*new_state = OBJECT_EXISTS;
}
@ -235,6 +241,12 @@ namespace librbd {
}
}
virtual const char* get_write_type() const {
if (has_parent()) {
return "remove (trunc)";
}
return "remove";
}
virtual void pre_object_map_update(uint8_t *new_state) {
if (has_parent()) {
m_object_state = OBJECT_EXISTS;
@ -251,14 +263,37 @@ namespace librbd {
return true;
}
virtual void guard_write() {
// do nothing to disable write guard
}
virtual void guard_write();
private:
uint8_t m_object_state;
};
// AioTrim removes an entire object as part of an image trim.  Unlike
// AioRemove it does not override guard_write(), so the base-class write
// guard stays in effect for the removal.
class AioTrim : public AbstractWrite {
public:
// offset/length are 0/0 since the whole object is being removed
AioTrim(ImageCtx *ictx, const std::string &oid, uint64_t object_no,
const ::SnapContext &snapc, Context *completion)
: AbstractWrite(ictx, oid, object_no, 0, 0, snapc, completion, true) {
}
protected:
// issue a full-object remove op
virtual void add_write_ops(librados::ObjectWriteOperation *wr) {
wr->remove();
}
virtual const char* get_write_type() const {
return "remove (trim)";
}
// flag the object as OBJECT_PENDING in the object map before the remove
virtual void pre_object_map_update(uint8_t *new_state) {
*new_state = OBJECT_PENDING;
}
// returning true presumably requests a post-op object map update as
// well -- TODO confirm against AbstractWrite::send_post
virtual bool post_object_map_update() {
return true;
}
};
class AioTruncate : public AbstractWrite {
public:
AioTruncate(ImageCtx *ictx, const std::string &oid, uint64_t object_no,
@ -274,6 +309,10 @@ namespace librbd {
wr->truncate(m_object_off);
}
virtual const char* get_write_type() const {
return "truncate";
}
virtual void pre_object_map_update(uint8_t *new_state) {
*new_state = OBJECT_EXISTS;
}
@ -294,6 +333,10 @@ namespace librbd {
wr->zero(m_object_off, m_object_len);
}
virtual const char* get_write_type() const {
return "zero";
}
virtual void pre_object_map_update(uint8_t *new_state) {
*new_state = OBJECT_EXISTS;
}

View File

@ -29,10 +29,6 @@ public:
return m_new_size;
}
inline uint64_t get_parent_overlap() const {
return m_new_parent_overlap;
}
private:
/**
* Resize goes through the following state machine to resize the image

View File

@ -24,25 +24,49 @@
namespace librbd
{
class AsyncTrimObjectContext : public C_AsyncObjectThrottle {
class C_CopyupObject : public C_AsyncObjectThrottle {
public:
AsyncTrimObjectContext(AsyncObjectThrottle &throttle, ImageCtx *image_ctx,
uint64_t object_no)
C_CopyupObject(AsyncObjectThrottle &throttle, ImageCtx *image_ctx,
::SnapContext snapc, uint64_t object_no)
: C_AsyncObjectThrottle(throttle, *image_ctx), m_snapc(snapc),
m_object_no(object_no)
{
}
virtual int send() {
assert(m_image_ctx.owner_lock.is_locked());
assert(!m_image_ctx.image_watcher->is_lock_supported() ||
m_image_ctx.image_watcher->is_lock_owner());
string oid = m_image_ctx.get_object_name(m_object_no);
ldout(m_image_ctx.cct, 10) << "removing (with copyup) " << oid << dendl;
AbstractWrite *req = new AioTrim(&m_image_ctx, oid, m_object_no, m_snapc,
this);
req->send();
return 0;
}
private:
::SnapContext m_snapc;
uint64_t m_object_no;
};
class C_RemoveObject : public C_AsyncObjectThrottle {
public:
C_RemoveObject(AsyncObjectThrottle &throttle, ImageCtx *image_ctx,
uint64_t object_no)
: C_AsyncObjectThrottle(throttle, *image_ctx), m_object_no(object_no)
{
}
virtual int send() {
assert(m_image_ctx.owner_lock.is_locked());
assert(!m_image_ctx.image_watcher->is_lock_supported() ||
m_image_ctx.image_watcher->is_lock_owner());
if (!m_image_ctx.object_map.object_may_exist(m_object_no)) {
return 1;
}
if (m_image_ctx.image_watcher->is_lock_supported() &&
!m_image_ctx.image_watcher->is_lock_owner()) {
return -ERESTART;
}
string oid = m_image_ctx.get_object_name(m_object_no);
ldout(m_image_ctx.cct, 10) << "removing " << oid << dendl;
@ -61,7 +85,8 @@ private:
AsyncTrimRequest::AsyncTrimRequest(ImageCtx &image_ctx, Context *on_finish,
uint64_t original_size, uint64_t new_size,
ProgressContext &prog_ctx)
: AsyncRequest(image_ctx, on_finish), m_new_size(new_size), m_prog_ctx(prog_ctx)
: AsyncRequest(image_ctx, on_finish), m_new_size(new_size),
m_prog_ctx(prog_ctx)
{
uint64_t period = m_image_ctx.get_stripe_period();
uint64_t new_num_periods = ((m_new_size + period - 1) / period);
@ -89,6 +114,11 @@ bool AsyncTrimRequest::should_complete(int r)
}
switch (m_state) {
case STATE_COPYUP_OBJECTS:
ldout(cct, 5) << " COPYUP_OBJECTS" << dendl;
send_pre_remove();
break;
case STATE_PRE_REMOVE:
ldout(cct, 5) << " PRE_REMOVE" << dendl;
{
@ -112,7 +142,7 @@ bool AsyncTrimRequest::should_complete(int r)
case STATE_CLEAN_BOUNDARY:
ldout(cct, 5) << "CLEAN_BOUNDARY" << dendl;
finish();
finish(0);
break;
case STATE_FINISHED:
@ -128,12 +158,58 @@ bool AsyncTrimRequest::should_complete(int r)
}
void AsyncTrimRequest::send() {
send_copyup_objects();
}
void AsyncTrimRequest::send_copyup_objects() {
assert(m_image_ctx.owner_lock.is_locked());
if (m_delete_start < m_num_objects) {
send_pre_remove();
} else {
assert(!m_image_ctx.image_watcher->is_lock_supported() ||
m_image_ctx.image_watcher->is_lock_owner());
if (m_delete_start >= m_num_objects) {
send_clean_boundary();
return;
}
::SnapContext snapc;
bool has_snapshots;
uint64_t parent_overlap;
{
RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
RWLock::RLocker parent_locker(m_image_ctx.parent_lock);
snapc = m_image_ctx.snapc;
has_snapshots = !m_image_ctx.snaps.empty();
int r = m_image_ctx.get_parent_overlap(m_image_ctx.get_copyup_snap_id(),
&parent_overlap);
assert(r == 0);
}
// copyup is only required for portion of image that overlaps parent
uint64_t copyup_end = Striper::get_num_objects(m_image_ctx.layout,
parent_overlap);
// TODO: protect against concurrent shrink and snap create?
if (copyup_end <= m_delete_start || !has_snapshots) {
send_pre_remove();
return;
}
uint64_t copyup_start = m_delete_start;
m_delete_start = copyup_end;
ldout(m_image_ctx.cct, 5) << this << " send_copyup_objects: "
<< " start object=" << copyup_start << ", "
<< " end object=" << copyup_end << dendl;
m_state = STATE_COPYUP_OBJECTS;
Context *ctx = create_callback_context();
AsyncObjectThrottle::ContextFactory context_factory(
boost::lambda::bind(boost::lambda::new_ptr<C_CopyupObject>(),
boost::lambda::_1, &m_image_ctx, snapc, boost::lambda::_2));
AsyncObjectThrottle *throttle = new AsyncObjectThrottle(
this, m_image_ctx, context_factory, ctx, &m_prog_ctx, copyup_start,
copyup_end);
throttle->start_ops(m_image_ctx.concurrent_management_ops);
}
void AsyncTrimRequest::send_remove_objects() {
@ -146,7 +222,7 @@ void AsyncTrimRequest::send_remove_objects() {
Context *ctx = create_callback_context();
AsyncObjectThrottle::ContextFactory context_factory(
boost::lambda::bind(boost::lambda::new_ptr<AsyncTrimObjectContext>(),
boost::lambda::bind(boost::lambda::new_ptr<C_RemoveObject>(),
boost::lambda::_1, &m_image_ctx, boost::lambda::_2));
AsyncObjectThrottle *throttle = new AsyncObjectThrottle(
this, m_image_ctx, context_factory, ctx, &m_prog_ctx, m_delete_start,
@ -156,6 +232,10 @@ void AsyncTrimRequest::send_remove_objects() {
void AsyncTrimRequest::send_pre_remove() {
assert(m_image_ctx.owner_lock.is_locked());
if (m_delete_start >= m_num_objects) {
send_clean_boundary();
return;
}
bool remove_objects = false;
{
@ -228,16 +308,17 @@ void AsyncTrimRequest::send_clean_boundary() {
assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
if (m_delete_off <= m_new_size) {
finish();
finish(0);
return;
}
// should have been canceled prior to releasing lock
assert(!m_image_ctx.image_watcher->is_lock_supported() ||
m_image_ctx.image_watcher->is_lock_owner());
uint64_t delete_len = m_delete_off - m_new_size;
ldout(m_image_ctx.cct, 5) << this << " send_clean_boundary: "
<< " delete_start=" << m_delete_start
<< " num_objects=" << m_num_objects << dendl;
<< " delete_off=" << m_delete_off
<< " length=" << delete_len << dendl;
m_state = STATE_CLEAN_BOUNDARY;
::SnapContext snapc;
@ -249,8 +330,8 @@ void AsyncTrimRequest::send_clean_boundary() {
// discard the weird boundary
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, m_image_ctx.format_string,
&m_image_ctx.layout, m_new_size,
m_delete_off - m_new_size, 0, extents);
&m_image_ctx.layout, m_new_size, delete_len, 0,
extents);
ContextCompletion *completion =
new ContextCompletion(create_callback_context(), true);
@ -261,8 +342,8 @@ void AsyncTrimRequest::send_clean_boundary() {
AbstractWrite *req;
if (p->offset == 0) {
req = new AioRemove(&m_image_ctx, p->oid.name, p->objectno, snapc,
req_comp);
req = new AioTrim(&m_image_ctx, p->oid.name, p->objectno, snapc,
req_comp);
} else {
req = new AioTruncate(&m_image_ctx, p->oid.name, p->objectno,
p->offset, snapc, req_comp);
@ -272,9 +353,9 @@ void AsyncTrimRequest::send_clean_boundary() {
completion->finish_adding_requests();
}
void AsyncTrimRequest::finish() {
void AsyncTrimRequest::finish(int r) {
m_state = STATE_FINISHED;
async_complete(0);
async_complete(r);
}
} // namespace librbd

View File

@ -31,7 +31,11 @@ protected:
* | . .
* | . . . . . . . . . . . . .
* | . .
* v v .
* v . .
* STATE_COPYUP_OBJECTS . . . . .
* | . . .
* | . . .
* v v v .
* STATE_PRE_REMOVE ---> STATE_REMOVE_OBJECTS .
* | . . .
* /-----------------------/ . . . . . . . .
@ -44,6 +48,8 @@ protected:
*
* @endverbatim
*
* The _COPYUP_OBJECTS state is skipped if there is no parent overlap
* within the new image size and the image does not have any snapshots.
* The _PRE_REMOVE/_POST_REMOVE states are skipped if the object map
* isn't enabled. The _REMOVE_OBJECTS state is skipped if no whole objects
* are removed. The _CLEAN_BOUNDARY state is skipped if no boundary
@ -52,6 +58,7 @@ protected:
*/
enum State {
STATE_COPYUP_OBJECTS,
STATE_PRE_REMOVE,
STATE_REMOVE_OBJECTS,
STATE_POST_REMOVE,
@ -70,11 +77,12 @@ private:
uint64_t m_new_size;
ProgressContext &m_prog_ctx;
void send_copyup_objects();
void send_remove_objects();
void send_pre_remove();
void send_post_remove();
void send_clean_boundary();
void finish();
void finish(int r);
};
} // namespace librbd

View File

@ -104,8 +104,9 @@ private:
bool add_copyup_op = !m_copyup_data.is_zero();
bool copy_on_read = m_pending_requests.empty();
if (!add_copyup_op && copy_on_read) {
// no copyup data and CoR operation
return true;
// copyup empty object to prevent future CoR attempts
m_copyup_data.clear();
add_copyup_op = true;
}
ldout(m_ictx->cct, 20) << __func__ << " " << this
@ -166,8 +167,7 @@ private:
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(create_callback_context(), NULL,
rados_ctx_cb);
r = m_ictx->md_ctx.aio_operate(m_oid, comp, &write_op, snapc.seq.val,
snaps);
r = m_ictx->data_ctx.aio_operate(m_oid, comp, &write_op);
assert(r == 0);
comp->release();
}
@ -221,10 +221,8 @@ private:
case STATE_READ_FROM_PARENT:
ldout(cct, 20) << "READ_FROM_PARENT" << dendl;
remove_from_list();
if (r >= 0) {
if (r >= 0 || r == -ENOENT) {
return send_object_map();
} else if (r == -ENOENT) {
return send_copyup();
}
break;
@ -238,7 +236,12 @@ private:
pending_copyups = m_pending_copyups.dec();
ldout(cct, 20) << "COPYUP (" << pending_copyups << " pending)"
<< dendl;
if (r < 0) {
if (r == -ENOENT) {
// hide the -ENOENT error if this is the last op
if (pending_copyups == 0) {
complete_requests(0);
}
} else if (r < 0) {
complete_requests(r);
}
return (pending_copyups == 0);
@ -266,17 +269,20 @@ private:
RWLock::RLocker owner_locker(m_ictx->owner_lock);
RWLock::RLocker snap_locker(m_ictx->snap_lock);
if (m_ictx->object_map.enabled()) {
bool copy_on_read = m_pending_requests.empty();
if (!m_ictx->image_watcher->is_lock_owner()) {
ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
<< dendl;
assert(m_pending_requests.empty());
ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
<< dendl;
assert(copy_on_read);
return true;
}
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
if (m_ictx->object_map[m_object_no] != OBJECT_EXISTS ||
!m_ictx->snaps.empty()) {
if (copy_on_read && m_ictx->object_map[m_object_no] != OBJECT_EXISTS) {
// CoW already updates the HEAD object map
m_snap_ids.push_back(CEPH_NOSNAP);
}
if (!m_ictx->snaps.empty()) {
m_snap_ids.insert(m_snap_ids.end(), m_ictx->snaps.begin(),
m_ictx->snaps.end());
}

View File

@ -572,12 +572,6 @@ public:
int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const
{
assert(snap_lock.is_locked());
if (in_snap_id == CEPH_NOSNAP && !async_resize_reqs.empty() &&
async_resize_reqs.front()->shrinking()) {
*overlap = async_resize_reqs.front()->get_parent_overlap();
return 0;
}
const parent_info *info = get_parent_info(in_snap_id);
if (info) {
*overlap = info->overlap;
@ -586,6 +580,17 @@ public:
return -ENOENT;
}
uint64_t ImageCtx::get_copyup_snap_id() const
{
  assert(snap_lock.is_locked());
  // A copyup needs the largest possible parent overlap, which is
  // provided by the oldest snapshot whenever one exists.
  if (snaps.empty()) {
    return CEPH_NOSNAP;
  }
  return snaps.back();
}
void ImageCtx::aio_read_from_cache(object_t o, uint64_t object_no,
bufferlist *bl, size_t len,
uint64_t off, Context *onfinish,
@ -747,22 +752,6 @@ public:
image_watcher = NULL;
}
size_t ImageCtx::parent_io_len(uint64_t offset, size_t length,
snap_t in_snap_id)
{
uint64_t overlap = 0;
get_parent_overlap(in_snap_id, &overlap);
size_t parent_len = 0;
if (get_parent_pool_id(in_snap_id) != -1 && offset <= overlap)
parent_len = min(overlap, offset + length) - offset;
ldout(cct, 20) << __func__ << " off = " << offset << " len = " << length
<< " overlap = " << overlap << " parent_io_len = "
<< parent_len << dendl;
return parent_len;
}
uint64_t ImageCtx::prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
uint64_t overlap)
{

View File

@ -210,6 +210,7 @@ namespace librbd {
uint64_t get_parent_snap_id(librados::snap_t in_snap_id) const;
int get_parent_overlap(librados::snap_t in_snap_id,
uint64_t *overlap) const;
uint64_t get_copyup_snap_id() const;
void aio_read_from_cache(object_t o, uint64_t object_no, bufferlist *bl,
size_t len, uint64_t off, Context *onfinish,
int fadvise_flags);
@ -225,8 +226,6 @@ namespace librbd {
void clear_nonexistence_cache();
int register_watch();
void unregister_watch();
size_t parent_io_len(uint64_t offset, size_t length,
librados::snap_t in_snap_id);
uint64_t prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
uint64_t overlap);

View File

@ -678,7 +678,7 @@ void ObjectWriteOperation::set_alloc_hint(uint64_t expected_object_size,
void ObjectWriteOperation::truncate(uint64_t off) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(boost::bind(&TestIoCtxImpl::truncate, _1, _2, off));
o->ops.push_back(boost::bind(&TestIoCtxImpl::truncate, _1, _2, off, _4));
}
void ObjectWriteOperation::write(uint64_t off, const bufferlist& bl) {

View File

@ -113,7 +113,8 @@ public:
std::map<uint64_t,uint64_t> *m,
bufferlist *data_bl) = 0;
virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) = 0;
virtual int truncate(const std::string& oid, uint64_t size) = 0;
virtual int truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) = 0;
virtual int tmap_update(const std::string& oid, bufferlist& cmdbl);
virtual int unwatch(uint64_t handle);
virtual int watch(const std::string& o, uint64_t *handle,

View File

@ -385,7 +385,8 @@ int TestMemIoCtxImpl::stat(const std::string& oid, uint64_t *psize,
return 0;
}
int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size) {
int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) {
if (get_snap_read() != CEPH_NOSNAP) {
return -EROFS;
}
@ -393,7 +394,7 @@ int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size) {
TestMemRadosClient::SharedFile file;
{
RWLock::WLocker l(m_pool->file_lock);
file = get_file(oid, true, get_snap_context());
file = get_file(oid, true, snapc);
}
RWLock::WLocker l(file->lock);
@ -510,7 +511,7 @@ int TestMemIoCtxImpl::zero(const std::string& oid, uint64_t off, uint64_t len) {
}
}
if (truncate_redirect) {
return truncate(oid, off);
return truncate(oid, off, get_snap_context());
}
bufferlist bl;

View File

@ -43,7 +43,8 @@ public:
virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl);
virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime);
virtual int truncate(const std::string& oid, uint64_t size);
virtual int truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc);
virtual int write(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc);
virtual int write_full(const std::string& oid, bufferlist& bl,

View File

@ -493,3 +493,114 @@ TEST_F(TestInternal, SnapshotCopyup)
}
}
// Verify that shrinking a clone triggers copyup of the removed objects,
// so a pre-existing snapshot of the clone still reads the original data
// even when its parent link is severed.
TEST_F(TestInternal, ResizeCopyup)
{
REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
// create a small (16K, 4K objects) parent image and fill it with '1's
m_image_name = get_temp_image_name();
m_image_size = 1 << 14;
uint64_t features = 0;
get_features(&features);
int order = 12;
ASSERT_EQ(0, m_rbd.create2(m_ioctx, m_image_name.c_str(), m_image_size,
features, &order));
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
bufferlist bl;
bl.append(std::string(4096, '1'));
for (size_t i = 0; i < m_image_size; i += bl.length()) {
ASSERT_EQ(bl.length(), librbd::write(ictx, i, bl.length(), bl.c_str(), 0));
}
// snapshot + protect the parent, then clone it and snapshot the clone
ASSERT_EQ(0, librbd::snap_create(ictx, "snap1"));
ASSERT_EQ(0, librbd::snap_protect(ictx, "snap1"));
std::string clone_name = get_temp_image_name();
ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx,
clone_name.c_str(), features, &order, 0, 0));
librbd::ImageCtx *ictx2;
ASSERT_EQ(0, open_image(clone_name, &ictx2));
ASSERT_EQ(0, librbd::snap_create(ictx2, "snap1"));
bufferptr read_ptr(bl.length());
bufferlist read_bl;
read_bl.push_back(read_ptr);
// verify that full and partial object removal properly trigger copyup:
// shrink by one whole object plus a 32-byte partial-object boundary
librbd::NoOpProgressContext no_op;
ASSERT_EQ(0, librbd::resize(ictx2, m_image_size - (1 << order) - 32, no_op));
ASSERT_EQ(0, librbd::snap_set(ictx2, "snap1"));
{
// hide the parent from the snapshot so reads below can only be
// satisfied by copied-up data, not by parent reads
RWLock::WLocker snap_locker(ictx2->snap_lock);
ictx2->snap_info.begin()->second.parent = librbd::parent_info();
}
// the snapshot must still see the original '1'-filled data in the
// trimmed region (starting at object 2)
for (size_t i = 2 << order; i < m_image_size; i += bl.length()) {
ASSERT_EQ(bl.length(), librbd::read(ictx2, i, bl.length(), read_bl.c_str(),
0));
ASSERT_TRUE(bl.contents_equal(read_bl));
}
}
// Verify that discarding a range of a clone triggers copyup, so a
// pre-existing snapshot of the clone still reads the original data even
// when its parent link is severed.
TEST_F(TestInternal, DiscardCopyup)
{
REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
// create a small (16K, 4K objects) parent image and fill it with '1's
m_image_name = get_temp_image_name();
m_image_size = 1 << 14;
uint64_t features = 0;
get_features(&features);
int order = 12;
ASSERT_EQ(0, m_rbd.create2(m_ioctx, m_image_name.c_str(), m_image_size,
features, &order));
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
bufferlist bl;
bl.append(std::string(4096, '1'));
for (size_t i = 0; i < m_image_size; i += bl.length()) {
ASSERT_EQ(bl.length(), librbd::write(ictx, i, bl.length(), bl.c_str(), 0));
}
// snapshot + protect the parent, then clone it and snapshot the clone
ASSERT_EQ(0, librbd::snap_create(ictx, "snap1"));
ASSERT_EQ(0, librbd::snap_protect(ictx, "snap1"));
std::string clone_name = get_temp_image_name();
ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap1", m_ioctx,
clone_name.c_str(), features, &order, 0, 0));
librbd::ImageCtx *ictx2;
ASSERT_EQ(0, open_image(clone_name, &ictx2));
ASSERT_EQ(0, librbd::snap_create(ictx2, "snap1"));
bufferptr read_ptr(bl.length());
bufferlist read_bl;
read_bl.push_back(read_ptr);
// discard almost the whole clone (32-byte margins at both ends)
ASSERT_EQ(static_cast<int>(m_image_size - 64),
librbd::discard(ictx2, 32, m_image_size - 64));
ASSERT_EQ(0, librbd::snap_set(ictx2, "snap1"));
{
// hide the parent from the snapshot so reads below can only be
// satisfied by copied-up data, not by parent reads
RWLock::WLocker snap_locker(ictx2->snap_lock);
ictx2->snap_info.begin()->second.parent = librbd::parent_info();
}
// the snapshot must still see the original '1'-filled data everywhere
for (size_t i = 0; i < m_image_size; i += bl.length()) {
ASSERT_EQ(bl.length(), librbd::read(ictx2, i, bl.length(), read_bl.c_str(),
0));
ASSERT_TRUE(bl.contents_equal(read_bl));
}
}