mirror of
https://github.com/ceph/ceph
synced 2025-02-23 11:07:35 +00:00
os/bluestore: _do_remove: dirty shard individually as each blob is unshared
Two problems with the old code: (1) the dirty_shard range is inclusive, so we might dirty the shard after b_end; (2) we might unshare blobs in two shards with an unloaded shard in between, which would mean dirtying a shard that isn't loaded. Fix by ensuring the shard for each unshared blob is dirtied individually. Fixes: http://tracker.ceph.com/issues/20849 Signed-off-by: Sage Weil <sage@redhat.com>
This commit is contained in:
parent
95a0288368
commit
c275379348
@ -10504,8 +10504,6 @@ int BlueStore::_do_remove(
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t b_start = OBJECT_MAX_SIZE;
|
||||
uint32_t b_end = 0;
|
||||
for (auto& e : h->extent_map.extent_map) {
|
||||
const bluestore_blob_t& b = e.blob->get_blob();
|
||||
SharedBlob *sb = e.blob->shared_blob.get();
|
||||
@ -10515,17 +10513,9 @@ int BlueStore::_do_remove(
|
||||
dout(20) << __func__ << " unsharing " << e << dendl;
|
||||
bluestore_blob_t& blob = e.blob->dirty_blob();
|
||||
blob.clear_flag(bluestore_blob_t::FLAG_SHARED);
|
||||
if (e.logical_offset < b_start) {
|
||||
b_start = e.logical_offset;
|
||||
}
|
||||
if (e.logical_end() > b_end) {
|
||||
b_end = e.logical_end();
|
||||
}
|
||||
h->extent_map.dirty_range(e.logical_offset, 1);
|
||||
}
|
||||
}
|
||||
|
||||
assert(b_end > b_start);
|
||||
h->extent_map.dirty_range(b_start, b_end - b_start);
|
||||
txc->write_onode(h);
|
||||
|
||||
return 0;
|
||||
|
Loading…
Reference in New Issue
Block a user