kv/KeyValueDB, os/bluestore: Made fsck faster by not caching results

Added the ability to pass extra options when creating KV iterators.
Used that ability to run fsck scans without caching the data they read.

Signed-off-by: Adam Kupczyk <akupczyk@redhat.com>
Author: Adam Kupczyk
Date:   2020-04-27 11:49:19 +02:00
Parent: 63d690fcc5
Commit: f2db105438
8 changed files with 28 additions and 24 deletions
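
For context, a minimal caller-side sketch (not part of the patch) of the new API: it requests an iterator that skips the backend cache, the way the BlueStore fsck call sites below do. The helper name and the bare KeyValueDB* parameter are illustrative only.

// Sketch only: scan one prefix without polluting the KV backend's cache.
// `db` is assumed to be an open KeyValueDB*; the prefix is whatever the
// caller normally iterates over (BlueStore uses e.g. PREFIX_STAT).
#include <string>
#include "kv/KeyValueDB.h"

void scan_without_caching(KeyValueDB *db, const std::string &prefix)
{
  KeyValueDB::Iterator it =
    db->get_iterator(prefix, KeyValueDB::ITERATOR_NOCACHE);
  if (!it) {
    return;
  }
  for (it->lower_bound(std::string()); it->valid(); it->next()) {
    // inspect it->key() / it->value() just as the fsck loops do
  }
}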


@@ -312,12 +312,13 @@ private:
     }
   };
 public:
-  virtual WholeSpaceIterator get_wholespace_iterator() = 0;
-  virtual Iterator get_iterator(const std::string &prefix) {
+  typedef uint32_t IteratorOpts;
+  static const uint32_t ITERATOR_NOCACHE = 1;
+  virtual WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) = 0;
+  virtual Iterator get_iterator(const std::string &prefix, IteratorOpts opts = 0) {
     return std::make_shared<PrefixIteratorImpl>(
       prefix,
-      get_wholespace_iterator());
+      get_wholespace_iterator(opts));
   }
   virtual uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) = 0;


@@ -402,7 +402,7 @@ err:
   }
-  WholeSpaceIterator get_wholespace_iterator() override {
+  WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override {
     return std::make_shared<LevelDBWholeSpaceIteratorImpl>(
       db->NewIterator(leveldb::ReadOptions()));
   }


@@ -212,7 +212,7 @@ public:
     return 0;
   }
-  WholeSpaceIterator get_wholespace_iterator() override {
+  WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override {
     return std::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
       new MDBWholeSpaceIteratorImpl(&m_map, &m_lock, &iterator_seq_no, m_using_btree));
   }


@@ -2625,7 +2625,7 @@ public:
   }
 };
-KeyValueDB::Iterator RocksDBStore::get_iterator(const std::string& prefix)
+KeyValueDB::Iterator RocksDBStore::get_iterator(const std::string& prefix, IteratorOpts opts)
 {
   auto cf_it = cf_handles.find(prefix);
   if (cf_it != cf_handles.end()) {
@@ -2640,7 +2640,7 @@ KeyValueDB::Iterator RocksDBStore::get_iterator(const std::string& prefix)
         cf_it->second.handles);
     }
   } else {
-    return KeyValueDB::get_iterator(prefix);
+    return KeyValueDB::get_iterator(prefix, opts);
   }
 }
@@ -2649,11 +2649,14 @@ rocksdb::Iterator* RocksDBStore::new_shard_iterator(rocksdb::ColumnFamilyHandle*
   return db->NewIterator(rocksdb::ReadOptions(), cf);
 }
-RocksDBStore::WholeSpaceIterator RocksDBStore::get_wholespace_iterator()
+RocksDBStore::WholeSpaceIterator RocksDBStore::get_wholespace_iterator(IteratorOpts opts)
 {
   if (cf_handles.size() == 0) {
+    rocksdb::ReadOptions opt = rocksdb::ReadOptions();
+    if (opts & ITERATOR_NOCACHE)
+      opt.fill_cache=false;
     return std::make_shared<RocksDBWholeSpaceIteratorImpl>(
-      db->NewIterator(rocksdb::ReadOptions(), default_cf));
+      db->NewIterator(opt, default_cf));
   } else {
     return std::make_shared<WholeMergeIteratorImpl>(this);
   }
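
The RocksDB mapping above comes down to ReadOptions::fill_cache, which controls whether blocks touched by a read are inserted into the block cache; leaving it off keeps a one-pass fsck scan from evicting hot data. A standalone sketch against plain RocksDB (no Ceph types; the function name is illustrative) showing the same knob:

// Sketch only: iterate a RocksDB database without filling its block cache.
#include <memory>
#include <rocksdb/db.h>

void scan_all_nocache(rocksdb::DB *db)
{
  rocksdb::ReadOptions ropts;
  ropts.fill_cache = false;  // blocks read by this scan are not cached

  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ropts));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // it->key() / it->value() are valid here
  }
}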


@@ -450,7 +450,7 @@ public:
     size_t value_size() override;
   };
-  Iterator get_iterator(const std::string& prefix) override;
+  Iterator get_iterator(const std::string& prefix, IteratorOpts opts = 0) override;
 private:
   /// this iterator spans single cf
   rocksdb::Iterator* new_shard_iterator(rocksdb::ColumnFamilyHandle* cf);
@@ -565,7 +565,7 @@ err:
       bbt_opts.block_cache);
   }
-  WholeSpaceIterator get_wholespace_iterator() override;
+  WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override;
 private:
   WholeSpaceIterator get_default_cf_iterator();
 };


@@ -6138,7 +6138,7 @@ void BlueStore::_fsck_collections(int64_t* errors)
 {
   if (collections_had_errors) {
     dout(10) << __func__ << dendl;
-    KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL);
+    KeyValueDB::Iterator it = db->get_iterator(PREFIX_COLL, KeyValueDB::ITERATOR_NOCACHE);
     for (it->upper_bound(string());
          it->valid();
          it->next()) {
@@ -6187,7 +6187,7 @@ void BlueStore::_open_statfs()
   } else {
     per_pool_stat_collection = true;
     dout(10) << __func__ << " per-pool statfs is enabled" << dendl;
-    KeyValueDB::Iterator it = db->get_iterator(PREFIX_STAT);
+    KeyValueDB::Iterator it = db->get_iterator(PREFIX_STAT, KeyValueDB::ITERATOR_NOCACHE);
     for (it->upper_bound(string());
          it->valid();
          it->next()) {
@@ -7236,7 +7236,7 @@ void BlueStore::_fsck_check_pool_statfs(
   int64_t& warnings,
   BlueStoreRepairer* repairer)
 {
-  auto it = db->get_iterator(PREFIX_STAT);
+  auto it = db->get_iterator(PREFIX_STAT, KeyValueDB::ITERATOR_NOCACHE);
   if (it) {
     for (it->lower_bound(string()); it->valid(); it->next()) {
       string key = it->key();
@@ -7864,7 +7864,7 @@ void BlueStore::_fsck_check_objects(FSCKDepth depth,
   size_t processed_myself = 0;
-  auto it = db->get_iterator(PREFIX_OBJ);
+  auto it = db->get_iterator(PREFIX_OBJ, KeyValueDB::ITERATOR_NOCACHE);
   mempool::bluestore_fsck::list<string> expecting_shards;
   if (it) {
     const size_t thread_count = cct->_conf->bluestore_fsck_quick_fix_threads;
@@ -8369,7 +8369,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
   }
   dout(1) << __func__ << " checking shared_blobs" << dendl;
-  it = db->get_iterator(PREFIX_SHARED_BLOB);
+  it = db->get_iterator(PREFIX_SHARED_BLOB, KeyValueDB::ITERATOR_NOCACHE);
   if (it) {
     // FIXME minor: perhaps simplify for shallow mode?
     // fill global if not overriden below
@@ -8453,7 +8453,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
     auto& space_tracker = repairer.get_space_usage_tracker();
     auto& misref_extents = repairer.get_misreferences();
     interval_set<uint64_t> to_release;
-    it = db->get_iterator(PREFIX_OBJ);
+    it = db->get_iterator(PREFIX_OBJ, KeyValueDB::ITERATOR_NOCACHE);
     if (it) {
       // fill global if not overriden below
       auto expected_statfs = &expected_store_statfs;
@@ -8690,7 +8690,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
   if (depth != FSCK_SHALLOW) {
     dout(1) << __func__ << " checking for stray omap data " << dendl;
-    it = db->get_iterator(PREFIX_OMAP);
+    it = db->get_iterator(PREFIX_OMAP, KeyValueDB::ITERATOR_NOCACHE);
     if (it) {
       uint64_t last_omap_head = 0;
       for (it->lower_bound(string()); it->valid(); it->next()) {
@@ -8706,7 +8706,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
         }
       }
     }
-    it = db->get_iterator(PREFIX_PGMETA_OMAP);
+    it = db->get_iterator(PREFIX_PGMETA_OMAP, KeyValueDB::ITERATOR_NOCACHE);
     if (it) {
       uint64_t last_omap_head = 0;
       for (it->lower_bound(string()); it->valid(); it->next()) {
@@ -8722,7 +8722,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
         }
      }
    }
-    it = db->get_iterator(PREFIX_PERPOOL_OMAP);
+    it = db->get_iterator(PREFIX_PERPOOL_OMAP, KeyValueDB::ITERATOR_NOCACHE);
     if (it) {
       uint64_t last_omap_head = 0;
       for (it->lower_bound(string()); it->valid(); it->next()) {
@@ -8743,7 +8743,7 @@ int BlueStore::_fsck_on_open(BlueStore::FSCKDepth depth, bool repair)
     }
   }
   dout(1) << __func__ << " checking deferred events" << dendl;
-  it = db->get_iterator(PREFIX_DEFERRED);
+  it = db->get_iterator(PREFIX_DEFERRED, KeyValueDB::ITERATOR_NOCACHE);
   if (it) {
     for (it->lower_bound(string()); it->valid(); it->next()) {
       bufferlist bl = it->value();


@@ -234,7 +234,7 @@ int KeyValueDBMemory::rm_range_keys(const string &prefix, const string &start, c
   return 0;
 }
-KeyValueDB::WholeSpaceIterator KeyValueDBMemory::get_wholespace_iterator() {
+KeyValueDB::WholeSpaceIterator KeyValueDBMemory::get_wholespace_iterator(IteratorOpts opts) {
   return std::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
     new WholeSpaceMemIterator(this)
   );


@@ -186,5 +186,5 @@ private:
   friend class WholeSpaceMemIterator;
 public:
-  WholeSpaceIterator get_wholespace_iterator() override;
+  WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override;
 };