os/bluestore/AvlAllocator: specialize _block_picker()

Before this change, AvlAllocator::_block_picker() was shared by the
best-fit mode and the first-fit mode. But since searching the area
pointed to by the cursor provides no locality benefit in best-fit
mode, we had to pass a dummy cursor to AvlAllocator::_block_picker()
when searching in best-fit mode.

Moreover, since range_size_tree is already sorted by the size of the
ranges, the first pass of _block_picker() already visits every range
large enough to satisfy the request; if it fails, the wrap-around
pass cannot succeed either, so we should give up right away and retry
with a smaller size instead.

After this change, instead of sharing AvlAllocator::_block_picker()
across the first-fit mode and the best-fit mode, the method is
specialized into two variants: _pick_block_after() for first-fit, and
the simplified _pick_block_fits() for best-fit.

Signed-off-by: Kefu Chai <kchai@redhat.com>
Kefu Chai 2021-06-02 15:11:30 +08:00
parent 7513b24aa5
commit 4837166f9e
2 changed files with 33 additions and 14 deletions
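
As a minimal illustration of the point above (a sketch only, with
hypothetical names such as pick_block_fits, and alignment handling
omitted; not the Ceph code): in a tree sorted by range length,
lower_bound() on the requested size lands on the smallest range that
can still fit, and a failed lookup proves nothing in the tree is
large enough, so the only sensible retry is with a smaller size.

#include <cstdint>
#include <iostream>
#include <set>

// a free extent [start, end); length is the sort key for best-fit
struct range {
  uint64_t start;
  uint64_t end;
  uint64_t length() const { return end - start; }
};

// order by (length, start), mirroring a size-sorted range tree
struct by_length {
  bool operator()(const range& a, const range& b) const {
    return a.length() < b.length() ||
           (a.length() == b.length() && a.start < b.start);
  }
};

// hypothetical stand-in for _pick_block_fits: the smallest range
// whose length is >= size wins; anything before it is too small
uint64_t pick_block_fits(const std::set<range, by_length>& t, uint64_t size) {
  auto rs = t.lower_bound(range{0, size});  // probe of length `size`
  return rs != t.end() ? rs->start : uint64_t(-1ULL);
}

int main() {
  std::set<range, by_length> free_ranges{{0, 8}, {100, 116}, {200, 264}};
  std::cout << pick_block_fits(free_ranges, 16) << "\n";  // 100: the 16-byte range
  std::cout << pick_block_fits(free_ranges, 65) << "\n";  // -1ULL: nothing fits
}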

src/os/bluestore/AvlAllocator.cc

@@ -29,15 +29,13 @@ namespace {
  * a suitable block to allocate. This will search the specified AVL
  * tree looking for a block that matches the specified criteria.
  */
-template<class Tree>
-uint64_t AvlAllocator::_block_picker(const Tree& t,
-                                     uint64_t *cursor,
-                                     uint64_t size,
-                                     uint64_t align)
+uint64_t AvlAllocator::_pick_block_after(uint64_t *cursor,
+                                         uint64_t size,
+                                         uint64_t align)
 {
-  const auto compare = t.key_comp();
-  auto rs_start = t.lower_bound(range_t{*cursor, size}, compare);
-  for (auto rs = rs_start; rs != t.end(); ++rs) {
+  const auto compare = range_tree.key_comp();
+  auto rs_start = range_tree.lower_bound(range_t{*cursor, size}, compare);
+  for (auto rs = rs_start; rs != range_tree.end(); ++rs) {
     uint64_t offset = p2roundup(rs->start, align);
     if (offset + size <= rs->end) {
       *cursor = offset + size;
@@ -49,7 +47,7 @@ uint64_t AvlAllocator::_block_picker(const Tree& t,
     return -1ULL;
   }
   // If we reached end, start from beginning till cursor.
-  for (auto rs = t.begin(); rs != rs_start; ++rs) {
+  for (auto rs = range_tree.begin(); rs != rs_start; ++rs) {
     uint64_t offset = p2roundup(rs->start, align);
     if (offset + size <= rs->end) {
       *cursor = offset + size;
@@ -59,6 +57,22 @@ uint64_t AvlAllocator::_block_picker(const Tree& t,
   return -1ULL;
 }
 
+uint64_t AvlAllocator::_pick_block_fits(uint64_t size,
+                                        uint64_t align)
+{
+  // instead of searching from cursor, just pick the smallest range which fits
+  // the needs
+  const auto compare = range_size_tree.key_comp();
+  auto rs_start = range_size_tree.lower_bound(range_t{0, size}, compare);
+  for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) {
+    uint64_t offset = p2roundup(rs->start, align);
+    if (offset + size <= rs->end) {
+      return offset;
+    }
+  }
+  return -1ULL;
+}
+
 void AvlAllocator::_add_to_tree(uint64_t start, uint64_t size)
 {
   ceph_assert(size != 0);
@@ -233,9 +247,8 @@ int AvlAllocator::_allocate(
   if (force_range_size_alloc ||
       max_size < range_size_alloc_threshold ||
       free_pct < range_size_alloc_free_pct) {
-    uint64_t fake_cursor = 0;
     do {
-      start = _block_picker(range_size_tree, &fake_cursor, size, unit);
+      start = _pick_block_fits(size, unit);
       dout(20) << __func__ << " best fit=" << start << " size=" << size << dendl;
       if (start != uint64_t(-1ULL)) {
         break;
@@ -257,7 +270,7 @@ int AvlAllocator::_allocate(
       ceph_assert(align != 0);
       uint64_t* cursor = &lbas[cbits(align) - 1];
-      start = _block_picker(range_tree, cursor, size, unit);
+      start = _pick_block_after(cursor, size, unit);
       dout(20) << __func__ << " first fit=" << start << " size=" << size << dendl;
       if (start != uint64_t(-1ULL)) {
         break;

src/os/bluestore/AvlAllocator.h

@@ -92,8 +92,14 @@ public:
   void shutdown() override;
 private:
-  template<class Tree>
-  uint64_t _block_picker(const Tree& t, uint64_t *cursor, uint64_t size,
-                         uint64_t align);
+  // pick a range by search from cursor forward
+  uint64_t _pick_block_after(
+    uint64_t *cursor,
+    uint64_t size,
+    uint64_t align);
+  // pick a range with exactly the same size or larger
+  uint64_t _pick_block_fits(
+    uint64_t size,
+    uint64_t align);
   int _allocate(
     uint64_t size,
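
For contrast with the best-fit sketch earlier, here is a
self-contained illustration (hypothetical names; alignment and the
removal of allocated ranges, which the real allocator performs, are
omitted) of the cursor-forward, wrap-around scan that the first
comment above describes: a hit advances the cursor so the next search
resumes nearby, and the scan wraps to the beginning only after the
tail of the tree is exhausted.

#include <cstdint>
#include <iostream>
#include <set>

// a free extent [start, end), ordered by offset like range_tree
struct range {
  uint64_t start, end;
  bool operator<(const range& o) const { return start < o.start; }
};

uint64_t pick_block_after(const std::set<range>& t, uint64_t* cursor,
                          uint64_t size) {
  // resume from the first range at or after the cursor
  auto rs_start = t.lower_bound(range{*cursor, 0});
  for (auto rs = rs_start; rs != t.end(); ++rs) {
    if (rs->start + size <= rs->end) {
      *cursor = rs->start + size;
      return rs->start;
    }
  }
  // reached the end: wrap around and scan [begin, rs_start)
  for (auto rs = t.begin(); rs != rs_start; ++rs) {
    if (rs->start + size <= rs->end) {
      *cursor = rs->start + size;
      return rs->start;
    }
  }
  return uint64_t(-1ULL);
}

int main() {
  std::set<range> free_ranges{{0, 64}, {128, 160}, {512, 1024}};
  uint64_t cursor = 0;
  std::cout << pick_block_after(free_ranges, &cursor, 32) << "\n";  // 0
  // cursor is now 32, so the next pick skips ahead for locality
  std::cout << pick_block_after(free_ranges, &cursor, 32) << "\n";  // 128
}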