1
0
mirror of https://github.com/kdave/btrfs-progs synced 2025-04-26 13:07:55 +00:00

btrfs-progs: block-group: refactor how we read one block group item

Structure btrfs_block_group has the following members which are
currently read from the on-disk block group item and key:

- length - from item key
- used - from block group item
- flags - from block group item

However for incoming skinny block group tree, we are going to read those
members from different sources.

This patch refactors that read as follows:

- Refactor length/used/flags initialization into one function
  The new function, fill_one_block_group() will handle the
  initialization of such members.

- Use btrfs_block_group::length to replace key::offset
  Since the skinny block group item gives a different meaning to its
  key offset.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Qu Wenruo 2020-05-05 08:02:21 +08:00 committed by David Sterba
parent e815f5734b
commit b2c8f806c4

View File

@ -172,6 +172,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
struct btrfs_block_group *cache; struct btrfs_block_group *cache;
ASSERT(block_group->length != 0);
p = &info->block_group_cache_tree.rb_node; p = &info->block_group_cache_tree.rb_node;
while (*p) { while (*p) {
@ -2622,6 +2623,27 @@ error:
return ret; return ret;
} }
static int read_block_group_item(struct btrfs_block_group *cache,
struct btrfs_path *path,
const struct btrfs_key *key)
{
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_block_group_item bgi;
int slot = path->slots[0];
ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
cache->start = key->objectid;
cache->length = key->offset;
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
sizeof(bgi));
cache->used = btrfs_stack_block_group_used(&bgi);
cache->flags = btrfs_stack_block_group_flags(&bgi);
return 0;
}
/* /*
* Read out one BLOCK_GROUP_ITEM and insert it into block group cache. * Read out one BLOCK_GROUP_ITEM and insert it into block group cache.
* *
@ -2634,7 +2656,6 @@ static int read_one_block_group(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf = path->nodes[0]; struct extent_buffer *leaf = path->nodes[0];
struct btrfs_space_info *space_info; struct btrfs_space_info *space_info;
struct btrfs_block_group *cache; struct btrfs_block_group *cache;
struct btrfs_block_group_item bgi;
struct btrfs_key key; struct btrfs_key key;
int slot = path->slots[0]; int slot = path->slots[0];
int ret; int ret;
@ -2652,14 +2673,11 @@ static int read_one_block_group(struct btrfs_fs_info *fs_info,
cache = kzalloc(sizeof(*cache), GFP_NOFS); cache = kzalloc(sizeof(*cache), GFP_NOFS);
if (!cache) if (!cache)
return -ENOMEM; return -ENOMEM;
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), ret = read_block_group_item(cache, path, &key);
sizeof(bgi)); if (ret < 0) {
cache->start = key.objectid; free(cache);
cache->length = key.offset; return ret;
cache->cached = 0; }
cache->pinned = 0;
cache->flags = btrfs_stack_block_group_flags(&bgi);
cache->used = btrfs_stack_block_group_used(&bgi);
INIT_LIST_HEAD(&cache->dirty_list); INIT_LIST_HEAD(&cache->dirty_list);
set_avail_alloc_bits(fs_info, cache->flags); set_avail_alloc_bits(fs_info, cache->flags);