btrfs-progs: factor out create_chunk()

Factor out create_chunk() from btrfs_alloc_chunk(). This new function
creates a chunk from the parameters collected in struct alloc_chunk_ctl
and the private list of devices chosen by the caller.

There are no functional changes.
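For orientation, a simplified sketch of the resulting call shape (device
selection, the retry loop, and error handling are elided; see the diff
below for the exact code):

    int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                          struct btrfs_fs_info *info, u64 *start,
                          u64 *num_bytes, u64 type)
    {
            struct alloc_chunk_ctl ctl;
            struct list_head private_devs;
            int ret;

            ctl.type = type;
            init_alloc_chunk_ctl(info, &ctl);
            /* ... move suitable devices from fs_devices->devices to private_devs ... */
            ret = create_chunk(trans, info, &ctl, &private_devs);
            /* create_chunk() fills ctl->start and ctl->num_bytes on success */
            *start = ctl.start;
            *num_bytes = ctl.num_bytes;
            return ret;
    }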

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Naohiro Aota 2021-04-06 17:05:48 +09:00 committed by David Sterba
parent 0e3865206e
commit 1e344dc8cf
1 changed file with 127 additions and 104 deletions


@@ -149,6 +149,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
};
struct alloc_chunk_ctl {
u64 start;
u64 type;
int num_stripes;
int max_stripes;
@@ -156,6 +157,7 @@ struct alloc_chunk_ctl {
int sub_stripes;
u64 calc_size;
u64 min_stripe_size;
u64 num_bytes;
u64 max_chunk_size;
int stripe_len;
int total_devs;
@@ -1118,20 +1120,131 @@ static int decide_stripe_size(struct btrfs_fs_info *info,
}
}
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *info, u64 *start,
u64 *num_bytes, u64 type)
static int create_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *info, struct alloc_chunk_ctl *ctl,
struct list_head *private_devs)
{
u64 dev_offset;
struct btrfs_root *extent_root = info->extent_root;
struct btrfs_root *chunk_root = info->chunk_root;
struct btrfs_stripe *stripes;
struct btrfs_device *device = NULL;
struct btrfs_chunk *chunk;
struct list_head private_devs;
struct list_head *dev_list = &info->fs_devices->devices;
struct list_head *cur;
struct map_lookup *map;
int ret;
int index;
struct btrfs_key key;
u64 offset;
ret = find_next_chunk(info, &offset);
if (ret)
return ret;
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.type = BTRFS_CHUNK_ITEM_KEY;
key.offset = offset;
chunk = kmalloc(btrfs_chunk_item_size(ctl->num_stripes), GFP_NOFS);
if (!chunk)
return -ENOMEM;
map = kmalloc(btrfs_map_lookup_size(ctl->num_stripes), GFP_NOFS);
if (!map) {
kfree(chunk);
return -ENOMEM;
}
stripes = &chunk->stripe;
ctl->num_bytes = chunk_bytes_by_type(ctl->type, ctl->calc_size, ctl);
index = 0;
while (index < ctl->num_stripes) {
u64 dev_offset;
struct btrfs_stripe *stripe;
BUG_ON(list_empty(private_devs));
cur = private_devs->next;
device = list_entry(cur, struct btrfs_device, dev_list);
/* loop over this device again if we're doing a dup group */
if (!(ctl->type & BTRFS_BLOCK_GROUP_DUP) ||
(index == ctl->num_stripes - 1))
list_move(&device->dev_list, dev_list);
ret = btrfs_alloc_dev_extent(trans, device, key.offset,
ctl->calc_size, &dev_offset);
if (ret < 0)
goto out_chunk_map;
device->bytes_used += ctl->calc_size;
ret = btrfs_update_device(trans, device);
if (ret < 0)
goto out_chunk_map;
map->stripes[index].dev = device;
map->stripes[index].physical = dev_offset;
stripe = stripes + index;
btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
index++;
}
BUG_ON(!list_empty(private_devs));
/* key was set above */
btrfs_set_stack_chunk_length(chunk, ctl->num_bytes);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, ctl->stripe_len);
btrfs_set_stack_chunk_type(chunk, ctl->type);
btrfs_set_stack_chunk_num_stripes(chunk, ctl->num_stripes);
btrfs_set_stack_chunk_io_align(chunk, ctl->stripe_len);
btrfs_set_stack_chunk_io_width(chunk, ctl->stripe_len);
btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, ctl->sub_stripes);
map->sector_size = info->sectorsize;
map->stripe_len = ctl->stripe_len;
map->io_align = ctl->stripe_len;
map->io_width = ctl->stripe_len;
map->type = ctl->type;
map->num_stripes = ctl->num_stripes;
map->sub_stripes = ctl->sub_stripes;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
btrfs_chunk_item_size(ctl->num_stripes));
BUG_ON(ret);
ctl->start = key.offset;
map->ce.start = key.offset;
map->ce.size = ctl->num_bytes;
ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
if (ret < 0)
goto out_chunk_map;
if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(info, &key,
chunk, btrfs_chunk_item_size(ctl->num_stripes));
if (ret < 0)
goto out_chunk;
}
kfree(chunk);
return ret;
out_chunk_map:
kfree(map);
out_chunk:
kfree(chunk);
return ret;
}
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *info, u64 *start,
u64 *num_bytes, u64 type)
{
struct btrfs_device *device = NULL;
struct list_head private_devs;
struct list_head *dev_list = &info->fs_devices->devices;
struct list_head *cur;
u64 min_free;
u64 avail = 0;
u64 max_avail = 0;
@@ -1139,14 +1252,14 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
int looped = 0;
int ret;
int index;
struct btrfs_key key;
u64 offset;
if (list_empty(dev_list)) {
if (list_empty(dev_list))
return -ENOSPC;
}
ctl.type = type;
/* start and num_bytes will be set by create_chunk() */
ctl.start = 0;
ctl.num_bytes = 0;
init_alloc_chunk_ctl(info, &ctl);
if (ctl.num_stripes < ctl.min_stripes)
return -ENOSPC;
@@ -1165,8 +1278,8 @@ again:
else
min_free = ctl.calc_size;
/* build a private list of devices we will allocate from */
while(index < ctl.num_stripes) {
/* Build a private list of devices we will allocate from */
while (index < ctl.num_stripes) {
device = list_entry(cur, struct btrfs_device, dev_list);
ret = btrfs_device_avail_bytes(trans, device, &avail);
if (ret)
@@ -1200,101 +1313,11 @@ again:
}
return -ENOSPC;
}
ret = find_next_chunk(info, &offset);
if (ret)
return ret;
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.type = BTRFS_CHUNK_ITEM_KEY;
key.offset = offset;
chunk = kmalloc(btrfs_chunk_item_size(ctl.num_stripes), GFP_NOFS);
if (!chunk)
return -ENOMEM;
ret = create_chunk(trans, info, &ctl, &private_devs);
*start = ctl.start;
*num_bytes = ctl.num_bytes;
map = kmalloc(btrfs_map_lookup_size(ctl.num_stripes), GFP_NOFS);
if (!map) {
kfree(chunk);
return -ENOMEM;
}
stripes = &chunk->stripe;
*num_bytes = chunk_bytes_by_type(type, ctl.calc_size, &ctl);
index = 0;
while(index < ctl.num_stripes) {
struct btrfs_stripe *stripe;
BUG_ON(list_empty(&private_devs));
cur = private_devs.next;
device = list_entry(cur, struct btrfs_device, dev_list);
/* loop over this device again if we're doing a dup group */
if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
(index == ctl.num_stripes - 1))
list_move(&device->dev_list, dev_list);
ret = btrfs_alloc_dev_extent(trans, device, key.offset,
ctl.calc_size, &dev_offset);
if (ret < 0)
goto out_chunk_map;
device->bytes_used += ctl.calc_size;
ret = btrfs_update_device(trans, device);
if (ret < 0)
goto out_chunk_map;
map->stripes[index].dev = device;
map->stripes[index].physical = dev_offset;
stripe = stripes + index;
btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
index++;
}
BUG_ON(!list_empty(&private_devs));
/* key was set above */
btrfs_set_stack_chunk_length(chunk, *num_bytes);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, ctl.stripe_len);
btrfs_set_stack_chunk_type(chunk, type);
btrfs_set_stack_chunk_num_stripes(chunk, ctl.num_stripes);
btrfs_set_stack_chunk_io_align(chunk, ctl.stripe_len);
btrfs_set_stack_chunk_io_width(chunk, ctl.stripe_len);
btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, ctl.sub_stripes);
map->sector_size = info->sectorsize;
map->stripe_len = ctl.stripe_len;
map->io_align = ctl.stripe_len;
map->io_width = ctl.stripe_len;
map->type = type;
map->num_stripes = ctl.num_stripes;
map->sub_stripes = ctl.sub_stripes;
ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
btrfs_chunk_item_size(ctl.num_stripes));
BUG_ON(ret);
*start = key.offset;;
map->ce.start = key.offset;
map->ce.size = *num_bytes;
ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
if (ret < 0)
goto out_chunk_map;
if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(info, &key,
chunk, btrfs_chunk_item_size(ctl.num_stripes));
if (ret < 0)
goto out_chunk;
}
kfree(chunk);
return ret;
out_chunk_map:
kfree(map);
out_chunk:
kfree(chunk);
return ret;
}