btrfs-progs: update btrfs_insert_empty_items to match the kernel
In the kernel we have a control struct called btrfs_item_batch that encodes all of the information for bulk inserting a bunch of items. Update btrfs_insert_empty_items to match the in-kernel implementation to make sync'ing ctree.c more straightforward.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent 0fe11183af
commit 8920697c4c
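For context, here is a minimal caller-side sketch of the new calling convention introduced by the diff below. The struct btrfs_item_batch fields and the btrfs_insert_empty_items() prototype are taken from this commit; the helper name, key values, item count, and data sizes are made up purely for illustration, and the sketch assumes the usual ctree.h declarations are in scope.

/*
 * Hypothetical caller inserting two empty items in one batch. The caller
 * computes total_data_size while it fills data_sizes, so
 * btrfs_insert_empty_items() never has to loop over the array itself.
 */
static int insert_two_items_example(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct btrfs_key keys[2];
	u32 data_sizes[2] = { 16, 32 };	/* made-up item payload sizes */
	struct btrfs_item_batch batch;
	u32 total = 0;
	int i;

	/* Keys must be in sorted order. */
	keys[0].objectid = 256;
	keys[0].type = BTRFS_XATTR_ITEM_KEY;
	keys[0].offset = 1;
	keys[1].objectid = 256;
	keys[1].type = BTRFS_XATTR_ITEM_KEY;
	keys[1].offset = 2;

	for (i = 0; i < 2; i++)
		total += data_sizes[i];

	batch.keys = keys;
	batch.data_sizes = data_sizes;
	batch.total_data_size = total;
	batch.nr = 2;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}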
@@ -2693,8 +2693,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_path *path,
-			     struct btrfs_key *cpu_key, u32 *data_size,
-			     int nr)
+			     const struct btrfs_item_batch *batch)
 {
 	struct extent_buffer *leaf;
 	int ret = 0;
@@ -2702,20 +2701,16 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 	int i;
 	u32 nritems;
 	u32 total_size = 0;
-	u32 total_data = 0;
 	unsigned int data_end;
 	struct btrfs_disk_key disk_key;
 
-	for (i = 0; i < nr; i++) {
-		total_data += data_size[i];
-	}
-
 	/* create a root if there isn't one */
 	if (!root->node)
 		BUG();
 
-	total_size = total_data + nr * sizeof(struct btrfs_item);
-	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
+	total_size = batch->total_data_size +
+		(batch->nr * sizeof(struct btrfs_item));
+	ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
 	if (ret == 0) {
 		return -EEXIST;
 	}
@@ -2754,35 +2749,38 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 			u32 ioff;
 
 			ioff = btrfs_item_offset(leaf, i);
-			btrfs_set_item_offset(leaf, i, ioff - total_data);
+			btrfs_set_item_offset(leaf, i,
+					      ioff - batch->total_data_size);
 		}
 
 		/* shift the items */
-		memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, slot + nr),
+		memmove_extent_buffer(leaf,
+				      btrfs_item_nr_offset(leaf, slot + batch->nr),
 				      btrfs_item_nr_offset(leaf, slot),
 				      (nritems - slot) * sizeof(struct btrfs_item));
 
 		/* shift the data */
 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) +
-			      data_end - total_data, btrfs_item_nr_offset(leaf, 0) +
+				      data_end - batch->total_data_size,
+				      btrfs_item_nr_offset(leaf, 0) +
 			      data_end, old_data - data_end);
 		data_end = old_data;
 	}
 
 	/* setup the item for the new data */
-	for (i = 0; i < nr; i++) {
-		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
+	for (i = 0; i < batch->nr; i++) {
+		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
 		btrfs_set_item_key(leaf, &disk_key, slot + i);
-		btrfs_set_item_offset(leaf, slot + i, data_end - data_size[i]);
-		data_end -= data_size[i];
-		btrfs_set_item_size(leaf, slot + i, data_size[i]);
+		data_end -= batch->data_sizes[i];
+		btrfs_set_item_offset(leaf, slot + i, data_end);
+		btrfs_set_item_size(leaf, slot + i, batch->data_sizes[i]);
 	}
-	btrfs_set_header_nritems(leaf, nritems + nr);
+	btrfs_set_header_nritems(leaf, nritems + batch->nr);
 	btrfs_mark_buffer_dirty(leaf);
 
 	ret = 0;
 	if (slot == 0) {
-		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
 		fixup_low_keys(path, &disk_key, 1);
 	}
 
@@ -1004,12 +1004,38 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
 	return btrfs_del_items(trans, root, path, path->slots[0], 1);
 }
 
+/*
+ * Describes a batch of items to insert in a btree. This is used by
+ * btrfs_insert_empty_items().
+ */
+struct btrfs_item_batch {
+	/*
+	 * Pointer to an array containing the keys of the items to insert (in
+	 * sorted order).
+	 */
+	const struct btrfs_key *keys;
+	/* Pointer to an array containing the data size for each item to insert. */
+	const u32 *data_sizes;
+	/*
+	 * The sum of data sizes for all items. The caller can compute this while
+	 * setting up the data_sizes array, so it ends up being more efficient
+	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
+	 * doing it, as it would avoid an extra loop over a potentially large
+	 * array, and in the case of setup_item_for_insert(), we would be doing
+	 * it while holding a write lock on a leaf and often on upper level nodes
+	 * too, unnecessarily increasing the size of a critical section.
+	 */
+	u32 total_data_size;
+	/* Size of the keys and data_sizes arrays (number of items in the batch). */
+	int nr;
+};
+
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
 		*root, struct btrfs_key *key, void *data, u32 data_size);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_path *path,
-			     struct btrfs_key *cpu_key, u32 *data_size, int nr);
+			     const struct btrfs_item_batch *batch);
 
 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 					  struct btrfs_root *root,
@@ -1017,7 +1043,14 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 					  struct btrfs_key *key,
 					  u32 data_size)
 {
-	return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
+	struct btrfs_item_batch batch;
+
+	batch.keys = key;
+	batch.data_sizes = &data_size;
+	batch.total_data_size = data_size;
+	batch.nr = 1;
+
+	return btrfs_insert_empty_items(trans, root, path, &batch);
 }
 
 int btrfs_next_sibling_tree_block(struct btrfs_fs_info *fs_info,
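As a usage note, the single-item path is unchanged for callers: btrfs_insert_empty_item() now just builds a one-entry batch internally, as the header diff above shows. Below is a hedged sketch of the common pattern of reserving an empty item and then filling it in place. The helper name, key, and payload are hypothetical; btrfs_item_ptr_offset(), write_extent_buffer(), btrfs_mark_buffer_dirty(), and btrfs_release_path() are the usual leaf-access routines and are assumed to be available as declared in the existing headers.

/*
 * Hypothetical caller: reserve space for one empty item, then copy the
 * payload into the leaf. Error handling is kept minimal for brevity.
 */
static int insert_one_blob_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct btrfs_key *key,
				   const void *data, u32 size)
{
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;

	/* Internally builds a one-entry btrfs_item_batch (see header diff). */
	ret = btrfs_insert_empty_item(trans, root, path, key, size);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr, size);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
	return 0;
}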