btrfs-progs: zoned: redirty clean extent buffers

Tree manipulating operations like merging nodes often release
once-allocated tree nodes. Btrfs cleans such nodes so that pages in the
node are not uselessly written out. On ZONED drives, however, such
optimization blocks the following IOs as the cancellation of the write
out of the freed blocks breaks the sequential write sequence expected by
the device.

Check whether the next dirty extent buffer is contiguous with the previously
written one. If it is not, redirty the extent buffers between the previous one
and the next one, so that all dirty buffers are written out sequentially.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Naohiro Aota 2021-04-26 15:27:31 +09:00 committed by David Sterba
parent feff533e34
commit bfd34b7876
4 changed files with 45 additions and 0 deletions

View File

@ -1140,6 +1140,7 @@ struct btrfs_block_group {
* allocation. This is used only with ZONED mode enabled.
*/
u64 alloc_offset;
u64 write_offset;
};
struct btrfs_device;

View File

@ -18,6 +18,7 @@
#include "kernel-shared/disk-io.h"
#include "kernel-shared/transaction.h"
#include "kernel-shared/delayed-ref.h"
#include "kernel-shared/zoned.h"
#include "common/messages.h"
struct btrfs_trans_handle* btrfs_start_transaction(struct btrfs_root *root,
@ -138,10 +139,15 @@ int __commit_transaction(struct btrfs_trans_handle *trans,
int ret;
while(1) {
again:
ret = find_first_extent_bit(tree, 0, &start, &end,
EXTENT_DIRTY);
if (ret)
break;
if (btrfs_redirty_extent_buffer_for_zoned(fs_info, start, end))
goto again;
while(start <= end) {
eb = find_first_extent_buffer(tree, start);
BUG_ON(!eb || eb->start != start);

View File

@ -847,10 +847,40 @@ out:
ret = -EIO;
}
if (!ret)
cache->write_offset = cache->alloc_offset;
free(alloc_offsets);
return ret;
}
/*
 * Keep tree-block writes sequential on zoned devices.
 *
 * Compare the block group's write pointer (cache->start + write_offset)
 * against the next dirty range [@start, @end]. If a freed-and-cleaned block
 * left a hole before @start, redirty the buffer sitting at the write pointer
 * and report true so the caller rescans the dirty tree from the beginning.
 * Otherwise account the range as written and report false.
 *
 * NOTE(review): assumes the hole is at most one nodesize wide (enforced by
 * the BUG_ON below) — confirm against the allocator's behavior.
 */
bool btrfs_redirty_extent_buffer_for_zoned(struct btrfs_fs_info *fs_info,
		u64 start, u64 end)
{
	struct btrfs_block_group *cache;
	struct extent_buffer *eb;
	u64 wp;

	/* Nothing to do on conventional (non-zoned) filesystems. */
	if (!btrfs_is_zoned(fs_info))
		return false;

	cache = btrfs_lookup_first_block_group(fs_info, start);
	BUG_ON(!cache);

	wp = cache->start + cache->write_offset;
	if (wp >= start) {
		/* No hole: advance the write pointer past this range. */
		cache->write_offset += end + 1 - start;
		return false;
	}

	/* Hole before @start: redirty the skipped block at the write pointer. */
	BUG_ON(wp + fs_info->nodesize > start);
	eb = btrfs_find_create_tree_block(fs_info, wp);
	btrfs_mark_buffer_dirty(eb);
	free_extent_buffer(eb);
	return true;
}
#endif
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)

View File

@ -90,6 +90,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
u64 hole_end, u64 num_bytes);
int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
struct btrfs_block_group *cache);
bool btrfs_redirty_extent_buffer_for_zoned(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
#else
@ -127,6 +129,12 @@ static inline int btrfs_load_block_group_zone_info(
return 0;
}
/*
 * Stub for builds without zoned support: there is never a sequential-write
 * constraint, so no buffer ever needs redirtying.
 */
static inline bool btrfs_redirty_extent_buffer_for_zoned(
		struct btrfs_fs_info *fs_info, u64 start, u64 end)
{
	/* Non-zoned build: always report "nothing redirtied". */
	return false;
}
#endif /* BTRFS_ZONED */
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)