btrfs-progs/kernel-shared/zoned.h
Naohiro Aota f08410f078 btrfs-progs: zoned: load zone's allocation offset
A zoned filesystem must allocate blocks at the zones' write pointer. The
device's write pointer position can be mapped to a logical address
within a block group. To facilitate this, add an "alloc_offset" to the
block group to track the logical address of the write pointer.

This logical address is populated in btrfs_load_block_group_zone_info()
from the write pointers of corresponding zones.
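As a rough sketch (not the actual implementation), the mapping boils
down to taking the write pointer's byte offset within its zone; 'start'
and 'wp' below are the sector-granularity fields of struct blk_zone
from linux/blkzoned.h:

	/*
	 * Illustrative sketch only: a block group backed by a single
	 * sequential zone gets its alloc_offset from that zone's write
	 * pointer. blk_zone reports 'start' and 'wp' in 512-byte sectors.
	 */
	static u64 zone_wp_offset(const struct blk_zone *zone)
	{
		return (zone->wp - zone->start) << 9; /* sectors to bytes */
	}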

For now, zoned filesystems support only the single profile. Supporting a
non-single profile with zone append writing is not trivial. For example,
in the DUP profile, we send a zone append write IO to two zones on a
device. The device replies with the written LBAs for the IOs. If the
offsets of the returned addresses from the beginning of their zones
differ, the result is two different logical addresses.
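As a concrete (hypothetical) illustration: with 256 MiB zones, the
append to the first zone may complete at offset 4 MiB from its zone
start while the append to the second completes at 8 MiB; the two copies
of the same data then sit at different intra-zone offsets and would map
to two different logical addresses.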

We would need a fine-grained logical-to-physical mapping to handle such
diverging physical addresses. Since that would require an additional
metadata type, non-single profiles are disabled for now.

This commit supports the case where all the zones in a block group are
sequential. The next patch will handle the case of a block group that
contains a conventional zone.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-05-06 16:41:45 +02:00

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __BTRFS_ZONED_H__
#define __BTRFS_ZONED_H__

#include "kerncompat.h"
#include <stdbool.h>
#include "kernel-shared/disk-io.h"
#include "kernel-shared/volumes.h"

#ifdef BTRFS_ZONED
#include <linux/blkzoned.h>
#else
struct blk_zone {
	int dummy;
};
#endif /* BTRFS_ZONED */
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES	2

/*
 * Zoned block device models
 */
enum btrfs_zoned_model {
	ZONED_NONE,
	ZONED_HOST_AWARE,
	ZONED_HOST_MANAGED,
};

/*
 * Zone information for a zoned block device.
 */
struct btrfs_zoned_device_info {
	enum btrfs_zoned_model model;	/* none, host-aware or host-managed */
	u64 zone_size;			/* zone size in bytes */
	u64 max_zone_append_size;	/* max bytes in one zone append command */
	u32 nr_zones;			/* number of zones on the device */
	struct blk_zone *zones;		/* zone reports, indexed by zone number */
};

enum btrfs_zoned_model zoned_model(const char *file);
u64 zone_size(const char *file);
int btrfs_get_zone_info(int fd, const char *file,
			struct btrfs_zoned_device_info **zinfo);
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
#ifdef BTRFS_ZONED
size_t btrfs_sb_io(int fd, void *buf, off_t offset, int rw);

/* Superblock read/write that is aware of the zoned superblock log */
static inline size_t sbread(int fd, void *buf, off_t offset)
{
	return btrfs_sb_io(fd, buf, offset, READ);
}

static inline size_t sbwrite(int fd, void *buf, off_t offset)
{
	return btrfs_sb_io(fd, buf, offset, WRITE);
}

static inline bool zone_is_sequential(struct btrfs_zoned_device_info *zinfo,
				      u64 bytenr)
{
	unsigned int zno;

	if (!zinfo || zinfo->model == ZONED_NONE)
		return false;

	zno = bytenr / zinfo->zone_size;
	return zinfo->zones[zno].type == BLK_ZONE_TYPE_SEQWRITE_REQ;
}

static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	unsigned int zno;

	/* Non-sequential zones can be written anywhere, treat them as empty */
	if (!zone_is_sequential(zinfo, pos))
		return true;

	zno = pos / zinfo->zone_size;
	return zinfo->zones[zno].cond == BLK_ZONE_COND_EMPTY;
}

int btrfs_reset_dev_zone(int fd, struct blk_zone *zone);
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes);
int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
				     struct btrfs_block_group *cache);
#else

#define sbread(fd, buf, offset) \
	pread64(fd, buf, BTRFS_SUPER_INFO_SIZE, offset)
#define sbwrite(fd, buf, offset) \
	pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, offset)

/* No-op fallbacks used when zoned device support is compiled out */
static inline int btrfs_reset_dev_zone(int fd, struct blk_zone *zone)
{
	return 0;
}

static inline bool zone_is_sequential(struct btrfs_zoned_device_info *zinfo,
				      u64 bytenr)
{
	return false;
}

static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
					       u64 hole_start, u64 hole_end,
					       u64 num_bytes)
{
	return hole_start;
}

static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	return true;
}

static inline int btrfs_load_block_group_zone_info(
		struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache)
{
	return 0;
}

#endif /* BTRFS_ZONED */

static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
{
	return zone_is_sequential(device->zone_info, pos);
}

#endif /* __BTRFS_ZONED_H__ */
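
A minimal, hypothetical usage sketch of the interface above (the device
path, error handling, and the assumption that the caller frees the
returned arrays are illustrative; the real callers live elsewhere in
btrfs-progs):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	static void print_zone_summary(const char *path)
	{
		struct btrfs_zoned_device_info *zinfo = NULL;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return;
		/* Query the zone layout of the device backing 'path' */
		if (btrfs_get_zone_info(fd, path, &zinfo) == 0 && zinfo) {
			printf("%s: model %d, %u zones of %llu bytes\n",
			       path, zinfo->model, zinfo->nr_zones,
			       (unsigned long long)zinfo->zone_size);
			free(zinfo->zones);	/* assumption: caller frees */
			free(zinfo);
		}
		close(fd);
	}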