/* btrfs-progs/kernel-shared/zoned.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BTRFS_ZONED_H__
#define __BTRFS_ZONED_H__
#include "kerncompat.h"
#include <stdbool.h>
#include "kernel-shared/disk-io.h"
#include "kernel-shared/volumes.h"
#ifdef BTRFS_ZONED
#include <linux/blkzoned.h>
#else
/*
 * Stand-in definition when built without zoned support, so that
 * struct blk_zone pointers in the API below remain well-formed
 * even though <linux/blkzoned.h> is not available.
 */
struct blk_zone {
	int dummy;
};
#endif /* BTRFS_ZONED */
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2
/*
 * Zoned block device models (same naming as the kernel's zoned model
 * classification).
 */
enum btrfs_zoned_model {
	ZONED_NONE,		/* Regular, non-zoned block device */
	ZONED_HOST_AWARE,	/* Host-aware zoned device */
	ZONED_HOST_MANAGED,	/* Host-managed zoned device */
};
/*
 * Per-device zone information for a zoned block device.
 */
struct btrfs_zoned_device_info {
	enum btrfs_zoned_model model;	/* Zoned model of this device */
	u64 zone_size;			/* Zone size in bytes; zone number = bytenr / zone_size */
	u64 max_zone_append_size;	/* Max zone-append write size — presumably bytes, 0 if unsupported; verify in zoned.c */
	u32 nr_zones;			/* Number of entries in @zones */
	struct blk_zone *zones;		/* Zone descriptors, indexed by zone number */
};
/* Report the zoned model of the block device at path @file. */
enum btrfs_zoned_model zoned_model(const char *file);
/* Zone size of the device at @file — presumably in bytes; see zoned.c. */
u64 zone_size(const char *file);
/* Allocate and fill *@zinfo for the open device @fd; @file presumably names it for error reporting — verify in zoned.c. */
int btrfs_get_zone_info(int fd, const char *file,
			struct btrfs_zoned_device_info **zinfo);
/* Collect zone info for all devices of @fs_info. */
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
/* Check that the devices of @fs_info are usable in zoned mode. */
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
#ifdef BTRFS_ZONED
/*
 * Zoned-aware superblock I/O: read or write the superblock @buf at
 * @offset with direction @rw (READ or WRITE). Implemented in zoned.c.
 */
size_t btrfs_sb_io(int fd, void *buf, off_t offset, int rw);

/* Read a superblock at @offset via the zoned-aware path. */
static inline size_t sbread(int fd, void *buf, off_t offset)
{
	return btrfs_sb_io(fd, buf, offset, READ);
}

/* Write a superblock at @offset via the zoned-aware path. */
static inline size_t sbwrite(int fd, void *buf, off_t offset)
{
	return btrfs_sb_io(fd, buf, offset, WRITE);
}
/*
 * Return true if the zone containing @bytenr on a device described by
 * @zinfo is a sequential-write-required zone. A NULL @zinfo or a
 * non-zoned model means no zone can be sequential.
 */
static inline bool zone_is_sequential(struct btrfs_zoned_device_info *zinfo,
				      u64 bytenr)
{
	if (zinfo && zinfo->model != ZONED_NONE) {
		unsigned int zone_no = bytenr / zinfo->zone_size;

		return zinfo->zones[zone_no].type == BLK_ZONE_TYPE_SEQWRITE_REQ;
	}

	return false;
}
/*
 * Return true if the zone containing @pos on @device is empty (i.e. safe
 * to start writing at its start). Conventional zones — and devices
 * without zone info — are always considered empty.
 */
static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;

	if (!zone_is_sequential(zinfo, pos))
		return true;

	return zinfo->zones[pos / zinfo->zone_size].cond == BLK_ZONE_COND_EMPTY;
}
/* Reset the write pointer of @zone on device @fd — see zoned.c. */
int btrfs_reset_dev_zone(int fd, struct blk_zone *zone);
/* Find a zone-aligned allocatable range of @num_bytes within [@hole_start, @hole_end) — verify details in zoned.c. */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes);
/* Load zone layout information into block group @cache. */
int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
				     struct btrfs_block_group *cache);
#else
/*
 * Non-zoned fallback: superblock I/O is a plain fixed-size
 * pread64/pwrite64 of BTRFS_SUPER_INFO_SIZE bytes at @offset.
 */
#define sbread(fd, buf, offset) \
	pread64(fd, buf, BTRFS_SUPER_INFO_SIZE, offset)
#define sbwrite(fd, buf, offset) \
	pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, offset)
/* No-op without zoned support: there are no zones to reset; always succeeds. */
static inline int btrfs_reset_dev_zone(int fd, struct blk_zone *zone)
{
	return 0;
}
/* Without zoned support no zone is sequential-write-required. */
static inline bool zone_is_sequential(struct btrfs_zoned_device_info *zinfo,
				      u64 bytenr)
{
	return false;
}
/*
 * Non-zoned fallback: any hole is allocatable as-is, so the search
 * trivially succeeds at the start of the hole.
 */
static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
					       u64 hole_start, u64 hole_end,
					       u64 num_bytes)
{
	return hole_start;
}
/* Without zoned support every position is treated as an empty zone. */
static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
{
	return true;
}
/* No zone info to load without zoned support; always succeeds. */
static inline int btrfs_load_block_group_zone_info(
	struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache)
{
	return 0;
}
#endif /* BTRFS_ZONED */
/*
 * Return true if the zone containing @pos on @device requires
 * sequential writes. Thin convenience wrapper over zone_is_sequential().
 */
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;

	return zone_is_sequential(zinfo, pos);
}
#endif /* __BTRFS_ZONED_H__ */