btrfs-progs: minor source sync with kernel 6.8-rc3
Sync a few more files at the source level with kernel 6.8-rc3, no functional changes.

Signed-off-by: David Sterba <dsterba@suse.com>
parent 8e6f651421
commit bec6bc8eee
@@ -28,7 +28,7 @@ static bool check_setget_bounds(const struct extent_buffer *eb,
 /*
  * MODIFIED:
- * - We don't have eb->pages.
+ * - We don't have eb->folios
  */
 void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
 {
@@ -39,7 +39,7 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e
 /*
  * MODIFIED:
- * - We don't have eb->pages, simply wrap the set/get helpers.
+ * - We don't have eb->folios, simply wrap the set/get helpers.
  */
 
 /*
@@ -61,7 +61,7 @@ void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *e
  * an offset into the extent buffer page array, cast to a specific type. This
  * gives us all the type checking.
  *
- * The extent buffer pages stored in the array pages do not form a contiguous
+ * The extent buffer pages stored in the array folios may not form a contiguous
  * phyusical range, but the API functions assume the linear offset to the range
  * from 0 to metadata node size.
  */
@@ -98,7 +98,7 @@ static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
 /*
  * MODIFIED:
- * - We have eb->data, not eb->pages[0]
+ * - We have eb->data, not eb->folios[0]
  */
 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)	\
 static inline u##bits btrfs_##name(const struct extent_buffer *eb)	\
@@ -126,10 +126,8 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
 static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb,
 					   struct btrfs_dev_item *s)
 {
-	_static_assert(sizeof(u64) ==
-		       sizeof(((struct btrfs_dev_item *)0))->total_bytes);
-	return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
-					    total_bytes));
+	_static_assert(sizeof(u64) == sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes));
 }
 
 /*
@@ -140,8 +138,7 @@ static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb,
 						struct btrfs_dev_item *s,
 						u64 val)
 {
-	_static_assert(sizeof(u64) ==
-		       sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+	_static_assert(sizeof(u64) == sizeof(((struct btrfs_dev_item *)0))->total_bytes);
 	btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
 }
 
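The BTRFS_SETGET_* macros in this file generate typed accessors such as the btrfs_device_total_bytes()/btrfs_set_device_total_bytes() pair touched above. A usage sketch only (not part of this commit; leaf, slot and new_total are placeholders):

    struct btrfs_dev_item *di;

    di = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
    printf("devid %llu total_bytes %llu\n",
           (unsigned long long)btrfs_device_id(leaf, di),
           (unsigned long long)btrfs_device_total_bytes(leaf, di));
    btrfs_set_device_total_bytes(leaf, di, new_total);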
@@ -257,7 +254,7 @@ static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
 					     struct btrfs_chunk *c, int nr,
 					     u64 val)
 {
-	btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
+	btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
 }
 
 /* struct btrfs_block_group_item */
@@ -278,36 +275,6 @@ BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
 		   extent_count, 32);
 BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);
 
-/* struct btrfs_stripe_extent */
-BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
-BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
-BTRFS_SETGET_FUNCS(raid_stride_offset, struct btrfs_raid_stride, offset, 64);
-
-static inline struct btrfs_raid_stride *btrfs_raid_stride_nr(
-						struct btrfs_stripe_extent *dps,
-						int nr)
-{
-	unsigned long offset = (unsigned long)dps;
-
-	offset += offsetof(struct btrfs_stripe_extent, strides);
-	offset += nr * sizeof(struct btrfs_raid_stride);
-	return (struct btrfs_raid_stride *)offset;
-}
-
-static inline u64 btrfs_raid_stride_devid_nr(struct extent_buffer *eb,
-					     struct btrfs_stripe_extent *dps,
-					     int nr)
-{
-	return btrfs_raid_stride_devid(eb, btrfs_raid_stride_nr(dps, nr));
-}
-
-static inline u64 btrfs_raid_stride_offset_nr(struct extent_buffer *eb,
-					      struct btrfs_stripe_extent *dps,
-					      int nr)
-{
-	return btrfs_raid_stride_offset(eb, btrfs_raid_stride_nr(dps, nr));
-}
-
 /* struct btrfs_inode_ref */
 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
@@ -355,6 +322,38 @@ BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
 
+BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
+BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+BTRFS_SETGET_FUNCS(raid_stride_offset, struct btrfs_raid_stride, offset, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_extent_encoding,
+			 struct btrfs_stripe_extent, encoding, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+
+static inline struct btrfs_raid_stride *btrfs_raid_stride_nr(
+						struct btrfs_stripe_extent *dps,
+						int nr)
+{
+	unsigned long offset = (unsigned long)dps;
+
+	offset += offsetof(struct btrfs_stripe_extent, strides);
+	offset += nr * sizeof(struct btrfs_raid_stride);
+	return (struct btrfs_raid_stride *)offset;
+}
+
+static inline u64 btrfs_raid_stride_devid_nr(struct extent_buffer *eb,
+					     struct btrfs_stripe_extent *dps,
+					     int nr)
+{
+	return btrfs_raid_stride_devid(eb, btrfs_raid_stride_nr(dps, nr));
+}
+
+static inline u64 btrfs_raid_stride_offset_nr(struct extent_buffer *eb,
+					      struct btrfs_stripe_extent *dps,
+					      int nr)
+{
+	return btrfs_raid_stride_offset(eb, btrfs_raid_stride_nr(dps, nr));
+}
+
 /* struct btrfs_dev_extent */
 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64);
 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
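The btrfs_raid_stride_*_nr() helpers index into the flexible strides[] array of a RAID stripe extent item. A usage sketch only (not from this commit; leaf and slot are placeholders, and the stride count is derived from the item size):

    struct btrfs_stripe_extent *stripe_extent;
    int num_strides, i;

    stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
    num_strides = (btrfs_item_size(leaf, slot) -
                   offsetof(struct btrfs_stripe_extent, strides)) /
                  sizeof(struct btrfs_raid_stride);
    for (i = 0; i < num_strides; i++)
            printf("stride %d: devid %llu offset %llu\n", i,
                   (unsigned long long)btrfs_raid_stride_devid_nr(leaf, stripe_extent, i),
                   (unsigned long long)btrfs_raid_stride_offset_nr(leaf, stripe_extent, i));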
@@ -399,6 +398,9 @@ BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, count, 3
 
 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, count, 32);
 
+BTRFS_SETGET_FUNCS(extent_owner_ref_root_id, struct btrfs_extent_owner_ref,
+		   root_id, 64);
+
 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref,
 		   type, 8);
 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref,
@@ -420,8 +422,6 @@ static inline u32 btrfs_extent_inline_ref_size(int type)
 	return 0;
 }
 
-BTRFS_SETGET_FUNCS(extent_owner_ref_root_id, struct btrfs_extent_owner_ref, root_id, 64);
-
 /* struct btrfs_node */
 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
@@ -232,9 +232,9 @@ noinline void btrfs_release_path(struct btrfs_path *p)
  * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
  * caused by external factors.
  */
-bool __cold abort_should_print_stack(int errno)
+bool __cold abort_should_print_stack(int error)
 {
-	switch (errno) {
+	switch (error) {
 	case -EIO:
 	case -EROFS:
 	case -ENOMEM:
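The helper above lets the transaction-abort path skip a noisy backtrace for errors with an external cause. A rough sketch of how a caller might use it (not from this commit; print_trace() and fs_info stand in for whatever helpers the caller has):

    if (abort_should_print_stack(error)) {
            /* Unexpected cause, e.g. ENOSPC hinting at a bug: dump a backtrace. */
            print_trace();
    } else {
            /* Common external error (-EIO, -EROFS, -ENOMEM, ...): log quietly. */
            btrfs_warn(fs_info, "transaction aborted, error %d", error);
    }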
@@ -25,12 +25,11 @@ struct list_head;
 
 static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
 {
-	return BTRFS_MAX_ITEM_SIZE(info) -
-	       BTRFS_FILE_EXTENT_INLINE_DATA_START;
+	return BTRFS_MAX_ITEM_SIZE(info) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
 }
 
 /*
- * Returns the number of bytes used by the item on disk, minus the size of any
+ * Return the number of bytes used by the item on disk, minus the size of any
  * extent headers. If a file is compressed on disk, this is the compressed
  * size.
  */
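For reference (general btrfs definitions, not part of this diff): BTRFS_MAX_ITEM_SIZE(info) is the leaf data area minus one item header, so the value returned above works out to nodesize - sizeof(struct btrfs_header) - sizeof(struct btrfs_item) - BTRFS_FILE_EXTENT_INLINE_DATA_START.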
@@ -10,14 +10,13 @@
 #ifdef CONFIG_PRINTK
 
 #define STATE_STRING_PREFACE	": state "
-#define STATE_STRING_BUF_LEN	(sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT)
+#define STATE_STRING_BUF_LEN	(sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
 
 /*
  * Characters to print to indicate error conditions or uncommon filesystem state.
  * RO is not an error.
  */
 static const char fs_state_chars[] = {
-	[BTRFS_FS_STATE_ERROR]			= 'E',
 	[BTRFS_FS_STATE_REMOUNTING]		= 'M',
 	[BTRFS_FS_STATE_RO]			= 0,
 	[BTRFS_FS_STATE_TRANS_ABORTED]		= 'A',
@@ -37,6 +36,11 @@ static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
 	memcpy(curr, STATE_STRING_PREFACE, sizeof(STATE_STRING_PREFACE));
 	curr += sizeof(STATE_STRING_PREFACE) - 1;
 
+	if (BTRFS_FS_ERROR(info)) {
+		*curr++ = 'E';
+		states_printed = true;
+	}
+
 	for_each_set_bit(bit, &fs_state, sizeof(fs_state)) {
 		WARN_ON_ONCE(bit >= BTRFS_FS_STATE_COUNT);
 		if ((bit < BTRFS_FS_STATE_COUNT) && fs_state_chars[bit]) {
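A worked example of the two hunks above (values illustrative, not from the diff): on a filesystem where BTRFS_FS_ERROR() is true and a transaction has been aborted, the buffer comes out roughly as ": state EA". The 'E' is now emitted by the unconditional BTRFS_FS_ERROR() check rather than by the fs_state_chars[] table, which is why STATE_STRING_BUF_LEN gains the extra byte.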
@@ -106,8 +110,8 @@ const char * __attribute_const__ btrfs_decode_error(int error)
 }
 
 /*
- * __btrfs_handle_fs_error decodes expected errors from the caller and
- * invokes the appropriate error response.
+ * Decodes expected errors from the caller and invokes the appropriate error
+ * response.
  */
 __cold
 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
@@ -121,7 +125,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 
 #ifdef CONFIG_PRINTK_INDEX
 	printk_index_subsys_emit(
-		"BTRFS: error (device %s%s) in %s:%d: error=%d %s", KERN_CRIT, fmt);
+		"BTRFS: error (device %s%s) in %s:%d: errno=%d %s", KERN_CRIT, fmt);
 #endif
 
 	/*
@@ -145,7 +149,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 			sb->s_id, statestr, function, line, error, errstr, PV_VAL(vaf));
 		va_end(args);
 	} else {
-		pr_crit("BTRFS: error (device %s%s) in %s:%d: error=%d %s\n",
+		pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s\n",
 			sb->s_id, statestr, function, line, error, errstr);
 	}
 #endif
@@ -9,11 +9,16 @@
 struct btrfs_fs_info;
 
+/*
+ * We want to be able to override this in btrfs-progs.
+ */
+#ifdef __KERNEL__
 
 static inline __printf(2, 3) __cold
 void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 {
 }
 
+#endif
 
 #ifdef CONFIG_PRINTK
@@ -140,4 +140,24 @@ static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
 	return NULL;
 }
 
+static inline bool bitmap_test_range_all_set(const unsigned long *addr,
+					     unsigned long start,
+					     unsigned long nbits)
+{
+	unsigned long found_zero;
+
+	found_zero = find_next_zero_bit(addr, start + nbits, start);
+	return (found_zero == start + nbits);
+}
+
+static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
+					      unsigned long start,
+					      unsigned long nbits)
+{
+	unsigned long found_set;
+
+	found_set = find_next_bit(addr, start + nbits, start);
+	return (found_set == start + nbits);
+}
+
 #endif
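A small usage sketch for the new bitmap helpers (illustrative only, not part of the commit):

    /* Bits 16..23 of 'map' are set, everything else is clear. */
    unsigned long map[2] = { 0xff0000UL, 0 };

    if (bitmap_test_range_all_set(map, 16, 8))
            printf("bits [16, 24) all set\n");
    if (bitmap_test_range_all_zero(map, 32, 8))
            printf("bits [32, 40) all clear\n");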
@@ -34,6 +34,7 @@ struct btrfs_root;
 
 enum btrfs_trans_state {
 	TRANS_STATE_RUNNING,
+	TRANS_STATE_COMMIT_PREP,
 	TRANS_STATE_COMMIT_START,
 	TRANS_STATE_COMMIT_DOING,
 	TRANS_STATE_UNBLOCKED,
@@ -113,9 +114,6 @@ struct btrfs_transaction {
 	 */
 	atomic_t pending_ordered;
 	wait_queue_head_t pending_wait;
-
-	spinlock_t releasing_ebs_lock;
-	struct list_head releasing_ebs;
 };
 
 enum {
@@ -175,7 +173,7 @@ struct btrfs_pending_snapshot {
 	struct list_head list;
 };
 
-bool __cold abort_should_print_stack(int errno);
+bool __cold abort_should_print_stack(int error);
 
 void btrfs_abort_transaction(struct btrfs_trans_handle *trans, int error);
 int btrfs_end_transaction(struct btrfs_trans_handle *trans);
@@ -215,7 +213,7 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
 void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 				      const char *function,
-				      unsigned int line, int errno, bool first_hit);
+				      unsigned int line, int error, bool first_hit);
 
 int __init btrfs_transaction_init(void);
 void __cold btrfs_transaction_exit(void);
@@ -484,6 +484,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
 	btrfs_item_key_to_cpu(leaf, &item_key, slot);
 	is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);
 
+	/*
+	 * Bad rootid for reloc trees.
+	 *
+	 * Reloc trees are only for subvolume trees, other trees only need
+	 * to be COWed to be relocated.
+	 */
+	if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
+		     !is_fstree(key->offset))) {
+		generic_err(leaf, slot,
+			    "invalid reloc tree for root %lld, root id is not a subvolume tree",
+			    key->offset);
+		return -EUCLEAN;
+	}
+
 	/* No such tree id */
 	if (unlikely(key->objectid == 0)) {
 		if (is_root_item)
@@ -1306,6 +1320,8 @@ static int check_extent_item(struct extent_buffer *leaf,
 	unsigned long ptr;	/* Current pointer inside inline refs */
 	unsigned long end;	/* Extent item end */
 	const u32 item_size = btrfs_item_size(leaf, slot);
+	u8 last_type = 0;
+	u64 last_seq = U64_MAX;
 	u64 flags;
 	u64 generation;
 	u64 total_refs;		/* Total refs in btrfs_extent_item */
@@ -1352,6 +1368,18 @@ static int check_extent_item(struct extent_buffer *leaf,
 	 * 2.2) Ref type specific data
 	 *      Either using btrfs_extent_inline_ref::offset, or specific
 	 *      data structure.
+	 *
+	 * All above inline items should follow the order:
+	 *
+	 * - All btrfs_extent_inline_ref::type should be in an ascending
+	 *   order
+	 *
+	 * - Within the same type, the items should follow a descending
+	 *   order by their sequence number. The sequence number is
+	 *   determined by:
+	 *   * btrfs_extent_inline_ref::offset for all types other than
+	 *     EXTENT_DATA_REF
+	 *   * hash_extent_data_ref() for EXTENT_DATA_REF
 	 */
 	if (unlikely(item_size < sizeof(*ei))) {
 		extent_err(leaf, slot,
@@ -1433,6 +1461,7 @@ static int check_extent_item(struct extent_buffer *leaf,
 		struct btrfs_extent_inline_ref *iref;
 		struct btrfs_extent_data_ref *dref;
 		struct btrfs_shared_data_ref *sref;
+		u64 seq;
 		u64 dref_offset;
 		u64 inline_offset;
 		u8 inline_type;
@@ -1446,10 +1475,11 @@ static int check_extent_item(struct extent_buffer *leaf,
 		iref = (struct btrfs_extent_inline_ref *)ptr;
 		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
 		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
+		seq = inline_offset;
 		if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
 			extent_err(leaf, slot,
 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
-				   ptr, inline_type, end);
+				   ptr, btrfs_extent_inline_ref_size(inline_type), end);
 			return -EUCLEAN;
 		}
 
@@ -1476,6 +1506,10 @@ static int check_extent_item(struct extent_buffer *leaf,
 		case BTRFS_EXTENT_DATA_REF_KEY:
 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
 			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+			seq = hash_extent_data_ref(
+					btrfs_extent_data_ref_root(leaf, dref),
+					btrfs_extent_data_ref_objectid(leaf, dref),
+					btrfs_extent_data_ref_offset(leaf, dref));
 			if (unlikely(!IS_ALIGNED(dref_offset,
 						 fs_info->sectorsize))) {
 				extent_err(leaf, slot,
@@ -1505,6 +1539,24 @@ static int check_extent_item(struct extent_buffer *leaf,
 				   inline_type);
 			return -EUCLEAN;
 		}
+		if (inline_type < last_type) {
+			extent_err(leaf, slot,
+				   "inline ref out-of-order: has type %u, prev type %u",
+				   inline_type, last_type);
+			return -EUCLEAN;
+		}
+		/* Type changed, allow the sequence starts from U64_MAX again. */
+		if (inline_type > last_type)
+			last_seq = U64_MAX;
+		if (seq > last_seq) {
+			extent_err(leaf, slot,
+"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
+				   inline_type, inline_offset, seq,
+				   last_type, last_seq);
+			return -EUCLEAN;
+		}
+		last_type = inline_type;
+		last_seq = seq;
 		ptr += btrfs_extent_inline_ref_size(inline_type);
 	}
 	/* No padding is allowed */
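To illustrate the new ordering rule with made-up values: within one extent item the checker now accepts, say, two EXTENT_DATA_REF entries whose hash_extent_data_ref() values come out as 0xffffaaaa and then 0x00001234 (descending within the same type), followed by a SHARED_DATA_REF entry (a higher key type); the reverse order is rejected with the "inline ref out-of-order" errors added above.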
@@ -1666,6 +1718,44 @@ static int check_inode_ref(struct extent_buffer *leaf,
 	return 0;
 }
 
+static int check_raid_stripe_extent(const struct extent_buffer *leaf,
+				    const struct btrfs_key *key, int slot)
+{
+	struct btrfs_stripe_extent *stripe_extent =
+		btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+
+	if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
+		generic_err(leaf, slot,
+"invalid key objectid for raid stripe extent, have %llu expect aligned to %u",
+			    key->objectid, leaf->fs_info->sectorsize);
+		return -EUCLEAN;
+	}
+
+	if (unlikely(!btrfs_fs_incompat(leaf->fs_info, RAID_STRIPE_TREE))) {
+		generic_err(leaf, slot,
+		"RAID_STRIPE_EXTENT present but RAID_STRIPE_TREE incompat bit unset");
+		return -EUCLEAN;
+	}
+
+	switch (btrfs_stripe_extent_encoding(leaf, stripe_extent)) {
+	case BTRFS_STRIPE_RAID0:
+	case BTRFS_STRIPE_RAID1:
+	case BTRFS_STRIPE_DUP:
+	case BTRFS_STRIPE_RAID10:
+	case BTRFS_STRIPE_RAID5:
+	case BTRFS_STRIPE_RAID6:
+	case BTRFS_STRIPE_RAID1C3:
+	case BTRFS_STRIPE_RAID1C4:
+		break;
+	default:
+		generic_err(leaf, slot, "invalid raid stripe encoding %u",
+			    btrfs_stripe_extent_encoding(leaf, stripe_extent));
+		return -EUCLEAN;
+	}
+
+	return 0;
+}
+
 /*
  * Common point to switch the item-specific validation.
  */
@@ -1675,8 +1765,8 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
 					    struct btrfs_key *prev_key)
 {
 	struct btrfs_fs_info *fs_info = leaf->fs_info;
+	struct btrfs_chunk *chunk;
 	int ret = 0;
-	struct btrfs_chunk *chunk;
 
 	if (fs_info->skip_leaf_item_checks)
 		return 0;
@@ -1724,6 +1814,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
 	case BTRFS_EXTENT_DATA_REF_KEY:
 		ret = check_extent_data_ref(leaf, key, slot);
 		break;
+	case BTRFS_RAID_STRIPE_KEY:
+		ret = check_raid_stripe_extent(leaf, key, slot);
+		break;
 	}
 
 	if (ret)
@@ -12,6 +12,7 @@
 #include "kernel-shared/uapi/btrfs_tree.h"
 
 struct extent_buffer;
+struct btrfs_chunk;
 
 /* All the extra info needed to verify the parentness of a tree block. */
 struct btrfs_tree_parent_check {
@@ -24,7 +25,7 @@ struct btrfs_tree_parent_check {
 
 	/*
 	 * Expected transid, can be 0 to skip the check, but such skip
-	 * should only be utlized for backref walk related code.
+	 * should only be utilized for backref walk related code.
 	 */
 	u64 transid;
 
@@ -238,7 +238,8 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 }
 
 /*
- * ulist_del - delete one node from ulist
+ * Delete one node from ulist.
+ *
  * @ulist:	ulist to remove node from
  * @val:	value to delete
  * @aux:	aux to delete
@@ -27,6 +27,7 @@
 #include "kernel-shared/uapi/btrfs.h"
 #include "kernel-shared/uapi/btrfs_tree.h"
 #include "kernel-shared/ctree.h"
+#include "kernel-shared/messages.h"
 #include "kernel-shared/transaction.h"
 #include "common/messages.h"
 #include "common/utils.h"
@@ -117,7 +118,7 @@ int btrfs_lookup_uuid_received_subvol_item(int fd, const u8 *uuid,
 }
 
 int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
-			   u64 subid)
+			   u64 subid)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *uuid_root = fs_info->uuid_root;
@@ -148,7 +149,8 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
 
 	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
 	if (ret < 0) {
-		warning("error %d while searching for uuid item!", ret);
+		btrfs_warn(fs_info, "error %d while searching for uuid item!",
+			   ret);
 		goto out;
 	}
 	if (ret > 0) {
@@ -161,7 +163,8 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
 	offset = btrfs_item_ptr_offset(eb, slot);
 	item_size = btrfs_item_size(eb, slot);
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		warning("uuid item with illegal size %u!", item_size);
+		btrfs_warn(fs_info, "uuid item with illegal size %lu!",
+			   (unsigned long)item_size);
 		ret = -ENOENT;
 		goto out;
 	}