/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "kerncompat.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "kernel-lib/rbtree.h"
#include "kernel-shared/extent_io.h"
#include "kernel-shared/ctree.h"
#include "kernel-shared/transaction.h"
#include "kernel-shared/disk-io.h"
#include "kernel-shared/volumes.h"
#include "kernel-shared/backref.h"
|
2022-11-23 22:37:29 +00:00
|
|
|
#include "kernel-shared/compression.h"
|
2022-09-14 15:06:52 +00:00
|
|
|
#include "common/internal.h"
|
|
|
|
#include "common/messages.h"
|
|
|
|
#include "common/utils.h"
|
2018-02-01 16:14:42 +00:00
|
|
|
#include "check/mode-common.h"
|
2022-09-27 17:43:33 +00:00
|
|
|
#include "check/repair.h"
|
2018-01-18 07:49:39 +00:00
|
|
|
|
2022-09-27 17:27:18 +00:00
|
|
|
struct task_ctx g_task_ctx = { 0 };
|
|
|
|
|
/*
 * Check if the inode referenced by the given data reference uses the extent
 * at disk_bytenr as a non-prealloc extent.
 *
 * Returns 1 if true, 0 if false and < 0 on error.
 */
static int check_prealloc_data_ref(u64 disk_bytenr,
				   struct btrfs_extent_data_ref *dref,
				   struct extent_buffer *eb)
{
	u64 rootid = btrfs_extent_data_ref_root(eb, dref);
	u64 objectid = btrfs_extent_data_ref_objectid(eb, dref);
	u64 offset = btrfs_extent_data_ref_offset(eb, dref);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path path;
	int ret;

	btrfs_init_path(&path);
	key.objectid = rootid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root(gfs_info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	key.objectid = objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret > 0) {
		fprintf(stderr,
	"Missing file extent item for inode %llu, root %llu, offset %llu",
			objectid, rootid, offset);
		ret = -ENOENT;
	}
	if (ret < 0)
		goto out;

	while (true) {
		struct btrfs_file_extent_item *fi;
		int extent_type;

		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
			ret = btrfs_next_leaf(root, &path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
		}

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid != objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(path.nodes[0], fi);
		if (extent_type != BTRFS_FILE_EXTENT_REG &&
		    extent_type != BTRFS_FILE_EXTENT_PREALLOC)
			goto next;

		if (btrfs_file_extent_disk_bytenr(path.nodes[0], fi) !=
		    disk_bytenr)
			break;

		if (extent_type == BTRFS_FILE_EXTENT_REG) {
			ret = 1;
			goto out;
		}
next:
		path.slots[0]++;
	}
	ret = 0;
out:
	btrfs_release_path(&path);
	return ret;
}

/*
 * Check if a shared data reference points to a node that has a file extent item
 * pointing to the extent at @disk_bytenr that is not of type prealloc.
 *
 * Returns 1 if true, 0 if false and < 0 on error.
 */
static int check_prealloc_shared_data_ref(u64 parent, u64 disk_bytenr)
{
	struct extent_buffer *eb;
	u32 nr;
	int i;
	int ret = 0;

	eb = read_tree_block(gfs_info, parent, 0);
	if (!extent_buffer_uptodate(eb)) {
		ret = -EIO;
		goto out;
	}

	nr = btrfs_header_nritems(eb);
	for (i = 0; i < nr; i++) {
		struct btrfs_key key;
		struct btrfs_file_extent_item *fi;
		int extent_type;

		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type != BTRFS_FILE_EXTENT_REG &&
		    extent_type != BTRFS_FILE_EXTENT_PREALLOC)
			continue;

		if (btrfs_file_extent_disk_bytenr(eb, fi) == disk_bytenr &&
		    extent_type == BTRFS_FILE_EXTENT_REG) {
			ret = 1;
			break;
		}
	}
out:
	free_extent_buffer(eb);
	return ret;
}

/*
 * Check if a prealloc extent is shared by multiple inodes and if any inode has
 * already written to that extent. This is to avoid emitting invalid warnings
 * about odd csum items (an inode has an extent entirely marked as prealloc
 * but another inode shares it and has already written to it).
 *
 * Note: right now it does not check if the number of checksum items in the
 * csum tree matches the number of bytes written into the ex-prealloc extent.
 * It's complex to deal with that because the prealloc extent might have been
 * partially written through multiple inodes and we would have to keep track of
 * ranges, merging them and notice ranges that fully or partially overlap, to
 * avoid false reports of csum items missing for areas of the prealloc extent
 * that were not written to - for example if we have a 1M prealloc extent, we
 * can have only the first half of it written, but 2 different inodes refer to
 * its first half (through reflinks/cloning), so keeping a counter of bytes
 * covered by checksum items is not enough, as the correct value would be 512K
 * and not 1M (whence the need to track ranges).
 *
 * Returns 0 if the prealloc extent was not written yet by any inode, 1 if
 * at least one other inode has written to it, and < 0 on error.
 */
int check_prealloc_extent_written(u64 disk_bytenr, u64 num_bytes)
{
	struct btrfs_root *extent_root = btrfs_extent_root(gfs_info,
							   disk_bytenr);
	struct btrfs_path path;
	struct btrfs_key key;
	int ret;
	struct btrfs_extent_item *ei;
	u32 item_size;
	unsigned long ptr;
	unsigned long end;

	key.objectid = disk_bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
	if (ret > 0) {
		fprintf(stderr,
	"Missing extent item in extent tree for disk_bytenr %llu, num_bytes %llu\n",
			disk_bytenr, num_bytes);
		ret = -ENOENT;
	}
	if (ret < 0)
		goto out;

	/* First check all inline refs. */
	ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
			    struct btrfs_extent_item);
	item_size = btrfs_item_size(path.nodes[0], path.slots[0]);
	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(path.nodes[0], iref);
		ASSERT(type == BTRFS_EXTENT_DATA_REF_KEY ||
		       type == BTRFS_SHARED_DATA_REF_KEY);

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = check_prealloc_data_ref(disk_bytenr,
						      dref, path.nodes[0]);
			if (ret != 0)
				goto out;
		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
			u64 parent;

			parent = btrfs_extent_inline_ref_offset(path.nodes[0],
								iref);
			ret = check_prealloc_shared_data_ref(parent,
							     disk_bytenr);
			if (ret != 0)
				goto out;
		}

		ptr += btrfs_extent_inline_ref_size(type);
	}

	/* Now check if there are any non-inlined refs. */
	path.slots[0]++;
	while (true) {
		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
			ret = btrfs_next_leaf(extent_root, &path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
		}

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.objectid != disk_bytenr)
			break;

		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = btrfs_item_ptr(path.nodes[0], path.slots[0],
					      struct btrfs_extent_data_ref);
			ret = check_prealloc_data_ref(disk_bytenr,
						      dref, path.nodes[0]);
			if (ret != 0)
				goto out;
		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = check_prealloc_shared_data_ref(key.offset,
							     disk_bytenr);
			if (ret != 0)
				goto out;
		}

		path.slots[0]++;
	}
out:
	btrfs_release_path(&path);
	return ret;
}

/*
 * Search the csum tree to find how many bytes of the range
 * [@start, @start + @len) have a corresponding csum item.
 *
 * @start:	range start
 * @len:	range length
 * @found:	return value of found csum bytes, unit is BYTE
 */
int count_csum_range(u64 start, u64 len, u64 *found)
{
	struct btrfs_root *csum_root = btrfs_csum_root(gfs_info, start);
	struct btrfs_key key;
	struct btrfs_path path;
	struct extent_buffer *leaf;
	int ret;
	size_t size;
	*found = 0;
	u64 csum_end;
	u16 csum_size = gfs_info->csum_size;

	btrfs_init_path(&path);

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, csum_root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path.slots[0] > 0) {
		leaf = path.nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path.slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY)
			path.slots[0]--;
	}

	while (len > 0) {
		leaf = path.nodes[0];
		if (path.slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(csum_root, &path);
			if (ret > 0)
				break;
			else if (ret < 0)
				goto out;
			leaf = path.nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY)
			break;

		btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
		if (key.offset >= start + len)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size(leaf, path.slots[0]);
		csum_end = key.offset + (size / csum_size) *
			   gfs_info->sectorsize;
		if (csum_end > start) {
			size = min(csum_end - start, len);
			len -= size;
			start += size;
			*found += size;
		}

		path.slots[0]++;
	}
out:
	btrfs_release_path(&path);
	if (ret < 0)
		return ret;
	return 0;
}

/*
 * Wrapper to insert one inode item into given @root
 * Timestamp will be set to current time.
 *
 * @root:	the root to insert inode item into
 * @ino:	inode number
 * @size:	inode size
 * @nbytes:	nbytes (real used size, without hole)
 * @nlink:	number of links
 * @mode:	file mode, including S_IF* bits
 */
int insert_inode_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, u64 ino, u64 size,
		      u64 nbytes, u64 nlink, u32 mode)
{
	struct btrfs_inode_item ii;
	time_t now = time(NULL);
	int ret;

	memset(&ii, 0, sizeof(ii));
	btrfs_set_stack_inode_size(&ii, size);
	btrfs_set_stack_inode_nbytes(&ii, nbytes);
	btrfs_set_stack_inode_nlink(&ii, nlink);
	btrfs_set_stack_inode_mode(&ii, mode);
	btrfs_set_stack_inode_generation(&ii, trans->transid);
	btrfs_set_stack_timespec_sec(&ii.ctime, now);
	btrfs_set_stack_timespec_sec(&ii.mtime, now);

	ret = btrfs_insert_inode(trans, root, ino, &ii);
	ASSERT(!ret);

	warning("root %llu inode %llu recreating inode item, this may "
		"be incomplete, please check permissions and content after "
		"the fsck completes.\n", (unsigned long long)root->objectid,
		(unsigned long long)ino);

	return 0;
}

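/*
 * Find the highest objectid currently in use in @root, so the caller can pick
 * the next free inode number.
 *
 * Returns 0 and stores the result in @highest_ino, or -EOVERFLOW if the
 * objectid space is already exhausted.
 */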
static int get_highest_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct btrfs_path *path,
			     u64 *highest_ino)
{
	struct btrfs_key key, found_key;
	int ret;

	btrfs_init_path(path);
	key.objectid = BTRFS_LAST_FREE_OBJECTID;
	key.offset = -1;
	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret == 1) {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0] - 1);
		*highest_ino = found_key.objectid;
		ret = 0;
	}
	if (*highest_ino >= BTRFS_LAST_FREE_OBJECTID)
		ret = -EOVERFLOW;
	btrfs_release_path(path);
	return ret;
}

/*
 * Link inode to dir 'lost+found'. Increase @ref_count.
 *
 * Returns 0 means success.
 * Returns <0 means failure.
 */
int link_inode_to_lostfound(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    u64 ino, char *namebuf, u32 name_len,
			    u8 filetype, u64 *ref_count)
{
	char *dir_name = "lost+found";
	u64 lost_found_ino;
	int ret;
	u32 mode = 0700;

	btrfs_release_path(path);
	ret = get_highest_inode(trans, root, path, &lost_found_ino);
	if (ret < 0)
		goto out;
	lost_found_ino++;

	ret = btrfs_mkdir(trans, root, dir_name, strlen(dir_name),
			  BTRFS_FIRST_FREE_OBJECTID, &lost_found_ino,
			  mode);
	if (ret < 0) {
		errno = -ret;
		error("failed to create '%s' dir: %m", dir_name);
		goto out;
	}
	ret = btrfs_add_link(trans, root, ino, lost_found_ino,
			     namebuf, name_len, filetype, NULL, 1, 0);
	/*
	 * Add ".INO" suffix several times to handle case where
	 * "FILENAME.INO" is already taken by another file.
	 */
	while (ret == -EEXIST) {
		/*
		 * Conflicting file name, add ".INO" as suffix
		 * (+1 for the '.')
		 */
		if (name_len + count_digits(ino) + 1 > BTRFS_NAME_LEN) {
			ret = -EFBIG;
			goto out;
		}
		snprintf(namebuf + name_len, BTRFS_NAME_LEN - name_len,
			 ".%llu", ino);
		name_len += count_digits(ino) + 1;
		ret = btrfs_add_link(trans, root, ino, lost_found_ino, namebuf,
				     name_len, filetype, NULL, 1, 0);
	}
	if (ret < 0) {
		errno = -ret;
		error("failed to link the inode %llu to %s dir: %m",
		      ino, dir_name);
		goto out;
	}

	++*ref_count;
	printf("Moving file '%.*s' to '%s' dir since it has no valid backref\n",
	       name_len, namebuf, dir_name);
out:
	btrfs_release_path(path);
	if (ret)
		error("failed to move file '%.*s' to '%s' dir", name_len,
		      namebuf, dir_name);
	return ret;
}

/*
 * Extra (optional) check for dev_item size to report possible problem on a new
 * kernel.
 */
void check_dev_size_alignment(u64 devid, u64 total_bytes, u32 sectorsize)
{
	if (!IS_ALIGNED(total_bytes, sectorsize)) {
		warning(
"unaligned total_bytes detected for devid %llu, have %llu should be aligned to %u",
			devid, total_bytes, sectorsize);
		warning(
"this is OK for older kernel, but may cause kernel warning for newer kernels");
		warning("this can be fixed by 'btrfs rescue fix-device-size'");
	}
}

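/*
 * Issue readahead for the tree blocks pointed to by @node, starting at @slot.
 * Only level 1 nodes are handled, so this prefetches the leaves a tree walk is
 * about to visit.
 */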
void reada_walk_down(struct btrfs_root *root, struct extent_buffer *node,
		     int slot)
{
	u64 bytenr;
	u64 ptr_gen;
	u32 nritems;
	int i;
	int level;

	level = btrfs_header_level(node);
	if (level != 1)
		return;

	nritems = btrfs_header_nritems(node);
	for (i = slot; i < nritems; i++) {
		bytenr = btrfs_node_blockptr(node, i);
		ptr_gen = btrfs_node_ptr_generation(node, i);
		readahead_tree_block(gfs_info, bytenr, ptr_gen);
	}
}

/*
 * Check the child node/leaf by the following conditions:
 * 1. the first item key of the node/leaf should be the same with the one
 *    in parent.
 * 2. block in parent node should match the child node/leaf.
 * 3. generation of parent node and child's header should be consistent.
 *
 * Or the child node/leaf pointed by the key in parent is not valid.
 *
 * We hope to check leaf owner too, but since subvol may share leaves,
 * which makes leaf owner check not so strong, key check should be
 * sufficient for that case.
 */
int check_child_node(struct extent_buffer *parent, int slot,
		     struct extent_buffer *child)
{
	struct btrfs_key parent_key;
	struct btrfs_key child_key;
	int ret = 0;

	btrfs_node_key_to_cpu(parent, &parent_key, slot);
	if (btrfs_header_level(child) == 0)
		btrfs_item_key_to_cpu(child, &child_key, 0);
	else
		btrfs_node_key_to_cpu(child, &child_key, 0);

	if (memcmp(&parent_key, &child_key, sizeof(parent_key))) {
		ret = -EINVAL;
		fprintf(stderr,
			"Wrong key of child node/leaf, wanted: (%llu, %u, %llu), have: (%llu, %u, %llu)\n",
			parent_key.objectid, parent_key.type, parent_key.offset,
			child_key.objectid, child_key.type, child_key.offset);
	}
	if (btrfs_header_bytenr(child) != btrfs_node_blockptr(parent, slot)) {
		ret = -EINVAL;
		fprintf(stderr, "Wrong block of child node/leaf, wanted: %llu, have: %llu\n",
			btrfs_node_blockptr(parent, slot),
			btrfs_header_bytenr(child));
	}
	if (btrfs_node_ptr_generation(parent, slot) !=
	    btrfs_header_generation(child)) {
		ret = -EINVAL;
		fprintf(stderr, "Wrong generation of child node/leaf, wanted: %llu, have: %llu\n",
			btrfs_header_generation(child),
			btrfs_node_ptr_generation(parent, slot));
	}
	return ret;
}

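/*
 * Clear the in-memory free space cache state and mark every block group as not
 * cached, so the caches are rebuilt on the next lookup.
 */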
void reset_cached_block_groups(void)
{
	struct btrfs_block_group *cache;
	u64 start, end;
	int ret;

	while (1) {
		ret = find_first_extent_bit(&gfs_info->free_space_cache, 0,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(&gfs_info->free_space_cache, start, end);
	}

	start = 0;
	while (1) {
		cache = btrfs_lookup_first_block_group(gfs_info, start);
		if (!cache)
			break;
		if (cache->cached)
			cache->cached = 0;
		start = cache->start + cache->length;
	}
}

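/*
 * Pin all tree blocks currently in use, so allocations made during repair do
 * not overwrite existing metadata.
 */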
int pin_metadata_blocks(void)
{
	return btrfs_mark_used_tree_blocks(gfs_info,
					   &gfs_info->pinned_extents);
}

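/*
 * Record all tree blocks currently in use in gfs_info->excluded_extents, so
 * later allocations can stay clear of those ranges.
 */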
int exclude_metadata_blocks(void)
{
	struct extent_io_tree *excluded_extents;

	excluded_extents = malloc(sizeof(*excluded_extents));
	if (!excluded_extents)
		return -ENOMEM;
	extent_io_tree_init(excluded_extents);
	gfs_info->excluded_extents = excluded_extents;

	return btrfs_mark_used_tree_blocks(gfs_info, excluded_extents);
}

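/* Tear down and free the excluded extents tree set up by exclude_metadata_blocks(). */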
void cleanup_excluded_extents(void)
{
	if (gfs_info->excluded_extents) {
		extent_io_tree_cleanup(gfs_info->excluded_extents);
		free(gfs_info->excluded_extents);
	}
	gfs_info->excluded_extents = NULL;
}

/*
 * Delete one corrupted dir item whose hash doesn't match its name.
 *
 * Since its hash is incorrect, we can't use btrfs_name_hash() to calculate
 * the search key, but rely on @di_key parameter to do the search.
 */
int delete_corrupted_dir_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *di_key, char *namebuf,
			      u32 namelen)
{
	struct btrfs_dir_item *di_item;
	struct btrfs_path path;
	int ret;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, root, di_key, &path, 0, 1);
	if (ret > 0) {
		error("key (%llu %u %llu) doesn't exist in root %llu",
		      di_key->objectid, di_key->type, di_key->offset,
		      root->root_key.objectid);
		ret = -ENOENT;
		goto out;
	}
	if (ret < 0) {
		error("failed to search root %llu: %d",
		      root->root_key.objectid, ret);
		goto out;
	}

	di_item = btrfs_match_dir_item_name(root, &path, namebuf, namelen);
	if (!di_item) {
		/*
		 * This is possible if the dir_item has incorrect namelen.
		 * But in that case, we shouldn't reach repair path here.
		 */
		error("no dir item named '%s' found with key (%llu %u %llu)",
		      namebuf, di_key->objectid, di_key->type,
		      di_key->offset);
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_delete_one_dir_name(trans, root, &path, di_item);
	if (ret < 0)
		error("failed to delete one dir name: %d", ret);

out:
	btrfs_release_path(&path);
	return ret;
}

/*
 * Reset the mode of inode (specified by @root and @ino) to @mode.
 *
 * Caller should ensure @path is not populated, the @path is mainly for caller
 * to grab the correct new path of the inode.
 *
 * Return 0 if repair is done, @path will point to the correct inode item.
 * Return <0 for errors.
 */
int reset_imode(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		struct btrfs_path *path, u64 ino, u32 mode)
{
	struct btrfs_inode_item *iitem;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int slot;
	int ret;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0) {
		errno = -ret;
		error("failed to search tree %llu: %m",
		      root->root_key.objectid);
		return ret;
	}
	leaf = path->nodes[0];
	slot = path->slots[0];
	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
	btrfs_set_inode_mode(leaf, iitem, mode);
	btrfs_mark_buffer_dirty(leaf);
	return ret;
}

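/*
 * Look up the DIR_INDEX item (@dirid, DIR_INDEX, @index) and, if it points
 * back to inode @ino with a matching @name, convert its file type to an imode
 * and return it through @imode_ret.
 *
 * Returns 0 if a matching entry was found, -ENOENT or another negative error
 * otherwise.
 */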
static int find_file_type_dir_index(struct btrfs_root *root, u64 ino, u64 dirid,
				    u64 index, const char *name, u32 name_len,
				    u32 *imode_ret)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_dir_item *di;
	char namebuf[BTRFS_NAME_LEN] = {0};
	bool found = false;
	u8 filetype;
	u32 len;
	int ret;

	btrfs_init_path(&path);
	key.objectid = dirid;
	key.offset = index;
	key.type = BTRFS_DIR_INDEX_KEY;

	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}
	if (ret < 0)
		goto out;
	di = btrfs_item_ptr(path.nodes[0], path.slots[0],
			    struct btrfs_dir_item);
	btrfs_dir_item_key_to_cpu(path.nodes[0], di, &location);

	/* Various basic checks */
	if (location.objectid != ino || location.type != BTRFS_INODE_ITEM_KEY ||
	    location.offset != 0)
		goto out;
	filetype = btrfs_dir_type(path.nodes[0], di);
	if (filetype >= BTRFS_FT_MAX || filetype == BTRFS_FT_UNKNOWN)
		goto out;
	len = min_t(u32, BTRFS_NAME_LEN,
		    btrfs_item_size(path.nodes[0], path.slots[0]) - sizeof(*di));
	len = min_t(u32, len, btrfs_dir_name_len(path.nodes[0], di));
	read_extent_buffer(path.nodes[0], namebuf, (unsigned long)(di + 1), len);
	if (name_len != len || memcmp(namebuf, name, len))
		goto out;
	found = true;
	*imode_ret = btrfs_type_to_imode(filetype);
out:
	btrfs_release_path(&path);
	if (!found && !ret)
		ret = -ENOENT;
	return ret;
}

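/*
 * Same as find_file_type_dir_index(), but goes through the DIR_ITEM keyed by
 * the hash of @name, iterating all entries that share that hash.
 */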
static int find_file_type_dir_item(struct btrfs_root *root, u64 ino, u64 dirid,
				   const char *name, u32 name_len,
				   u32 *imode_ret)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_dir_item *di;
	char namebuf[BTRFS_NAME_LEN] = {0};
	bool found = false;
	unsigned long cur;
	unsigned long end;
	u8 filetype;
	u32 len;
	int ret;

	btrfs_init_path(&path);
	key.objectid = dirid;
	key.offset = btrfs_name_hash(name, name_len);
	/* The offset is a name hash, so the key type must be DIR_ITEM */
	key.type = BTRFS_DIR_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}
	if (ret < 0)
		goto out;

	cur = btrfs_item_ptr_offset(path.nodes[0], path.slots[0]);
	end = cur + btrfs_item_size(path.nodes[0], path.slots[0]);
	while (cur < end) {
		di = (struct btrfs_dir_item *)cur;
		cur += btrfs_dir_name_len(path.nodes[0], di) + sizeof(*di);

		btrfs_dir_item_key_to_cpu(path.nodes[0], di, &location);
		/* Various basic checks */
		if (location.objectid != ino ||
		    location.type != BTRFS_INODE_ITEM_KEY ||
		    location.offset != 0)
			continue;
		filetype = btrfs_dir_type(path.nodes[0], di);
		if (filetype >= BTRFS_FT_MAX || filetype == BTRFS_FT_UNKNOWN)
			continue;
		len = min_t(u32, BTRFS_NAME_LEN,
			    btrfs_item_size(path.nodes[0], path.slots[0]) -
			    sizeof(*di));
		len = min_t(u32, len, btrfs_dir_name_len(path.nodes[0], di));
		read_extent_buffer(path.nodes[0], namebuf,
				   (unsigned long)(di + 1), len);
		if (name_len != len || memcmp(namebuf, name, len))
			continue;
		*imode_ret = btrfs_type_to_imode(filetype);
		found = true;
		goto out;
	}
out:
	btrfs_release_path(&path);
	if (!found && !ret)
		ret = -ENOENT;
	return ret;
}

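/*
 * Determine the file type of inode @ino from its directory entry in @dirid,
 * preferring the DIR_INDEX item and falling back to the DIR_ITEM.
 */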
static int find_file_type(struct btrfs_root *root, u64 ino, u64 dirid,
			  u64 index, const char *name, u32 name_len,
			  u32 *imode_ret)
{
	int ret;

	ret = find_file_type_dir_index(root, ino, dirid, index, name, name_len,
				       imode_ret);
	if (ret == 0)
		return ret;
	return find_file_type_dir_item(root, ino, dirid, name, name_len,
				       imode_ret);
}

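/*
 * Detect a plausible imode for an inode in a subvolume tree whose INODE_ITEM
 * carries an invalid mode.
 *
 * The detection tries, in order:
 * 1) INODE_REF items: resolve the referencing DIR_ITEM/DIR_INDEX and use its
 *    file type. This is the most accurate source.
 * 2) DIR_ITEM/DIR_INDEX items owned by the inode itself: then it must be a
 *    directory.
 * 3) EXTENT_DATA items: the inode is a regular file or a symlink; default to
 *    regular file, as the user can inspect the content.
 * 4) A non-zero rdev: the inode is a block or character device; default to
 *    block device.
 * If none of the above gives an answer, return -ENOENT rather than guessing.
 */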
int detect_imode(struct btrfs_root *root, struct btrfs_path *path,
		 u32 *imode_ret)
{
	struct btrfs_key key;
	struct btrfs_inode_item iitem;
	bool found = false;
	u64 ino;
	u32 imode = 0;
	int ret = 0;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ino = key.objectid;
	read_extent_buffer(path->nodes[0], &iitem,
			btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
			sizeof(iitem));
	/* root inode */
	if (ino == BTRFS_FIRST_FREE_OBJECTID) {
		imode = S_IFDIR;
		found = true;
		goto out;
	}

	while (1) {
		struct btrfs_inode_ref *iref;
		struct extent_buffer *leaf;
		unsigned long cur;
		unsigned long end;
		char namebuf[BTRFS_NAME_LEN] = {0};
		u64 index;
		u32 namelen;
		int slot;

		ret = btrfs_next_item(root, path);
		if (ret > 0) {
			/* falls back to rdev check */
			ret = 0;
			goto out;
		}
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino)
			goto out;

		/*
		 * We ignore some types to make life easier:
		 * - XATTR
		 *   Both REG and DIR can have xattr, so not useful
		 */
		switch (key.type) {
		case BTRFS_INODE_REF_KEY:
			/* The most accurate way to determine filetype */
			cur = btrfs_item_ptr_offset(leaf, slot);
			end = cur + btrfs_item_size(leaf, slot);
			while (cur < end) {
				iref = (struct btrfs_inode_ref *)cur;
				namelen = min_t(u32, end - cur - sizeof(*iref),
					btrfs_inode_ref_name_len(leaf, iref));
				index = btrfs_inode_ref_index(leaf, iref);
				read_extent_buffer(leaf, namebuf,
					(unsigned long)(iref + 1), namelen);
				ret = find_file_type(root, ino, key.offset,
						index, namebuf, namelen,
						&imode);
				if (ret == 0) {
					found = true;
					goto out;
				}
				cur += sizeof(*iref) + namelen;
			}
			break;
		case BTRFS_DIR_ITEM_KEY:
		case BTRFS_DIR_INDEX_KEY:
			imode = S_IFDIR;
			found = true;
			goto out;
		case BTRFS_EXTENT_DATA_KEY:
			/*
			 * Both REG and LINK could have EXTENT_DATA.
			 * We just fall back to REG as user can inspect the
			 * content.
			 */
			imode = S_IFREG;
			found = true;
			goto out;
		}
	}

out:
	/*
	 * Both CHR and BLK use rdev, no way to distinguish them, so fall back
	 * to BLK. But either way it doesn't really matter, as CHR/BLK on btrfs
	 * should be pretty rare, and no real data will be lost.
	 */
	if (!found && btrfs_stack_inode_rdev(&iitem) != 0) {
		imode = S_IFBLK;
		found = true;
	}

	if (found) {
		ret = 0;
		*imode_ret = (imode | 0700);
	} else {
		ret = -ENOENT;
	}
	return ret;
}

/*
 * Reset the inode mode of the inode specified by @path.
 *
 * Caller should ensure the @path is pointing to an INODE_ITEM. Inodes in the
 * root tree get a fixed mode, while inodes in subvolume trees have their mode
 * determined by detect_imode().
 *
 * Return 0 if repair is successful.
 * Return <0 if error happens.
 */
int repair_imode_common(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	u32 imode;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_INODE_ITEM_KEY);
	if (root->objectid == BTRFS_ROOT_TREE_OBJECTID) {
		/* In root tree we only have two possible imode */
		if (key.objectid == BTRFS_ROOT_TREE_OBJECTID)
			imode = S_IFDIR | 0755;
		else
			imode = S_IFREG | 0600;
	} else {
		ret = detect_imode(root, path, &imode);
		if (ret < 0)
			return ret;
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		errno = -ret;
		error_msg(ERROR_MSG_START_TRANS, "%m");
		return ret;
	}
	btrfs_release_path(path);

	ret = reset_imode(trans, root, path, key.objectid, imode);
	if (ret < 0)
		goto abort;
	ret = btrfs_commit_transaction(trans, root);
	if (!ret)
		printf("reset mode for inode %llu root %llu\n",
		       key.objectid, root->root_key.objectid);
	return ret;
abort:
	btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * For free space inodes, we can't call check_inode_item() as free space
 * cache inode doesn't have INODE_REF.
 * We just check its inode mode.
 */
int check_repair_free_space_inode(struct btrfs_path *path)
{
	struct btrfs_inode_item *iitem;
	struct btrfs_key key;
	u32 mode;
	int ret = 0;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_INODE_ITEM_KEY && is_fstree(key.objectid));
	iitem = btrfs_item_ptr(path->nodes[0], path->slots[0],
			       struct btrfs_inode_item);
	mode = btrfs_inode_mode(path->nodes[0], iitem);
	if (mode != FREE_SPACE_CACHE_INODE_MODE) {
		error(
	"free space cache inode %llu has invalid mode: has 0%o expect 0%o",
		      key.objectid, mode, FREE_SPACE_CACHE_INODE_MODE);
		ret = -EUCLEAN;
		if (opt_check_repair) {
			ret = repair_imode_common(gfs_info->tree_root, path);
			if (ret < 0)
				return ret;
			return ret;
		}
	}
	return ret;
}

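/*
 * Force @eb to be COWed again: search down to it from its owner root with
 * cow=1 inside a new transaction, then commit, so a fresh copy of the block is
 * written out.
 */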
int recow_extent_buffer(struct btrfs_root *root, struct extent_buffer *eb)
{
	struct btrfs_path path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	int ret;

	printf("Recowing metadata block %llu\n", eb->start);
	key.objectid = btrfs_header_owner(eb);
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	root = btrfs_read_fs_root(gfs_info, &key);
	if (IS_ERR(root)) {
		fprintf(stderr, "Couldn't find owner root %llu\n",
			key.objectid);
		return PTR_ERR(root);
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_init_path(&path);
	path.lowest_level = btrfs_header_level(eb);
	if (path.lowest_level)
		btrfs_node_key_to_cpu(eb, &key, 0);
	else
		btrfs_item_key_to_cpu(eb, &key, 0);

	ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
	btrfs_commit_transaction(trans, root);
	btrfs_release_path(&path);
	return ret;
}

/*
 * Try to get correct extent item generation.
 *
 * Return 0 if we get a correct generation.
 * Return <0 if we failed to get one.
 */
int get_extent_item_generation(u64 bytenr, u64 *gen_ret)
{
	struct btrfs_root *root = btrfs_extent_root(gfs_info, bytenr);
	struct btrfs_extent_item *ei;
	struct btrfs_path path;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	/* Not possible */
	if (ret == 0)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;
	ret = btrfs_previous_extent_item(root, &path, bytenr);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ei = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_extent_item);

	if (btrfs_extent_flags(path.nodes[0], ei) &
	    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct extent_buffer *eb;

		eb = read_tree_block(gfs_info, bytenr, 0);
		if (extent_buffer_uptodate(eb)) {
			*gen_ret = btrfs_header_generation(eb);
			ret = 0;
		} else {
			ret = -EIO;
		}
		free_extent_buffer(eb);
	} else {
		/*
		 * TODO: Grab proper data generation for data extents.
		 * But this is not an urgent objective, as we can still
		 * use transaction id as fall back
		 */
		ret = -ENOTSUP;
	}
out:
	btrfs_release_path(&path);
	return ret;
}

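/*
 * Reset the bytes_used value of the device item for @devid to
 * @bytes_used_expected, both in the chunk tree and, via the transaction
 * commit, in the superblock's dev_item.
 */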
int repair_dev_item_bytes_used(struct btrfs_fs_info *fs_info,
			       u64 devid, u64 bytes_used_expected)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	int ret;

	device = btrfs_find_device_by_devid(fs_info->fs_devices, devid, 0);
	if (!device) {
		error("failed to find device with devid %llu", devid);
		return -ENOENT;
	}

	/* Bytes_used matches, not what we can repair */
	if (device->bytes_used == bytes_used_expected)
		return -ENOTSUP;

	/*
	 * We have to set the device bytes_used right now, before starting a
	 * new transaction, as it may allocate a new chunk and modify
	 * device->bytes_used.
	 */
	device->bytes_used = bytes_used_expected;
	trans = btrfs_start_transaction(fs_info->chunk_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		errno = -ret;
		error_msg(ERROR_MSG_START_TRANS, "%m");
		return ret;
	}

	/* Manually update the device item in chunk tree */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		errno = -ret;
		error("failed to update device item for devid %llu: %m", devid);
		goto error;
	}

	/*
	 * Commit transaction not only to save the above change but also update
	 * the device item in super block.
	 */
	ret = btrfs_commit_transaction(trans, fs_info->chunk_root);
	if (ret < 0) {
		errno = -ret;
		error_msg(ERROR_MSG_COMMIT_TRANS, "%m");
	} else {
		printf("reset devid %llu bytes_used to %llu\n", devid,
		       device->bytes_used);
	}
	return ret;
error:
	btrfs_abort_transaction(trans, ret);
	return ret;
}

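/*
 * Read the data extent [@start, @start + @len) sector by sector and insert the
 * corresponding checksum items into the csum tree. @buf must be at least
 * sectorsize bytes.
 */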
static int populate_csum(struct btrfs_trans_handle *trans,
			 struct btrfs_root *csum_root, char *buf, u64 start,
			 u64 len)
{
	u64 offset = 0;
	u64 sectorsize;
	int ret = 0;

	while (offset < len) {
		sectorsize = gfs_info->sectorsize;
		ret = read_data_from_disk(gfs_info, buf, start + offset,
					  &sectorsize, 0);
		if (ret)
			break;
		ret = btrfs_csum_file_block(trans, start + len, start + offset,
					    buf, sectorsize);
		if (ret)
			break;
		offset += sectorsize;
	}
	return ret;
}

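/*
 * Walk all file extent items of @cur_root and recreate their checksums in the
 * csum tree. Inodes with the NODATASUM flag are skipped, and for preallocated
 * ranges the just-generated csums are deleted again, so only written data ends
 * up with checksum items.
 */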
static int fill_csum_tree_from_one_fs_root(struct btrfs_trans_handle *trans,
					   struct btrfs_root *cur_root)
{
	struct btrfs_root *csum_root;
	struct btrfs_path path;
	struct btrfs_key key;
	struct extent_buffer *node;
	struct btrfs_file_extent_item *fi;
	char *buf = NULL;
	u64 skip_ino = 0;
	u64 start = 0;
	u64 len = 0;
	int slot = 0;
	int ret = 0;

	buf = malloc(gfs_info->sectorsize);
	if (!buf)
		return -ENOMEM;

	btrfs_init_path(&path);
	key.objectid = 0;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, cur_root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;
	/* Iterate all regular file extents and fill their csums */
	while (1) {
		u8 type;

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

		if (key.type != BTRFS_EXTENT_DATA_KEY &&
		    key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		/* This item belongs to an inode with NODATASUM, skip it */
		if (key.objectid == skip_ino)
			goto next;

		if (key.type == BTRFS_INODE_ITEM_KEY) {
			struct btrfs_inode_item *ii;

			ii = btrfs_item_ptr(path.nodes[0], path.slots[0],
					    struct btrfs_inode_item);
			/* Check if the inode has NODATASUM flag */
			if (btrfs_inode_flags(path.nodes[0], ii) & BTRFS_INODE_NODATASUM)
				skip_ino = key.objectid;
			goto next;
		}
		node = path.nodes[0];
		slot = path.slots[0];
		fi = btrfs_item_ptr(node, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(node, fi);

		/* Skip inline extents */
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;

		start = btrfs_file_extent_disk_bytenr(node, fi);
		/* Skip holes */
		if (start == 0)
			goto next;
		/*
		 * Always generate the csum for the whole preallocated/regular
		 * extent first, then remove the csum for the preallocated
		 * range.
		 *
		 * This is to handle holes on regular extents like:
		 * xfs_io -f -c "pwrite 0 8k" -c "sync" -c "punch 0 4k".
		 *
		 * This behavior will cost extra IO/CPU time, but there is
		 * no other way to ensure the correctness.
		 */
		csum_root = btrfs_csum_root(gfs_info, start);
		len = btrfs_file_extent_disk_num_bytes(node, fi);
		ret = populate_csum(trans, csum_root, buf, start, len);
		if (ret == -EEXIST)
			ret = 0;
		if (ret < 0)
			goto out;

		/* Delete the csum for the preallocated range */
		if (type == BTRFS_FILE_EXTENT_PREALLOC) {
			start += btrfs_file_extent_offset(node, fi);
			len = btrfs_file_extent_num_bytes(node, fi);
			ret = btrfs_del_csums(trans, start, len);
			if (ret < 0)
				goto out;
		}
next:
		/*
		 * TODO: if the next leaf is corrupted, jump to the nearest
		 * next valid leaf.
		 */
		ret = btrfs_next_item(cur_root, &path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			goto out;
		}
	}

out:
	btrfs_release_path(&path);
	free(buf);
	return ret;
}

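/*
 * Iterate all fs/subvolume trees found in the tree root and rebuild their
 * csum items via fill_csum_tree_from_one_fs_root().
 */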
static int fill_csum_tree_from_fs(struct btrfs_trans_handle *trans)
{
	struct btrfs_path path;
	struct btrfs_root *tree_root = gfs_info->tree_root;
	struct btrfs_root *cur_root;
	struct extent_buffer *node;
	struct btrfs_key key;
	int slot = 0;
	int ret = 0;

	btrfs_init_path(&path);
	key.objectid = BTRFS_FS_TREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, tree_root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	while (1) {
		node = path.nodes[0];
		slot = path.slots[0];
		btrfs_item_key_to_cpu(node, &key, slot);
		if (key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto out;
		if (key.type != BTRFS_ROOT_ITEM_KEY)
			goto next;
		if (!is_fstree(key.objectid))
			goto next;
		key.offset = (u64)-1;

		cur_root = btrfs_read_fs_root(gfs_info, &key);
		if (IS_ERR(cur_root) || !cur_root) {
			fprintf(stderr, "Failed to read fs/subvol tree: %lld\n",
				key.objectid);
			ret = -EIO;
			goto out;
		}
		ret = fill_csum_tree_from_one_fs_root(trans, cur_root);
		if (ret < 0)
			goto out;
next:
		ret = btrfs_next_item(tree_root, &path);
		if (ret > 0) {
			ret = 0;
			goto out;
		}
		if (ret < 0)
			goto out;
	}

out:
	btrfs_release_path(&path);
	return ret;
}

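/*
 * Callback for iterate_extent_inodes() to delete the csum items that were
 * just generated for a data extent, in case the owning inode has the
 * NODATASUM flag set or the file extent is a preallocated one.
 */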
static int remove_csum_for_file_extent(u64 ino, u64 offset, u64 rootid, void *ctx)
{
	struct btrfs_trans_handle *trans = (struct btrfs_trans_handle *)ctx;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_file_extent_item *fi;
	struct btrfs_inode_item *ii;
	struct btrfs_path path = {};
	struct btrfs_key key;
	struct btrfs_root *root;
	bool nocsum = false;
	u8 type;
	u64 disk_bytenr;
	u64 disk_len;
	int ret = 0;

	key.objectid = rootid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_fs_root(fs_info, &key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	/* Check if the inode has NODATASUM flag */
	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ii = btrfs_item_ptr(path.nodes[0], path.slots[0],
			    struct btrfs_inode_item);
	if (btrfs_inode_flags(path.nodes[0], ii) & BTRFS_INODE_NODATASUM)
		nocsum = true;

	btrfs_release_path(&path);

	/* Check the file extent item and delete csum if needed */
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;
	fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path.nodes[0], fi);

	if (btrfs_file_extent_disk_bytenr(path.nodes[0], fi) == 0)
		goto out;

	/* Compressed extent should have csum, skip it */
	if (btrfs_file_extent_compression(path.nodes[0], fi) !=
	    BTRFS_COMPRESS_NONE)
		goto out;
	/*
	 * We only want to delete the csum range if the inode has NODATASUM
	 * flag or it's a preallocated extent.
	 */
	if (!(nocsum || type == BTRFS_FILE_EXTENT_PREALLOC))
		goto out;

	/* If NODATASUM, we need to remove all csum for the extent */
	if (nocsum) {
		disk_bytenr = btrfs_file_extent_disk_bytenr(path.nodes[0], fi);
		disk_len = btrfs_file_extent_disk_num_bytes(path.nodes[0], fi);
	} else {
		disk_bytenr = btrfs_file_extent_disk_bytenr(path.nodes[0], fi) +
			      btrfs_file_extent_offset(path.nodes[0], fi);
		disk_len = btrfs_file_extent_num_bytes(path.nodes[0], fi);
	}
	btrfs_release_path(&path);

	/* Now delete the csum for the preallocated or nodatasum range */
	ret = btrfs_del_csums(trans, disk_bytenr, disk_len);
out:
	btrfs_release_path(&path);
	return ret;
}

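/*
 * Rebuild csum items by walking the extent tree: for every data extent item
 * generate the csums unconditionally, then use the backrefs to drop the
 * csums again for preallocated ranges and NODATASUM inodes.
 */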
static int fill_csum_tree_from_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *extent_root)
{
	struct btrfs_root *csum_root;
	struct btrfs_path path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	char *buf;
	struct btrfs_key key;
	int ret;

	btrfs_init_path(&path);
	key.objectid = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, extent_root, &key, &path, 0, 0);
	if (ret < 0) {
		btrfs_release_path(&path);
		return ret;
	}

	buf = malloc(gfs_info->sectorsize);
	if (!buf) {
		btrfs_release_path(&path);
		return -ENOMEM;
	}

	while (1) {
		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
			ret = btrfs_next_leaf(extent_root, &path);
			if (ret < 0)
				break;
			if (ret) {
				ret = 0;
				break;
			}
		}
		leaf = path.nodes[0];

		btrfs_item_key_to_cpu(leaf, &key, path.slots[0]);
		if (key.type != BTRFS_EXTENT_ITEM_KEY) {
			path.slots[0]++;
			continue;
		}

		ei = btrfs_item_ptr(leaf, path.slots[0],
				    struct btrfs_extent_item);
		if (!(btrfs_extent_flags(leaf, ei) &
		      BTRFS_EXTENT_FLAG_DATA)) {
			path.slots[0]++;
			continue;
		}
		/*
		 * Generate the datasum unconditionally first.
		 *
		 * This will generate csum for preallocated extents, but that
		 * will be deleted later.
		 *
		 * This is to address cases like this:
		 *   fallocate 0 8K
		 *   pwrite 0 4k
		 *   sync
		 *   punch 0 4k
		 *
		 * In the above case we will have csum for [0, 4K) and that's
		 * valid.
		 */
		csum_root = btrfs_csum_root(gfs_info, key.objectid);
		ret = populate_csum(trans, csum_root, buf, key.objectid,
				    key.offset);
		if (ret < 0)
			break;
		ret = iterate_extent_inodes(trans->fs_info, key.objectid, 0, 0,
					    remove_csum_for_file_extent, trans);
		if (ret)
			break;
		path.slots[0]++;
	}

	btrfs_release_path(&path);
	free(buf);
	return ret;
}

/*
 * Recalculate the csum and put it into the csum tree.
 *
 * @search_fs_tree:	How to get the data extent item.
 *			If true, iterate all fs roots to get all
 *			extent data (which can be slow).
 *			Otherwise, search the extent tree for extent data.
 */
int fill_csum_tree(struct btrfs_trans_handle *trans, bool search_fs_tree)
{
	struct btrfs_root *root;
	struct rb_node *n;
	int ret;

	if (search_fs_tree)
		return fill_csum_tree_from_fs(trans);

	root = btrfs_extent_root(gfs_info, 0);
	while (1) {
		ret = fill_csum_tree_from_extent(trans, root);
		if (ret)
			break;
		n = rb_next(&root->rb_node);
		if (!n)
			break;
		root = rb_entry(n, struct btrfs_root, rb_node);
		if (root->root_key.objectid != BTRFS_EXTENT_TREE_OBJECTID)
			break;
	}
	return ret;
}

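/*
 * Count the DEV_ITEMs in the chunk tree.  Returns the number of devices
 * found, or a negative errno on failure.
 */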
static int get_num_devs_in_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path path = { 0 };
	struct btrfs_key key = { 0 };
	int found_devs = 0;
	int ret;

	ret = btrfs_search_slot(NULL, chunk_root, &key, &path, 0, 0);
	if (ret < 0)
		return ret;

	/* We should be at the first slot, and the chunk tree should not be empty */
	ASSERT(path.slots[0] == 0 && btrfs_header_nritems(path.nodes[0]));

	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

	while (key.objectid == BTRFS_DEV_ITEMS_OBJECTID &&
	       key.type == BTRFS_DEV_ITEM_KEY) {
		found_devs++;

		ret = btrfs_next_item(chunk_root, &path);
		if (ret < 0)
			break;

		/*
		 * This should not happen, as we should have CHUNK items after
		 * the DEV items, but since we only want the number of devices
		 * here, there is no need to bother with that problem.
		 */
		if (ret > 0) {
			ret = 0;
			break;
		}
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
	}
	btrfs_release_path(&path);
	if (ret < 0)
		return ret;
	return found_devs;
}

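/*
 * Verify that the num_devices field of the super block matches the number
 * of DEV_ITEMs in the chunk tree.  On mismatch, report the error and, when
 * repair is enabled, reset the super block value and write back all super
 * blocks.
 */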
int check_and_repair_super_num_devs(struct btrfs_fs_info *fs_info)
{
	int found_devs;
	int ret;

	ret = get_num_devs_in_chunk_tree(fs_info);
	if (ret < 0)
		return ret;

	found_devs = ret;

	if (found_devs == btrfs_super_num_devices(fs_info->super_copy))
		return 0;

	/* Now the found devs in chunk tree mismatch with super block */
	error("super num devices mismatch, have %llu expect %u",
	      btrfs_super_num_devices(fs_info->super_copy),
	      found_devs);

	if (!opt_check_repair)
		return -EUCLEAN;

	/*
	 * Repair is simple, reset the super block value and write back all the
	 * super blocks. Do not use transaction for that.
	 */
	btrfs_set_super_num_devices(fs_info->super_copy, found_devs);
	ret = write_all_supers(fs_info);
	if (ret < 0) {
		errno = -ret;
error("failed to write super blocks: %m");
|
2022-02-28 00:50:07 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
printf("Successfully reset super num devices to %u\n", found_devs);
|
|
|
|
return 0;
|
|
|
|
}
|