#!/bin/bash
#
# loop through all of our bad images and make sure fsck repairs them properly
#
# It's GPL, same as everything else in this tree.
#
here=`pwd`
TEST_DEV=
TEST_MNT=
RESULT="fsck-tests-results.txt"
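
# print the failure message to the console and the results file, then abort the run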
_fail()
{
	echo "$*" | tee -a $RESULT
	exit 1
}
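
# log the command to the results file, run it with all output appended there,
# and abort via _fail() if it exits with an error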
run_check()
{
	echo "############### $@" >> $RESULT 2>&1
	"$@" >> $RESULT 2>&1 || _fail "failed: $@"
}
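
# start each run with a fresh results file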
rm -f $RESULT
# the tests rely on the block corruption tool (btrfs-corrupt-block)
run_check make btrfs-corrupt-block
# Some broken filesystem images are kept as .img files, created by the tool
# btrfs-image, and others are kept as .tar.xz files that contain a raw
# filesystem image (the backing file of a loop device, as a sparse file).
# The reason for keeping some as tarballs of raw images is that for these
# cases btrfs-image isn't able to preserve all of the (bad) filesystem
# structure.
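#
# For illustration only (nothing below is executed, the file names are
# hypothetical), a new raw-image case could be produced along these lines:
#   truncate -s 1G img
#   mkfs.btrfs img
#   <introduce the corruption the test should exercise>
#   tar cJf new-fsck-test.tar.xz img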
for i in $(find $here/tests/fsck-tests -name '*.img' -o -name '*.tar.xz' | sort)
do
	echo " [TEST] $(basename $i)"
	echo "testing image $i" >> $RESULT
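
	# ${i#*.} keeps everything after the first '.', e.g. "foo.img" -> "img"
	# and "foo.tar.xz" -> "tar.xz"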
	extension=${i#*.}
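
	# .img files are btrfs-image dumps restored into test.img; the .tar.xz
	# archives are expected to unpack a raw image also named test.img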
	if [ "$extension" == "img" ]; then
		run_check $here/btrfs-image -r $i test.img
	else
		run_check tar xJf $i
	fi
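
	# run the check directly (not via run_check): a non-zero exit status is
	# the expected result on a corrupted image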
	$here/btrfs check test.img >> $RESULT 2>&1
	[ $? -eq 0 ] && _fail "btrfs check should have detected corruption"
	run_check $here/btrfs check --repair test.img
	run_check $here/btrfs check test.img
done
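
# The extent tree rebuild test needs a scratch block device and mount point;
# fill in TEST_DEV and TEST_MNT at the top of this script to enable it. Note
# that the device is reformatted, so anything on it is destroyed.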
if [ -z "$TEST_DEV" ] || [ -z "$TEST_MNT" ]; then
	echo " [NOTRUN] extent tree rebuild"
	exit 0
fi
# test whether fsck can rebuild a corrupted extent tree
test_extent_tree_rebuild()
{
	echo " [TEST] extent tree rebuild"
	$here/mkfs.btrfs -f $TEST_DEV > /dev/null 2>&1 || _fail "failed to mkfs"
	run_check mount $TEST_DEV $TEST_MNT
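
	# populate the filesystem with some real data (the running kernel's modules)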
	cp -aR /lib/modules/`uname -r`/ $TEST_MNT 2>&1
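
	# create 100 snapshots so the extent tree also contains shared references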
	for i in `seq 1 100`; do
		$here/btrfs sub snapshot $TEST_MNT \
			$TEST_MNT/snapaaaaaaa_$i >& /dev/null
	done
	run_check umount $TEST_DEV

	# get the logical address (bytenr) of the extent tree root node
	extent_root_bytenr=`$here/btrfs-debug-tree -r $TEST_DEV | grep extent | awk '{print $7}'`
	if [ -z "$extent_root_bytenr" ]; then
		_fail "failed to get extent root bytenr"
	fi

	# corrupt the extent tree root node so the extent tree can no longer be read
	run_check $here/btrfs-corrupt-block -l $extent_root_bytenr \
		-b 4096 $TEST_DEV
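
	# run the check directly: it is expected to fail on the corrupted device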
	$here/btrfs check $TEST_DEV >& /dev/null && \
		_fail "btrfs check should have detected corruption"
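
	# --init-extent-tree recreates the extent tree; a plain check must then pass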
	run_check $here/btrfs check --init-extent-tree $TEST_DEV
	run_check $here/btrfs check $TEST_DEV
}
test_extent_tree_rebuild