/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "kerncompat.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include "kernel-shared/ctree.h"
#include "kernel-shared/disk-io.h"
#include "kernel-shared/volumes.h"
#include "kernel-shared/accessors.h"
#include "crypto/hash.h"
#include "common/internal.h"
#include "common/messages.h"
#include "common/cpu-utils.h"
#include "common/box.h"
#include "common/utils.h"
#include "common/help.h"
#include "common/open-utils.h"
#include "common/string-utils.h"
#include "cmds/commands.h"
#include "image/metadump.h"
#include "image/sanitize.h"
#include "image/common.h"

static const char * const image_usage[] = {
	"btrfs-image [options] source target",
	"Create or restore a filesystem image (metadata)",
	"",
	"Options:",
	OPTLINE("-r", "restore metadump image"),
	OPTLINE("-c value", "compression level (0 ~ 9)"),
	OPTLINE("-t value", "number of threads (1 ~ 32)"),
	OPTLINE("-o", "don't mess with the chunk tree when restoring"),
	OPTLINE("-s", "sanitize file names, use once to just use garbage, use twice if you want crc collisions"),
	OPTLINE("-w", "walk all trees instead of using extent tree, do this if your extent tree is broken"),
	OPTLINE("-m", "restore for multiple devices"),
	OPTLINE("-d", "also dump data, conflicts with -w"),
	"",
	"In the dump mode, source is the btrfs device and target is the output file (use '-' for stdout).",
	"In the restore mode, source is the dumped image and target is the btrfs device/file.",
	NULL
};

static const struct cmd_struct image_cmd = {
	.usagestr = image_usage
};
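
/*
 * Example invocations (device and file names are illustrative, taken from
 * the commit message that introduced multi-device restore):
 *
 *   btrfs-image /dev/sda image.file              dump metadata to a file
 *   btrfs-image -r image.file /dev/sdc           restore onto one device
 *   btrfs-image -m image.file /dev/sdc /dev/sdd  restore a multi-device dump
 */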

int BOX_MAIN(image)(int argc, char *argv[])
{
	char *source;
	char *target;
	u64 num_threads = 0;
	u64 compress_level = 0;
	int create = 1;
	int old_restore = 0;
	int walk_trees = 0;
	int multi_devices = 0;
	int ret;
	enum sanitize_mode sanitize = SANITIZE_NONE;
	int dev_cnt = 0;
	bool dump_data = false;
	int usage_error = 0;
	FILE *out;

	cpu_detect_flags();
	hash_init_accel();

	while (1) {
		static const struct option long_options[] = {
			{ "help", no_argument, NULL, GETOPT_VAL_HELP},
			{ NULL, 0, NULL, 0 }
		};
		int c = getopt_long(argc, argv, "rc:t:oswmd", long_options, NULL);

		if (c < 0)
			break;
		switch (c) {
		case 'r':
			create = 0;
			break;
		case 't':
			num_threads = arg_strtou64(optarg);
			if (num_threads > MAX_WORKER_THREADS) {
				error("number of threads out of range: %llu > %d",
					num_threads, MAX_WORKER_THREADS);
				return 1;
			}
			break;
		case 'c':
			compress_level = arg_strtou64(optarg);
			if (compress_level > 9) {
				error("compression level out of range: %llu",
					compress_level);
				return 1;
			}
			break;
		case 'o':
			old_restore = 1;
			break;
		case 's':
			if (sanitize == SANITIZE_NONE)
				sanitize = SANITIZE_NAMES;
			else if (sanitize == SANITIZE_NAMES)
				sanitize = SANITIZE_COLLISIONS;
			break;
		case 'w':
			walk_trees = 1;
			break;
		case 'm':
			create = 0;
			multi_devices = 1;
			break;
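		/*
		 * Experimental: dump data extents in addition to metadata.
		 * Data is copied as-is, even for preallocated extents whose
		 * content is meaningless, so the image can grow considerably,
		 * but the restored image is an ordinary btrfs with no special
		 * super block flags.
		 */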
		case 'd':
			btrfs_warn_experimental("Feature: dump image with data");
			dump_data = true;
			break;
		case GETOPT_VAL_HELP:
		default:
			usage(&image_cmd, c != GETOPT_VAL_HELP);
		}
	}

	set_argv0(argv);
	if (check_argc_min(argc - optind, 2))
		usage(&image_cmd, 1);

	dev_cnt = argc - optind - 1;

#if !EXPERIMENTAL
	if (dump_data) {
		error(
"data dump feature is experimental and is not configured in this build");
		usage(&image_cmd, 1);
	}
#endif
	if (create) {
		if (old_restore) {
			error(
			"create and restore cannot be used at the same time");
			usage_error++;
		}
		if (dump_data && walk_trees) {
			error("-d conflicts with -w option");
			usage_error++;
		}
	} else {
		if (walk_trees || sanitize != SANITIZE_NONE || compress_level ||
		    dump_data) {
			error(
	"using -w, -s, -c, -d options for restore makes no sense");
			usage_error++;
		}
		if (multi_devices && dev_cnt < 2) {
			error("not enough devices specified for -m option");
			usage_error++;
		}
		if (!multi_devices && dev_cnt != 1) {
			error("accepts only 1 device without -m option");
			usage_error++;
		}
	}

	if (usage_error)
		usage(&image_cmd, 1);

	source = argv[optind];
	target = argv[optind + 1];

	if (create && !strcmp(target, "-")) {
		out = stdout;
	} else {
		out = fopen(target, "w+");
		if (!out) {
			error("unable to create target file %s", target);
			exit(1);
		}
	}

	if (compress_level > 0 || create == 0) {
		if (num_threads == 0) {
			long tmp = sysconf(_SC_NPROCESSORS_ONLN);

			if (tmp <= 0)
				tmp = 1;
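			/*
			 * Clamp the detected CPU count: values above
			 * MAX_WORKER_THREADS used to overwrite fields past
			 * the fixed-size worker arrays, corrupting the
			 * pthread mutex/cond state and hanging btrfs-image
			 * on machines with more than 32 online CPUs.
			 */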
			tmp = min_t(long, tmp, MAX_WORKER_THREADS);
			num_threads = tmp;
		}
	} else {
		num_threads = 0;
	}

	if (create) {
		ret = check_mounted(source);
		if (ret < 0) {
			errno = -ret;
			warning("unable to check mount status of: %m");
		} else if (ret) {
			warning("%s already mounted, results may be inaccurate",
					source);
		}

		ret = create_metadump(source, out, num_threads,
				      compress_level, sanitize, walk_trees,
				      dump_data);
	} else {
		ret = restore_metadump(source, out, old_restore, num_threads,
				       0, target, multi_devices);
	}
	if (ret) {
		error("%s failed: %d", (create) ? "create" : "restore", ret);
		goto out;
	}
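
	/*
	 * A dump of a multi-device filesystem restored onto a single disk
	 * can only be mounted degraded, and striped profiles then fall back
	 * to read-only; with -m the restore is completed across all target
	 * devices so the result mounts like the original filesystem.
	 */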
	/* extended support for multiple devices */
	if (!create && multi_devices) {
		struct open_ctree_args oca = { 0 };
		struct btrfs_fs_info *info;
		u64 total_devs;
		int i;

		oca.filename = target;
		oca.flags = OPEN_CTREE_PARTIAL | OPEN_CTREE_RESTORE |
			    OPEN_CTREE_SKIP_LEAF_ITEM_CHECKS;
		info = open_ctree_fs_info(&oca);
		if (!info) {
			error("open ctree failed at %s", target);
			return 1;
		}

		total_devs = btrfs_super_num_devices(info->super_copy);
		if (total_devs != dev_cnt) {
			error("it needs %llu devices but has only %d",
				total_devs, dev_cnt);
			close_ctree(info->chunk_root);
			goto out;
		}

		/* update super block on other disks */
		for (i = 2; i <= dev_cnt; i++) {
			ret = update_disk_super_on_device(info,
					argv[optind + i], (u64)i);
			if (ret) {
				error("update disk superblock failed devid %d: %d",
					i, ret);
				close_ctree(info->chunk_root);
				exit(1);
			}
		}

		close_ctree(info->chunk_root);

		/* fix metadata block to map correct chunk */
		ret = restore_metadump(source, out, 0, num_threads, 1,
				       target, 1);
		if (ret) {
			error("unable to fixup metadump: %d", ret);
			exit(1);
		}
	}
out:
	if (out == stdout) {
		fflush(out);
	} else {
		fclose(out);
		if (ret && create) {
			int unlink_ret;

			unlink_ret = unlink(target);
			if (unlink_ret)
				error("unlink output file %s failed: %m",
						target);
		}
	}

	btrfs_close_all_devices();

	return !!ret;
}