/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#define _XOPEN_SOURCE 500
#define _GNU_SOURCE 1
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <zlib.h>
#include "kerncompat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "utils.h"
#include "version.h"
#include "volumes.h"
#include "extent_io.h"

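/*
 * Image stream layout: a metadump is a sequence of clusters.  Each cluster
 * begins with one BLOCK_SIZE block containing a meta_cluster_header and an
 * index of meta_cluster_item entries, followed by the (optionally
 * zlib-compressed) copies of the extents those items describe.
 */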
#define HEADER_MAGIC		0xbd5c25e27295668bULL
#define MAX_PENDING_SIZE	(256 * 1024)
#define BLOCK_SIZE		1024
#define BLOCK_MASK		(BLOCK_SIZE - 1)

#define COMPRESS_NONE		0
#define COMPRESS_ZLIB		1

struct meta_cluster_item {
	__le64 bytenr;
	__le32 size;
} __attribute__ ((__packed__));

struct meta_cluster_header {
	__le64 magic;
	__le64 bytenr;
	__le32 nritems;
	u8 compress;
} __attribute__ ((__packed__));

/* cluster header + index items + buffers */
struct meta_cluster {
	struct meta_cluster_header header;
	struct meta_cluster_item items[];
} __attribute__ ((__packed__));

#define ITEMS_PER_CLUSTER ((BLOCK_SIZE - sizeof(struct meta_cluster)) / \
			   sizeof(struct meta_cluster_item))

struct fs_chunk {
	u64 logical;
	u64 physical;
	u64 bytes;
	struct rb_node n;
};

struct async_work {
	struct list_head list;
	struct list_head ordered;
	u64 start;
	u64 size;
	u8 *buffer;
	size_t bufsize;
	int error;
};

struct metadump_struct {
	struct btrfs_root *root;
	FILE *out;

	struct meta_cluster *cluster;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	struct rb_root name_tree;

	struct list_head list;
	struct list_head ordered;
	size_t num_items;
	size_t num_ready;

	u64 pending_start;
	u64 pending_size;

	int compress_level;
	int done;
	int data;
	int sanitize_names;

	int error;
};

struct name {
	struct rb_node n;
	char *val;
	char *sub;
	u32 len;
};

struct mdrestore_struct {
	FILE *in;
	FILE *out;

	pthread_t *threads;
	size_t num_threads;
	pthread_mutex_t mutex;
	pthread_cond_t cond;

	struct rb_root chunk_tree;
	struct list_head list;
	size_t num_items;
	u32 leafsize;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	u8 fsid[BTRFS_FSID_SIZE];

	int compress_method;
	int done;
	int error;
	int old_restore;
	int fixup_offset;
	int multi_devices;
	struct btrfs_fs_info *info;
};

static void print_usage(void) __attribute__((noreturn));
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr);
static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size);

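/*
 * Recompute the checksum stored at the start of @buf: crc32c over
 * everything past the csum area, finalized into the first
 * BTRFS_CRC32_SIZE bytes.
 */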
static void csum_block(u8 *buf, size_t len)
{
	char result[BTRFS_CRC32_SIZE];
	u32 crc = ~(u32)0;
	crc = crc32c(crc, buf + BTRFS_CSUM_SIZE, len - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, result);
	memcpy(buf, result, BTRFS_CRC32_SIZE);
}

static int has_name(struct btrfs_key *key)
{
	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_INODE_REF_KEY:
	case BTRFS_INODE_EXTREF_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		return 1;
	default:
		break;
	}

	return 0;
}

static char *generate_garbage(u32 name_len)
{
	char *buf = malloc(name_len);
	int i;

	if (!buf)
		return NULL;

	for (i = 0; i < name_len; i++) {
		char c = rand() % 94 + 33;

		if (c == '/')
			c++;
		buf[i] = c;
	}

	return buf;
}

static int name_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct name *entry = rb_entry(a, struct name, n);
	struct name *ins = rb_entry(b, struct name, n);
	u32 len;

	len = min(ins->len, entry->len);
	return memcmp(ins->val, entry->val, len);
}

static int chunk_cmp(struct rb_node *a, struct rb_node *b, int fuzz)
{
	struct fs_chunk *entry = rb_entry(a, struct fs_chunk, n);
	struct fs_chunk *ins = rb_entry(b, struct fs_chunk, n);

	if (fuzz && ins->logical >= entry->logical &&
	    ins->logical < entry->logical + entry->bytes)
		return 0;

	if (ins->logical < entry->logical)
		return -1;
	else if (ins->logical > entry->logical)
		return 1;
	return 0;
}

static void tree_insert(struct rb_root *root, struct rb_node *ins,
			int (*cmp)(struct rb_node *a, struct rb_node *b,
				   int fuzz))
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int dir;

	while (*p) {
		parent = *p;

		dir = cmp(*p, ins, 0);
		if (dir < 0)
			p = &(*p)->rb_left;
		else if (dir > 0)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(ins, parent, p);
	rb_insert_color(ins, root);
}

static struct rb_node *tree_search(struct rb_root *root,
				   struct rb_node *search,
				   int (*cmp)(struct rb_node *a,
					      struct rb_node *b, int fuzz),
				   int fuzz)
{
	struct rb_node *n = root->rb_node;
	int dir;

	while (n) {
		dir = cmp(n, search, fuzz);
		if (dir < 0)
			n = n->rb_left;
		else if (dir > 0)
			n = n->rb_right;
		else
			return n;
	}

	return NULL;
}

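/*
 * Produce a same-length replacement for @name whose crc32c matches the
 * original, so the sanitized name still hashes to the same dir index.
 * Results are cached in md->name_tree; if no collision can be found the
 * name is replaced with random garbage that will not match the indexes.
 */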
static char *find_collision(struct metadump_struct *md, char *name,
			    u32 name_len)
{
	struct name *val;
	struct rb_node *entry;
	struct name tmp;
	unsigned long checksum;
	int found = 0;
	int i;

	tmp.val = name;
	tmp.len = name_len;
	entry = tree_search(&md->name_tree, &tmp.n, name_cmp, 0);
	if (entry) {
		val = rb_entry(entry, struct name, n);
		free(name);
		return val->sub;
	}

	val = malloc(sizeof(struct name));
	if (!val) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(name);
		return NULL;
	}

	memset(val, 0, sizeof(*val));

	val->val = name;
	val->len = name_len;
	val->sub = malloc(name_len);
	if (!val->sub) {
		fprintf(stderr, "Couldn't sanitize name, enomem\n");
		free(val);
		free(name);
		return NULL;
	}

	checksum = crc32c(~1, val->val, name_len);
	memset(val->sub, ' ', name_len);
	i = 0;
	while (1) {
		if (crc32c(~1, val->sub, name_len) == checksum &&
		    memcmp(val->sub, val->val, val->len)) {
			found = 1;
			break;
		}

		if (val->sub[i] == 127) {
			do {
				i++;
				if (i >= name_len)
					break;
			} while (val->sub[i] == 127);

			if (i >= name_len)
				break;
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
			memset(val->sub, ' ', i);
			i = 0;
			continue;
		} else {
			val->sub[i]++;
			if (val->sub[i] == '/')
				val->sub[i]++;
		}
	}

	if (!found) {
		fprintf(stderr, "Couldn't find a collision for '%.*s', "
			"generating normal garbage, it won't match indexes\n",
			val->len, val->val);
		for (i = 0; i < name_len; i++) {
			char c = rand() % 94 + 33;

			if (c == '/')
				c++;
			val->sub[i] = c;
		}
	}

	tree_insert(&md->name_tree, &val->n, name_cmp);
	return val->sub;
}

static void sanitize_dir_item(struct metadump_struct *md, struct extent_buffer *eb,
			      int slot)
{
	struct btrfs_dir_item *dir_item;
	char *buf;
	char *garbage;
	unsigned long name_ptr;
	u32 total_len;
	u32 cur = 0;
	u32 this_len;
	u32 name_len;
	int free_garbage = (md->sanitize_names == 1);

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total_len = btrfs_item_size_nr(eb, slot);
	while (cur < total_len) {
		this_len = sizeof(*dir_item) +
			btrfs_dir_name_len(eb, dir_item) +
			btrfs_dir_data_len(eb, dir_item);
		name_ptr = (unsigned long)(dir_item + 1);
		name_len = btrfs_dir_name_len(eb, dir_item);

		if (md->sanitize_names > 1) {
			buf = malloc(name_len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, name_len);
			garbage = find_collision(md, buf, name_len);
		} else {
			garbage = generate_garbage(name_len);
		}
		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, name_len);
		cur += this_len;
		dir_item = (struct btrfs_dir_item *)((char *)dir_item +
						     this_len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_inode_ref(struct metadump_struct *md,
			       struct extent_buffer *eb, int slot, int ext)
{
	struct btrfs_inode_extref *extref;
	struct btrfs_inode_ref *ref;
	char *garbage, *buf;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;
	int free_garbage = (md->sanitize_names == 1);

	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	while (cur_offset < item_size) {
		if (ext) {
			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			name_ptr = (unsigned long)(&extref->name);
			len = btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		} else {
			ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			len = btrfs_inode_ref_name_len(eb, ref);
			name_ptr = (unsigned long)(ref + 1);
			cur_offset += sizeof(*ref);
		}
		cur_offset += len;

		if (md->sanitize_names > 1) {
			buf = malloc(len);
			if (!buf) {
				fprintf(stderr, "Couldn't sanitize name, "
					"enomem\n");
				return;
			}
			read_extent_buffer(eb, buf, name_ptr, len);
			garbage = find_collision(md, buf, len);
		} else {
			garbage = generate_garbage(len);
		}

		if (!garbage) {
			fprintf(stderr, "Couldn't sanitize name, enomem\n");
			return;
		}
		write_extent_buffer(eb, garbage, name_ptr, len);
		if (free_garbage)
			free(garbage);
	}
}

static void sanitize_xattr(struct metadump_struct *md,
			   struct extent_buffer *eb, int slot)
{
	struct btrfs_dir_item *dir_item;
	unsigned long data_ptr;
	u32 data_len;

	dir_item = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	data_len = btrfs_dir_data_len(eb, dir_item);

	data_ptr = (unsigned long)((char *)(dir_item + 1) +
				   btrfs_dir_name_len(eb, dir_item));
	memset_extent_buffer(eb, 0, data_ptr, data_len);
}

static void sanitize_name(struct metadump_struct *md, u8 *dst,
			  struct extent_buffer *src, struct btrfs_key *key,
			  int slot)
{
	struct extent_buffer *eb;

	eb = alloc_dummy_eb(src->start, src->len);
	if (!eb) {
		fprintf(stderr, "Couldn't sanitize name, no memory\n");
		return;
	}

	memcpy(eb->data, dst, eb->len);

	switch (key->type) {
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
		sanitize_dir_item(md, eb, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		sanitize_inode_ref(md, eb, slot, 0);
		break;
	case BTRFS_INODE_EXTREF_KEY:
		sanitize_inode_ref(md, eb, slot, 1);
		break;
	case BTRFS_XATTR_ITEM_KEY:
		sanitize_xattr(md, eb, slot);
		break;
	default:
		break;
	}

	memcpy(dst, eb->data, eb->len);
	free(eb);
}

/*
 * zero inline extents and csum items
 */
static void zero_items(struct metadump_struct *md, u8 *dst,
		       struct extent_buffer *src)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_item *item;
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(src);
	size_t size;
	unsigned long ptr;
	int i, extent_type;

	for (i = 0; i < nritems; i++) {
		item = btrfs_item_nr(i);
		btrfs_item_key_to_cpu(src, &key, i);
		if (key.type == BTRFS_CSUM_ITEM_KEY) {
			size = btrfs_item_size_nr(src, i);
			memset(dst + btrfs_leaf_data(src) +
			       btrfs_item_offset_nr(src, i), 0, size);
			continue;
		}

		if (md->sanitize_names && has_name(&key)) {
			sanitize_name(md, dst, src, &key, i);
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(src, fi);
		if (extent_type != BTRFS_FILE_EXTENT_INLINE)
			continue;

		ptr = btrfs_file_extent_inline_start(fi);
		size = btrfs_file_extent_inline_item_len(src, item);
		memset(dst + ptr, 0, size);
	}
}

/*
 * copy buffer and zero useless data in the buffer
 */
static void copy_buffer(struct metadump_struct *md, u8 *dst,
			struct extent_buffer *src)
{
	int level;
	size_t size;
	u32 nritems;

	memcpy(dst, src->data, src->len);
	if (src->start == BTRFS_SUPER_INFO_OFFSET)
		return;

	level = btrfs_header_level(src);
	nritems = btrfs_header_nritems(src);

	if (nritems == 0) {
		size = sizeof(struct btrfs_header);
		memset(dst + size, 0, src->len - size);
	} else if (level == 0) {
		size = btrfs_leaf_data(src) +
			btrfs_item_offset_nr(src, nritems - 1) -
			btrfs_item_nr_offset(nritems);
		memset(dst + btrfs_item_nr_offset(nritems), 0, size);
		zero_items(md, dst, src);
	} else {
		size = offsetof(struct btrfs_node, ptrs) +
			sizeof(struct btrfs_key_ptr) * nritems;
		memset(dst + size, 0, src->len - size);
	}
	csum_block(dst, src->len);
}

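/*
 * Dump-side worker thread: take async_work items off md->list and, when
 * compression is enabled, zlib-compress their buffers, recording any error
 * on the work item.  Exits once md->done is set and the list is empty.
 */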
static void *dump_worker(void *data)
{
	struct metadump_struct *md = (struct metadump_struct *)data;
	struct async_work *async;
	int ret;

	while (1) {
		pthread_mutex_lock(&md->mutex);
		while (list_empty(&md->list)) {
			if (md->done) {
				pthread_mutex_unlock(&md->mutex);
				goto out;
			}
			pthread_cond_wait(&md->cond, &md->mutex);
		}
		async = list_entry(md->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&md->mutex);

		if (md->compress_level > 0) {
			u8 *orig = async->buffer;

			async->bufsize = compressBound(async->size);
			async->buffer = malloc(async->bufsize);
			if (!async->buffer) {
				fprintf(stderr, "Error allocing buffer\n");
				pthread_mutex_lock(&md->mutex);
				if (!md->error)
					md->error = -ENOMEM;
				pthread_mutex_unlock(&md->mutex);
				pthread_exit(NULL);
			}

			ret = compress2(async->buffer,
					(unsigned long *)&async->bufsize,
					orig, async->size, md->compress_level);

			if (ret != Z_OK)
				async->error = 1;

			free(orig);
		}

		pthread_mutex_lock(&md->mutex);
		md->num_ready++;
		pthread_mutex_unlock(&md->mutex);
	}
out:
	pthread_exit(NULL);
}

static void meta_cluster_init(struct metadump_struct *md, u64 start)
{
	struct meta_cluster_header *header;

	md->num_items = 0;
	md->num_ready = 0;
	header = &md->cluster->header;
	header->magic = cpu_to_le64(HEADER_MAGIC);
	header->bytenr = cpu_to_le64(start);
	header->nritems = cpu_to_le32(0);
	header->compress = md->compress_level > 0 ?
			   COMPRESS_ZLIB : COMPRESS_NONE;
}

static void metadump_destroy(struct metadump_struct *md, int num_threads)
{
	int i;
	struct rb_node *n;

	pthread_mutex_lock(&md->mutex);
	md->done = 1;
	pthread_cond_broadcast(&md->cond);
	pthread_mutex_unlock(&md->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(md->threads[i], NULL);

	pthread_cond_destroy(&md->cond);
	pthread_mutex_destroy(&md->mutex);

	while ((n = rb_first(&md->name_tree))) {
		struct name *name;

		name = rb_entry(n, struct name, n);
		rb_erase(n, &md->name_tree);
		free(name->val);
		free(name->sub);
		free(name);
	}
	free(md->threads);
	free(md->cluster);
}

static int metadump_init(struct metadump_struct *md, struct btrfs_root *root,
			 FILE *out, int num_threads, int compress_level,
			 int sanitize_names)
{
	int i, ret = 0;

	memset(md, 0, sizeof(*md));
	pthread_cond_init(&md->cond, NULL);
	pthread_mutex_init(&md->mutex, NULL);
	INIT_LIST_HEAD(&md->list);
	INIT_LIST_HEAD(&md->ordered);
	md->root = root;
	md->out = out;
	md->pending_start = (u64)-1;
	md->compress_level = compress_level;
	md->cluster = calloc(1, BLOCK_SIZE);
	md->sanitize_names = sanitize_names;
	if (sanitize_names > 1)
		crc32c_optimization_init();

	if (!md->cluster) {
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	meta_cluster_init(md, 0);
	if (!num_threads)
		return 0;

	md->name_tree.rb_node = NULL;
	md->num_threads = num_threads;
	md->threads = calloc(num_threads, sizeof(pthread_t));
	if (!md->threads) {
		free(md->cluster);
		pthread_cond_destroy(&md->cond);
		pthread_mutex_destroy(&md->mutex);
		return -ENOMEM;
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(md->threads + i, NULL, dump_worker, md);
		if (ret)
			break;
	}

	if (ret)
		metadump_destroy(md, i + 1);

	return ret;
}

static int write_zero(FILE *out, size_t size)
{
	static char zero[BLOCK_SIZE];
	return fwrite(zero, size, 1, out);
}

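/*
 * Called with md->mutex held: wait for the worker threads to finish the
 * current batch, write the cluster header/index block, then each buffer in
 * order, and pad the stream to the next BLOCK_SIZE boundary.  *next is set
 * to the bytenr where the following cluster will start.
 */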
static int write_buffers(struct metadump_struct *md, u64 *next)
{
	struct meta_cluster_header *header = &md->cluster->header;
	struct meta_cluster_item *item;
	struct async_work *async;
	u64 bytenr = 0;
	u32 nritems = 0;
	int ret;
	int err = 0;

	if (list_empty(&md->ordered))
		goto out;

	/* wait until all buffers are compressed */
	while (!err && md->num_items > md->num_ready) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&md->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&md->mutex);
		err = md->error;
	}

	if (err) {
		fprintf(stderr, "One of the threads errored out %s\n",
			strerror(err));
		goto out;
	}

	/* setup and write index block */
	list_for_each_entry(async, &md->ordered, ordered) {
		item = md->cluster->items + nritems;
		item->bytenr = cpu_to_le64(async->start);
		item->size = cpu_to_le32(async->bufsize);
		nritems++;
	}
	header->nritems = cpu_to_le32(nritems);

	ret = fwrite(md->cluster, BLOCK_SIZE, 1, md->out);
	if (ret != 1) {
		fprintf(stderr, "Error writing out cluster: %d\n", errno);
		return -EIO;
	}

	/* write buffers */
	bytenr += le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	while (!list_empty(&md->ordered)) {
		async = list_entry(md->ordered.next, struct async_work,
				   ordered);
		list_del_init(&async->ordered);

		bytenr += async->bufsize;
		if (!err)
			ret = fwrite(async->buffer, async->bufsize, 1,
				     md->out);
		if (ret != 1) {
			err = -EIO;
			ret = 0;
			fprintf(stderr, "Error writing out cluster: %d\n",
				errno);
		}

		free(async->buffer);
		free(async);
	}

	/* zero unused space in the last block */
	if (!err && bytenr & BLOCK_MASK) {
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = write_zero(md->out, size);
		if (ret != 1) {
			fprintf(stderr, "Error zeroing out buffer: %d\n",
				errno);
			err = -EIO;
		}
	}
out:
	*next = bytenr;
	return err;
}

static int read_data_extent(struct metadump_struct *md,
			    struct async_work *async)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = async->size;
	u64 logical = async->start;
	u64 offset = 0;
	u64 bytenr;
	u64 read_len;
	ssize_t done;
	int fd;
	int ret;

	while (bytes_left) {
		read_len = bytes_left;
		ret = btrfs_map_block(&md->root->fs_info->mapping_tree, READ,
				      logical, &read_len, &multi, 0, NULL);
		if (ret) {
			fprintf(stderr, "Couldn't map data block %d\n", ret);
			return ret;
		}

		device = multi->stripes[0].dev;

		if (device->fd == 0) {
			fprintf(stderr,
				"Device we need to read from is not open\n");
			free(multi);
			return -EIO;
		}
		fd = device->fd;
		bytenr = multi->stripes[0].physical;
		free(multi);

		read_len = min(read_len, bytes_left);
		done = pread64(fd, async->buffer+offset, read_len, bytenr);
		if (done < read_len) {
			if (done < 0)
				fprintf(stderr, "Error reading extent %d\n",
					errno);
			else
				fprintf(stderr, "Short read\n");
			return -EIO;
		}

		bytes_left -= done;
		offset += done;
		logical += done;
	}

	return 0;
}

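/*
 * Package the pending [pending_start, pending_start + pending_size) range
 * into an async_work: data extents are read straight from the source
 * devices, metadata blocks go through copy_buffer() so unused regions are
 * zeroed (and names sanitized if requested).  The work is then queued for
 * compression, and a full cluster is written once ITEMS_PER_CLUSTER items
 * have accumulated or @done is set.
 */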
static int flush_pending(struct metadump_struct *md, int done)
{
	struct async_work *async = NULL;
	struct extent_buffer *eb;
	u64 blocksize = md->root->nodesize;
	u64 start;
	u64 size;
	size_t offset;
	int ret = 0;

	if (md->pending_size) {
		async = calloc(1, sizeof(*async));
		if (!async)
			return -ENOMEM;

		async->start = md->pending_start;
		async->size = md->pending_size;
		async->bufsize = async->size;
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			free(async);
			return -ENOMEM;
		}
		offset = 0;
		start = async->start;
		size = async->size;

		if (md->data) {
			ret = read_data_extent(md, async);
			if (ret) {
				free(async->buffer);
				free(async);
				return ret;
			}
		}

		while (!md->data && size > 0) {
			u64 this_read = min(blocksize, size);
			eb = read_tree_block(md->root, start, this_read, 0);
			if (!eb) {
				free(async->buffer);
				free(async);
				fprintf(stderr,
					"Error reading metadata block\n");
				return -EIO;
			}
			copy_buffer(md, async->buffer + offset, eb);
			free_extent_buffer(eb);
			start += this_read;
			offset += this_read;
			size -= this_read;
		}

		md->pending_start = (u64)-1;
		md->pending_size = 0;
	} else if (!done) {
		return 0;
	}

	pthread_mutex_lock(&md->mutex);
	if (async) {
		list_add_tail(&async->ordered, &md->ordered);
		md->num_items++;
		if (md->compress_level > 0) {
			list_add_tail(&async->list, &md->list);
			pthread_cond_signal(&md->cond);
		} else {
			md->num_ready++;
		}
	}
	if (md->num_items >= ITEMS_PER_CLUSTER || done) {
		ret = write_buffers(md, &start);
		if (ret)
			fprintf(stderr, "Error writing buffers %d\n",
				errno);
		else
			meta_cluster_init(md, start);
	}
	pthread_mutex_unlock(&md->mutex);
	return ret;
}

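/*
 * Accumulate extents into the pending range.  Flush first whenever the
 * extent kind (data vs. metadata) changes, the range would grow past
 * MAX_PENDING_SIZE, or the new extent is not contiguous with the range.
 */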
static int add_extent(u64 start, u64 size, struct metadump_struct *md,
		      int data)
{
	int ret;
	if (md->data != data ||
	    md->pending_size + size > MAX_PENDING_SIZE ||
	    md->pending_start + md->pending_size != start) {
		ret = flush_pending(md, 0);
		if (ret)
			return ret;
		md->pending_start = start;
	}
	readahead_tree_block(md->root, start, size, 0);
	md->pending_size += size;
	md->data = data;
	return 0;
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int is_tree_block(struct btrfs_root *extent_root,
			 struct btrfs_path *path, u64 bytenr)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 ref_objectid;
	int ret;

	leaf = path->nodes[0];
	while (1) {
		struct btrfs_extent_ref_v0 *ref_item;
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				return ret;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr)
			break;
		if (key.type != BTRFS_EXTENT_REF_V0_KEY)
			continue;
		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref_v0);
		ref_objectid = btrfs_ref_objectid_v0(leaf, ref_item);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID)
			return 1;
		break;
	}
	return 0;
}
#endif

static int copy_tree_blocks(struct btrfs_root *root, struct extent_buffer *eb,
			    struct metadump_struct *metadump, int root_tree)
{
	struct extent_buffer *tmp;
	struct btrfs_root_item *ri;
	struct btrfs_key key;
	u64 bytenr;
	int level;
	int nritems = 0;
	int i = 0;
	int ret;

	ret = add_extent(btrfs_header_bytenr(eb), root->leafsize, metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata block\n");
		return ret;
	}

	if (btrfs_header_level(eb) == 0 && !root_tree)
		return 0;

	level = btrfs_header_level(eb);
	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(eb, &key, i);
			if (key.type != BTRFS_ROOT_ITEM_KEY)
				continue;
			ri = btrfs_item_ptr(eb, i, struct btrfs_root_item);
			bytenr = btrfs_disk_root_bytenr(eb, ri);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr,
					"Error reading log root block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, 0);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		} else {
			bytenr = btrfs_node_blockptr(eb, i);
			tmp = read_tree_block(root, bytenr, root->leafsize, 0);
			if (!tmp) {
				fprintf(stderr, "Error reading log block\n");
				return -EIO;
			}
			ret = copy_tree_blocks(root, tmp, metadump, root_tree);
			free_extent_buffer(tmp);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int copy_log_trees(struct btrfs_root *root,
			  struct metadump_struct *metadump,
			  struct btrfs_path *path)
{
	u64 blocknr = btrfs_super_log_root(root->fs_info->super_copy);

	if (blocknr == 0)
		return 0;

	if (!root->fs_info->log_root_tree ||
	    !root->fs_info->log_root_tree->node) {
		fprintf(stderr, "Error copying tree log, it wasn't setup\n");
		return -EIO;
	}

	return copy_tree_blocks(root, root->fs_info->log_root_tree->node,
				metadump, 1);
}

static int copy_space_cache(struct btrfs_root *root,
			    struct metadump_struct *metadump,
			    struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr, num_bytes;
	int ret;

	root = root->fs_info->tree_root;

	key.objectid = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching for free space inode %d\n",
			ret);
		return ret;
	}

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf "
					"%d\n", ret);
				return ret;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			continue;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG) {
			path->slots[0]++;
			continue;
		}

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		ret = add_extent(bytenr, num_bytes, metadump, 1);
		if (ret) {
			fprintf(stderr, "Error adding space cache blocks %d\n",
				ret);
			btrfs_release_path(path);
			return ret;
		}
		path->slots[0]++;
	}

	return 0;
}

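/*
 * Walk the extent tree and queue every tree block for dumping: EXTENT_ITEMs
 * flagged as tree blocks, METADATA_ITEMs, and (when built with V0 support)
 * blocks referenced by old-style extent refs.
 */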
static int copy_from_extent_tree(struct metadump_struct *metadump,
				 struct btrfs_path *path)
{
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	int ret;

	extent_root = metadump->root->fs_info->extent_root;
	bytenr = BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0) {
		fprintf(stderr, "Error searching extent root %d\n", ret);
		return ret;
	}
	ret = 0;

	leaf = path->nodes[0];

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0) {
				fprintf(stderr, "Error going to next leaf %d"
					"\n", ret);
				break;
			}
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid < bytenr ||
		    (key.type != BTRFS_EXTENT_ITEM_KEY &&
		     key.type != BTRFS_METADATA_ITEM_KEY)) {
			path->slots[0]++;
			continue;
		}

		bytenr = key.objectid;
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = extent_root->leafsize;
		else
			num_bytes = key.offset;

		if (btrfs_item_size_nr(leaf, path->slots[0]) > sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			if (btrfs_extent_flags(leaf, ei) &
			    BTRFS_EXTENT_FLAG_TREE_BLOCK) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			ret = is_tree_block(extent_root, path, bytenr);
			if (ret < 0) {
				fprintf(stderr, "Error checking tree block "
					"%d\n", ret);
				break;
			}

			if (ret) {
				ret = add_extent(bytenr, num_bytes, metadump,
						 0);
				if (ret) {
					fprintf(stderr, "Error adding block "
						"%d\n", ret);
					break;
				}
			}
			ret = 0;
#else
			fprintf(stderr, "Either extent tree corruption or "
				"you haven't built with V0 support\n");
			ret = -EIO;
			break;
#endif
		}
		bytenr += num_bytes;
	}

	btrfs_release_path(path);

	return ret;
}

static int create_metadump(const char *input, FILE *out, int num_threads,
			   int compress_level, int sanitize, int walk_trees)
{
	struct btrfs_root *root;
	struct btrfs_path *path = NULL;
	struct metadump_struct metadump;
	int ret;
	int err = 0;

	root = open_ctree(input, 0, 0);
	if (!root) {
		fprintf(stderr, "Open ctree failed\n");
		return -EIO;
	}

	BUG_ON(root->nodesize != root->leafsize);

	ret = metadump_init(&metadump, root, out, num_threads,
			    compress_level, sanitize);
	if (ret) {
		fprintf(stderr, "Error initing metadump %d\n", ret);
		close_ctree(root);
		return ret;
	}

	ret = add_extent(BTRFS_SUPER_INFO_OFFSET, BTRFS_SUPER_INFO_SIZE,
			 &metadump, 0);
	if (ret) {
		fprintf(stderr, "Error adding metadata %d\n", ret);
		err = ret;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		fprintf(stderr, "Out of memory allocing path\n");
		err = -ENOMEM;
		goto out;
	}

	if (walk_trees) {
		ret = copy_tree_blocks(root, root->fs_info->chunk_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}

		ret = copy_tree_blocks(root, root->fs_info->tree_root->node,
				       &metadump, 1);
		if (ret) {
			err = ret;
			goto out;
		}
	} else {
		ret = copy_from_extent_tree(&metadump, path);
		if (ret) {
			err = ret;
			goto out;
		}
	}

	ret = copy_log_trees(root, &metadump, path);
	if (ret) {
		err = ret;
		goto out;
	}

	ret = copy_space_cache(root, &metadump, path);
out:
	ret = flush_pending(&metadump, 1);
	if (ret) {
		if (!err)
			err = ret;
		fprintf(stderr, "Error flushing pending %d\n", ret);
	}

	metadump_destroy(&metadump, num_threads);

	btrfs_free_path(path);
	ret = close_ctree(root);
	return err ? err : ret;
}

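/*
 * Fix up a restored super block for the old restore path: set
 * BTRFS_SUPER_FLAG_METADUMP and replace the sys chunk array with a single
 * identity SYSTEM chunk (one stripe at offset 0 on the target device).
 */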
static void update_super_old(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *key;
	u32 sectorsize = btrfs_super_sectorsize(super);
	u64 flags = btrfs_super_flags(super);

	flags |= BTRFS_SUPER_FLAG_METADUMP;
	btrfs_set_super_flags(super, flags);

	key = (struct btrfs_disk_key *)(super->sys_chunk_array);
	chunk = (struct btrfs_chunk *)(super->sys_chunk_array +
				       sizeof(struct btrfs_disk_key));

	btrfs_set_disk_key_objectid(key, BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_disk_key_type(key, BTRFS_CHUNK_ITEM_KEY);
	btrfs_set_disk_key_offset(key, 0);

	btrfs_set_stack_chunk_length(chunk, (u64)-1);
	btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
	btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
	btrfs_set_stack_chunk_type(chunk, BTRFS_BLOCK_GROUP_SYSTEM);
	btrfs_set_stack_chunk_io_align(chunk, sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, sectorsize);
	btrfs_set_stack_chunk_num_stripes(chunk, 1);
	btrfs_set_stack_chunk_sub_stripes(chunk, 0);
	chunk->stripe.devid = super->dev_item.devid;
	btrfs_set_stack_stripe_offset(&chunk->stripe, 0);
	memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid, BTRFS_UUID_SIZE);
	btrfs_set_super_sys_array_size(super, sizeof(*key) + sizeof(*chunk));
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);
}

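/*
 * Rewrite the sys chunk array of a restored super block in place so every
 * chunk item references a single stripe on the target device; returns -EIO
 * if an unexpected key type is found in the array.
 */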
static int update_super(u8 *buffer)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buffer;
	struct btrfs_chunk *chunk;
	struct btrfs_disk_key *disk_key;
	struct btrfs_key key;
	u32 new_array_size = 0;
	u32 array_size;
	u32 cur = 0;
	u8 *ptr, *write_ptr;
	int old_num_stripes;

	write_ptr = ptr = super->sys_chunk_array;
	array_size = btrfs_super_sys_array_size(super);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		new_array_size += sizeof(*disk_key);
		memmove(write_ptr, ptr, sizeof(*disk_key));

		write_ptr += sizeof(*disk_key);
		ptr += sizeof(*disk_key);
		cur += sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)ptr;
			old_num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			chunk = (struct btrfs_chunk *)write_ptr;

			memmove(write_ptr, ptr, sizeof(*chunk));
			btrfs_set_stack_chunk_num_stripes(chunk, 1);
			btrfs_set_stack_chunk_sub_stripes(chunk, 0);
			btrfs_set_stack_chunk_type(chunk,
						   BTRFS_BLOCK_GROUP_SYSTEM);
			chunk->stripe.devid = super->dev_item.devid;
			memcpy(chunk->stripe.dev_uuid, super->dev_item.uuid,
			       BTRFS_UUID_SIZE);
			new_array_size += sizeof(*chunk);
		} else {
			fprintf(stderr, "Bogus key in the sys chunk array "
				"%d\n", key.type);
			return -EIO;
		}
		write_ptr += sizeof(*chunk);
		ptr += btrfs_chunk_item_size(old_num_stripes);
		cur += btrfs_chunk_item_size(old_num_stripes);
	}

	btrfs_set_super_sys_array_size(super, new_array_size);
	csum_block(buffer, BTRFS_SUPER_INFO_SIZE);

	return 0;
}

static struct extent_buffer *alloc_dummy_eb(u64 bytenr, u32 size)
{
	struct extent_buffer *eb;

	eb = malloc(sizeof(struct extent_buffer) + size);
	if (!eb)
		return NULL;
	memset(eb, 0, sizeof(struct extent_buffer) + size);

	eb->start = bytenr;
	eb->len = size;
	return eb;
}

static void truncate_item(struct extent_buffer *eb, int slot, u32 new_size)
{
	struct btrfs_item *item;
	u32 nritems;
	u32 old_size;
	u32 old_data_start;
	u32 size_diff;
	u32 data_end;
	int i;

	old_size = btrfs_item_size_nr(eb, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(eb);
	data_end = btrfs_item_offset_nr(eb, nritems - 1);

	old_data_start = btrfs_item_offset_nr(eb, slot);
	size_diff = old_size - new_size;

	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);
		ioff = btrfs_item_offset(eb, item);
		btrfs_set_item_offset(eb, item, ioff + size_diff);
	}

	memmove_extent_buffer(eb, btrfs_leaf_data(eb) + data_end + size_diff,
			      btrfs_leaf_data(eb) + data_end,
			      old_data_start + new_size - data_end);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(eb, item, new_size);
}

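/*
 * Rewrite chunk items inside restored chunk tree leaves: truncate each item
 * to a single-stripe chunk on the restore device and strip the RAID profile
 * bits (keeping DUP), then re-checksum the block.
 */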
static int fixup_chunk_tree_block(struct mdrestore_struct *mdres,
|
|
|
|
struct async_work *async, u8 *buffer,
|
|
|
|
size_t size)
|
|
|
|
{
|
|
|
|
struct extent_buffer *eb;
|
|
|
|
size_t size_left = size;
|
|
|
|
u64 bytenr = async->start;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (size_left % mdres->leafsize)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
eb = alloc_dummy_eb(bytenr, mdres->leafsize);
|
|
|
|
if (!eb)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
while (size_left) {
|
|
|
|
eb->start = bytenr;
|
|
|
|
memcpy(eb->data, buffer, mdres->leafsize);
|
|
|
|
|
|
|
|
if (btrfs_header_bytenr(eb) != bytenr)
|
|
|
|
break;
|
|
|
|
if (memcmp(mdres->fsid,
|
|
|
|
eb->data + offsetof(struct btrfs_header, fsid),
|
|
|
|
BTRFS_FSID_SIZE))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID)
|
|
|
|
goto next;
|
|
|
|
|
|
|
|
if (btrfs_header_level(eb) != 0)
|
|
|
|
goto next;
|
|
|
|
|
|
|
|
for (i = 0; i < btrfs_header_nritems(eb); i++) {
|
|
|
|
struct btrfs_chunk chunk;
|
|
|
|
struct btrfs_key key;
|
|
|
|
u64 type;
|
|
|
|
|
|
|
|
btrfs_item_key_to_cpu(eb, &key, i);
|
|
|
|
if (key.type != BTRFS_CHUNK_ITEM_KEY)
|
|
|
|
continue;
|
|
|
|
truncate_item(eb, i, sizeof(chunk));
|
|
|
|
read_extent_buffer(eb, &chunk,
|
|
|
|
btrfs_item_ptr_offset(eb, i),
|
|
|
|
sizeof(chunk));
|
|
|
|
|
|
|
|
/* Zero out the RAID profile */
|
|
|
|
type = btrfs_stack_chunk_type(&chunk);
|
|
|
|
type &= (BTRFS_BLOCK_GROUP_DATA |
|
|
|
|
BTRFS_BLOCK_GROUP_SYSTEM |
|
btrfs-progs: don't remove BTRFS_BLOCK_GROUP_DUP in chunk type
During restoring of image (-r using btrfs-image) we zero out RAID
profile in chunk type but forget to save BTRFS_BLOCK_GROUP_DUP if
present. This results in some false messages being printed by btrfsck.
$ ./mkfs.btrfs /dev/sdb2 -f
$ ./btrfs-image /dev/sdb2 btrfs_image_output
$ ./btrfs-image -r btrfs_image_output disk-image
$ ./btrfsck disk-image
Checking filesystem on disk-image
UUID: e644be2d-7701-4bd4-8804-7487f560d2a7
checking extents
Chunk[256, 228, 20971520]: length(8388608), offset(20971520), type(2) mismatch with block group[20971520, 192, 8388608]: offset(8388608), objectid(20971520), flags(34)
Chunk[256, 228, 29360128]: length(1073741824), offset(29360128), type(4) mismatch with block group[29360128, 192, 1073741824]: offset(1073741824), objectid(29360128), flags(36)
Block group[20971520, 8388608] (flags = 34) didn't find the relative chunk.
Block group[29360128, 1073741824] (flags = 36) didn't find the relative chunk.
Even though ./btrfsck on /dev/sdb2 seemed fine. This is due to type
mismatch above and type mismatch occured because we zero'ed out
BTRFS_BLOCK_GROUP_DUP while handling chunk trees.
Signed-off-by: Rakesh Pandit <rakesh@tuxera.com>
Signed-off-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Chris Mason <clm@fb.com>
2014-02-20 14:44:30 +00:00
|
|
|
BTRFS_BLOCK_GROUP_METADATA |
|
|
|
|
BTRFS_BLOCK_GROUP_DUP);
|
2013-03-27 20:55:41 +00:00
|
|
|
btrfs_set_stack_chunk_type(&chunk, type);
|
|
|
|
|
|
|
|
btrfs_set_stack_chunk_num_stripes(&chunk, 1);
|
|
|
|
btrfs_set_stack_chunk_sub_stripes(&chunk, 0);
|
|
|
|
btrfs_set_stack_stripe_devid(&chunk.stripe, mdres->devid);
|
|
|
|
memcpy(chunk.stripe.dev_uuid, mdres->uuid,
|
|
|
|
BTRFS_UUID_SIZE);
|
|
|
|
write_extent_buffer(eb, &chunk,
|
|
|
|
btrfs_item_ptr_offset(eb, i),
|
|
|
|
sizeof(chunk));
|
|
|
|
}
|
|
|
|
memcpy(buffer, eb->data, eb->len);
|
|
|
|
csum_block(buffer, eb->len);
|
|
|
|
next:
|
|
|
|
size_left -= mdres->leafsize;
|
|
|
|
buffer += mdres->leafsize;
|
|
|
|
bytenr += mdres->leafsize;
|
|
|
|
}
|
|
|
|
|
2013-10-07 21:43:00 +00:00
|
|
|
free(eb);
|
2013-03-27 20:55:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void write_backup_supers(int fd, u8 *buf)
{
	struct btrfs_super_block *super = (struct btrfs_super_block *)buf;
	struct stat st;
	u64 size;
	u64 bytenr;
	int i;
	int ret;

	if (fstat(fd, &st)) {
		fprintf(stderr, "Couldn't stat restore point, won't be able "
			"to write backup supers: %d\n", errno);
		return;
	}

	size = btrfs_device_size(fd, &st);

	for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > size)
			break;
		btrfs_set_super_bytenr(super, bytenr);
		csum_block(buf, BTRFS_SUPER_INFO_SIZE);
		ret = pwrite64(fd, buf, BTRFS_SUPER_INFO_SIZE, bytenr);
		if (ret < BTRFS_SUPER_INFO_SIZE) {
			if (ret < 0)
				fprintf(stderr, "Problem writing out backup "
					"super block %d, err %d\n", i, errno);
			else
				fprintf(stderr, "Short write writing out "
					"backup super block\n");
			break;
		}
	}
}

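/*
 * Map a logical byte offset to a physical offset on the restore target using
 * the chunk tree built from the image.  *size is clamped so the caller never
 * writes past the end of the containing chunk.
 */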
static u64 logical_to_physical(struct mdrestore_struct *mdres, u64 logical, u64 *size)
{
	struct fs_chunk *fs_chunk;
	struct rb_node *entry;
	struct fs_chunk search;
	u64 offset;

	if (logical == BTRFS_SUPER_INFO_OFFSET)
		return logical;

	search.logical = logical;
	entry = tree_search(&mdres->chunk_tree, &search.n, chunk_cmp, 1);
	if (!entry) {
		if (mdres->in != stdin)
			printf("Couldn't find a chunk, using logical\n");
		return logical;
	}
	fs_chunk = rb_entry(entry, struct fs_chunk, n);
	if (fs_chunk->logical > logical || fs_chunk->logical + fs_chunk->bytes < logical)
		BUG();
	offset = search.logical - fs_chunk->logical;

	*size = min(*size, fs_chunk->bytes + fs_chunk->logical - logical);
	return fs_chunk->physical + offset;
}

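/*
 * Worker thread for restore: pull async_work items off mdres->list,
 * decompress them if needed, fix up super and chunk tree blocks for a
 * single-device restore, and write the data out -- either directly at the
 * (remapped) physical offset, or through write_data_to_disk() when
 * fixup_offset is set for a multi-device restore.
 */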
static void *restore_worker(void *data)
{
	struct mdrestore_struct *mdres = (struct mdrestore_struct *)data;
	struct async_work *async;
	size_t size;
	u8 *buffer;
	u8 *outbuf;
	int outfd;
	int ret;
	int compress_size = MAX_PENDING_SIZE * 4;

	outfd = fileno(mdres->out);
	buffer = malloc(compress_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		pthread_mutex_lock(&mdres->mutex);
		if (!mdres->error)
			mdres->error = -ENOMEM;
		pthread_mutex_unlock(&mdres->mutex);
		pthread_exit(NULL);
	}

	while (1) {
		u64 bytenr;
		off_t offset = 0;
		int err = 0;

		pthread_mutex_lock(&mdres->mutex);
		while (!mdres->leafsize || list_empty(&mdres->list)) {
			if (mdres->done) {
				pthread_mutex_unlock(&mdres->mutex);
				goto out;
			}
			pthread_cond_wait(&mdres->cond, &mdres->mutex);
		}
		async = list_entry(mdres->list.next, struct async_work, list);
		list_del_init(&async->list);
		pthread_mutex_unlock(&mdres->mutex);

		if (mdres->compress_method == COMPRESS_ZLIB) {
			size = compress_size;
			ret = uncompress(buffer, (unsigned long *)&size,
					 async->buffer, async->bufsize);
			if (ret != Z_OK) {
				fprintf(stderr, "Error decompressing %d\n",
					ret);
				err = -EIO;
			}
			outbuf = buffer;
		} else {
			outbuf = async->buffer;
			size = async->bufsize;
		}

		if (!mdres->multi_devices) {
			if (async->start == BTRFS_SUPER_INFO_OFFSET) {
				if (mdres->old_restore) {
					update_super_old(outbuf);
				} else {
					ret = update_super(outbuf);
					if (ret)
						err = ret;
				}
			} else if (!mdres->old_restore) {
				ret = fixup_chunk_tree_block(mdres, async, outbuf, size);
				if (ret)
					err = ret;
			}
		}

		if (!mdres->fixup_offset) {
			while (size) {
				u64 chunk_size = size;
				if (!mdres->multi_devices && !mdres->old_restore)
					bytenr = logical_to_physical(mdres,
								     async->start + offset,
								     &chunk_size);
				else
					bytenr = async->start + offset;

				ret = pwrite64(outfd, outbuf+offset, chunk_size,
					       bytenr);
				if (ret != chunk_size) {
					if (ret < 0) {
						fprintf(stderr, "Error writing to "
							"device %d\n", errno);
						err = errno;
						break;
					} else {
						fprintf(stderr, "Short write\n");
						err = -EIO;
						break;
					}
				}
				size -= chunk_size;
				offset += chunk_size;
			}
		} else if (async->start != BTRFS_SUPER_INFO_OFFSET) {
			ret = write_data_to_disk(mdres->info, outbuf, async->start, size, 0);
			if (ret) {
				printk("Error writing data\n");
				exit(1);
			}
		}

		/* backup super blocks are already there at fixup_offset stage */
		if (!mdres->multi_devices && async->start == BTRFS_SUPER_INFO_OFFSET)
			write_backup_supers(outfd, outbuf);

		pthread_mutex_lock(&mdres->mutex);
		if (err && !mdres->error)
			mdres->error = err;
		mdres->num_items--;
		pthread_mutex_unlock(&mdres->mutex);

		free(async->buffer);
		free(async);
	}
out:
	free(buffer);
	pthread_exit(NULL);
}

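/*
 * Tear down an mdrestore_struct: free the chunk tree, signal all worker
 * threads to exit, join them, and release the synchronization primitives.
 */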
static void mdrestore_destroy(struct mdrestore_struct *mdres, int num_threads)
{
	struct rb_node *n;
	int i;

	while ((n = rb_first(&mdres->chunk_tree))) {
		struct fs_chunk *entry;

		entry = rb_entry(n, struct fs_chunk, n);
		rb_erase(n, &mdres->chunk_tree);
		free(entry);
	}
	pthread_mutex_lock(&mdres->mutex);
	mdres->done = 1;
	pthread_cond_broadcast(&mdres->cond);
	pthread_mutex_unlock(&mdres->mutex);

	for (i = 0; i < num_threads; i++)
		pthread_join(mdres->threads[i], NULL);

	pthread_cond_destroy(&mdres->cond);
	pthread_mutex_destroy(&mdres->mutex);
	free(mdres->threads);
}

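/*
 * Initialize an mdrestore_struct and spin up num_threads restore_worker
 * threads.  On failure the threads created so far are torn down again via
 * mdrestore_destroy().
 */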
static int mdrestore_init(struct mdrestore_struct *mdres,
			  FILE *in, FILE *out, int old_restore,
			  int num_threads, int fixup_offset,
			  struct btrfs_fs_info *info, int multi_devices)
{
	int i, ret = 0;

	memset(mdres, 0, sizeof(*mdres));
	pthread_cond_init(&mdres->cond, NULL);
	pthread_mutex_init(&mdres->mutex, NULL);
	INIT_LIST_HEAD(&mdres->list);
	mdres->in = in;
	mdres->out = out;
	mdres->old_restore = old_restore;
	mdres->chunk_tree.rb_node = NULL;
	mdres->fixup_offset = fixup_offset;
	mdres->info = info;
	mdres->multi_devices = multi_devices;

	if (!num_threads)
		return 0;

	mdres->num_threads = num_threads;
	mdres->threads = calloc(num_threads, sizeof(pthread_t));
	if (!mdres->threads)
		return -ENOMEM;
	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(mdres->threads + i, NULL, restore_worker,
				     mdres);
		if (ret)
			break;
	}
	if (ret)
		mdrestore_destroy(mdres, i + 1);
	return ret;
}

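/*
 * Populate leafsize, fsid, device uuid and devid from the super block the
 * first time it shows up in the image stream.  Later calls are no-ops.
 */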
static int fill_mdres_info(struct mdrestore_struct *mdres,
			   struct async_work *async)
{
	struct btrfs_super_block *super;
	u8 *buffer = NULL;
	u8 *outbuf;
	int ret;

	/* We've already been initialized */
	if (mdres->leafsize)
		return 0;

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;

		buffer = malloc(MAX_PENDING_SIZE * 2);
		if (!buffer)
			return -ENOMEM;
		ret = uncompress(buffer, (unsigned long *)&size,
				 async->buffer, async->bufsize);
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			return -EIO;
		}
		outbuf = buffer;
	} else {
		outbuf = async->buffer;
	}

	super = (struct btrfs_super_block *)outbuf;
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	return 0;
}

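/*
 * Read one cluster's worth of items from the image into async_work entries
 * and queue them for the worker threads.  *next is set to the byte offset of
 * the following cluster (clusters are padded out to BLOCK_SIZE).
 */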
static int add_cluster(struct meta_cluster *cluster,
		       struct mdrestore_struct *mdres, u64 *next)
{
	struct meta_cluster_item *item;
	struct meta_cluster_header *header = &cluster->header;
	struct async_work *async;
	u64 bytenr;
	u32 i, nritems;
	int ret;

	BUG_ON(mdres->num_items);
	mdres->compress_method = header->compress;

	bytenr = le64_to_cpu(header->bytenr) + BLOCK_SIZE;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];
		async = calloc(1, sizeof(*async));
		if (!async) {
			fprintf(stderr, "Error allocating async\n");
			return -ENOMEM;
		}
		async->start = le64_to_cpu(item->bytenr);
		async->bufsize = le32_to_cpu(item->size);
		async->buffer = malloc(async->bufsize);
		if (!async->buffer) {
			fprintf(stderr, "Error allocating async buffer\n");
			free(async);
			return -ENOMEM;
		}
		ret = fread(async->buffer, async->bufsize, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading buffer %d\n", errno);
			free(async->buffer);
			free(async);
			return -EIO;
		}
		bytenr += async->bufsize;

		pthread_mutex_lock(&mdres->mutex);
		if (async->start == BTRFS_SUPER_INFO_OFFSET) {
			ret = fill_mdres_info(mdres, async);
			if (ret) {
				fprintf(stderr, "Error setting up restore\n");
				pthread_mutex_unlock(&mdres->mutex);
				free(async->buffer);
				free(async);
				return ret;
			}
		}
		list_add_tail(&async->list, &mdres->list);
		mdres->num_items++;
		pthread_cond_signal(&mdres->cond);
		pthread_mutex_unlock(&mdres->mutex);
	}
	if (bytenr & BLOCK_MASK) {
		char buffer[BLOCK_MASK];
		size_t size = BLOCK_SIZE - (bytenr & BLOCK_MASK);

		bytenr += size;
		ret = fread(buffer, size, 1, mdres->in);
		if (ret != 1) {
			fprintf(stderr, "Error reading in buffer %d\n", errno);
			return -EIO;
		}
	}
	*next = bytenr;
	return 0;
}

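/*
 * Poll until the worker threads have drained the current batch of items,
 * returning the first error any of them recorded.
 */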
static int wait_for_worker(struct mdrestore_struct *mdres)
{
	int ret = 0;

	pthread_mutex_lock(&mdres->mutex);
	ret = mdres->error;
	while (!ret && mdres->num_items > 0) {
		struct timespec ts = {
			.tv_sec = 0,
			.tv_nsec = 10000000,
		};
		pthread_mutex_unlock(&mdres->mutex);
		nanosleep(&ts, NULL);
		pthread_mutex_lock(&mdres->mutex);
		ret = mdres->error;
	}
	pthread_mutex_unlock(&mdres->mutex);
	return ret;
}

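/*
 * Parse one tree block out of a decompressed cluster item.  Interior nodes
 * trigger a recursive search for their children; chunk items found in leaves
 * are added to mdres->chunk_tree so logical addresses can be remapped later.
 */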
static int read_chunk_block(struct mdrestore_struct *mdres, u8 *buffer,
			    u64 bytenr, u64 item_bytenr, u32 bufsize,
			    u64 cluster_bytenr)
{
	struct extent_buffer *eb;
	int ret = 0;
	int i;

	eb = alloc_dummy_eb(bytenr, mdres->leafsize);
	if (!eb) {
		ret = -ENOMEM;
		goto out;
	}

	while (item_bytenr != bytenr) {
		buffer += mdres->leafsize;
		item_bytenr += mdres->leafsize;
	}

	memcpy(eb->data, buffer, mdres->leafsize);
	if (btrfs_header_bytenr(eb) != bytenr) {
		fprintf(stderr, "Eb bytenr doesn't match found bytenr\n");
		ret = -EIO;
		goto out;
	}

	if (memcmp(mdres->fsid, eb->data + offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE)) {
		fprintf(stderr, "Fsid doesn't match\n");
		ret = -EIO;
		goto out;
	}

	if (btrfs_header_owner(eb) != BTRFS_CHUNK_TREE_OBJECTID) {
		fprintf(stderr, "Does not belong to the chunk tree\n");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		struct btrfs_chunk chunk;
		struct fs_chunk *fs_chunk;
		struct btrfs_key key;

		if (btrfs_header_level(eb)) {
			u64 blockptr = btrfs_node_blockptr(eb, i);

			ret = search_for_chunk_blocks(mdres, blockptr,
						      cluster_bytenr);
			if (ret)
				break;
			continue;
		}

		/* Yay a leaf!  We loves leafs! */
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			continue;

		fs_chunk = malloc(sizeof(struct fs_chunk));
		if (!fs_chunk) {
			fprintf(stderr, "Error allocating chunk\n");
			ret = -ENOMEM;
			break;
		}
		memset(fs_chunk, 0, sizeof(*fs_chunk));
		read_extent_buffer(eb, &chunk, btrfs_item_ptr_offset(eb, i),
				   sizeof(chunk));

		fs_chunk->logical = key.offset;
		fs_chunk->physical = btrfs_stack_stripe_offset(&chunk.stripe);
		fs_chunk->bytes = btrfs_stack_chunk_length(&chunk);
		tree_insert(&mdres->chunk_tree, &fs_chunk->n, chunk_cmp);
	}
out:
	free(eb);
	return ret;
}

/* If you have to ask you aren't worthy */
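/*
 * Scan the image, cluster by cluster, for the cluster item containing the
 * tree block at 'search'.  Scanning starts at cluster_bytenr and wraps back
 * to the start of the image if needed, then the block is handed to
 * read_chunk_block().
 */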
static int search_for_chunk_blocks(struct mdrestore_struct *mdres,
				   u64 search, u64 cluster_bytenr)
{
	struct meta_cluster *cluster;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item;
	u64 current_cluster = cluster_bytenr, bytenr;
	u64 item_bytenr;
	u32 bufsize, nritems, i;
	u32 max_size = MAX_PENDING_SIZE * 2;
	u8 *buffer, *tmp = NULL;
	int ret = 0;

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		return -ENOMEM;
	}

	buffer = malloc(max_size);
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		free(cluster);
		return -ENOMEM;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		tmp = malloc(max_size);
		if (!tmp) {
			fprintf(stderr, "Error allocating tmp buffer\n");
			free(cluster);
			free(buffer);
			return -ENOMEM;
		}
	}

	bytenr = current_cluster;
	while (1) {
		if (fseek(mdres->in, current_cluster, SEEK_SET)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			ret = -EIO;
			break;
		}

		ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
		if (ret == 0) {
			if (cluster_bytenr != 0) {
				cluster_bytenr = 0;
				current_cluster = 0;
				bytenr = 0;
				continue;
			}
			fprintf(stderr, "Error: couldn't find the chunk tree block in the image\n");
			ret = -EIO;
			break;
		} else if (ret < 0) {
			fprintf(stderr, "Error reading image\n");
			break;
		}
		ret = 0;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != current_cluster) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}

		bytenr += BLOCK_SIZE;
		nritems = le32_to_cpu(header->nritems);
		for (i = 0; i < nritems; i++) {
			size_t size;

			item = &cluster->items[i];
			bufsize = le32_to_cpu(item->size);
			item_bytenr = le64_to_cpu(item->bytenr);

			if (bufsize > max_size) {
				fprintf(stderr, "item %u size %u too big\n",
					i, bufsize);
				ret = -EIO;
				break;
			}

			if (mdres->compress_method == COMPRESS_ZLIB) {
				ret = fread(tmp, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}

				size = max_size;
				ret = uncompress(buffer,
						 (unsigned long *)&size, tmp,
						 bufsize);
				if (ret != Z_OK) {
					fprintf(stderr, "Error decompressing "
						"%d\n", ret);
					ret = -EIO;
					break;
				}
			} else {
				ret = fread(buffer, bufsize, 1, mdres->in);
				if (ret != 1) {
					fprintf(stderr, "Error reading: %d\n",
						errno);
					ret = -EIO;
					break;
				}
				size = bufsize;
			}
			ret = 0;

			if (item_bytenr <= search &&
			    item_bytenr + size > search) {
				ret = read_chunk_block(mdres, buffer, search,
						       item_bytenr, size,
						       current_cluster);
				if (!ret)
					ret = 1;
				break;
			}
			bytenr += bufsize;
		}
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}
		if (bytenr & BLOCK_MASK)
			bytenr += BLOCK_SIZE - (bytenr & BLOCK_MASK);
		current_cluster = bytenr;
	}

	free(tmp);
	free(buffer);
	free(cluster);
	return ret;
}

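/*
 * Read the first cluster of the image, locate the super block item, pull the
 * chunk root and filesystem identity out of it, and build the in-memory
 * chunk tree used to remap logical addresses.  Skipped when reading from
 * stdin, since that stream cannot be seeked.
 */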
static int build_chunk_tree(struct mdrestore_struct *mdres,
			    struct meta_cluster *cluster)
{
	struct btrfs_super_block *super;
	struct meta_cluster_header *header;
	struct meta_cluster_item *item = NULL;
	u64 chunk_root_bytenr = 0;
	u32 i, nritems;
	u64 bytenr = 0;
	u8 *buffer;
	int ret;

	/* We can't seek with stdin so don't bother doing this */
	if (mdres->in == stdin)
		return 0;

	ret = fread(cluster, BLOCK_SIZE, 1, mdres->in);
	if (ret <= 0) {
		fprintf(stderr, "Error reading in cluster: %d\n", errno);
		return -EIO;
	}
	ret = 0;

	header = &cluster->header;
	if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
	    le64_to_cpu(header->bytenr) != 0) {
		fprintf(stderr, "bad header in metadump image\n");
		return -EIO;
	}

	bytenr += BLOCK_SIZE;
	mdres->compress_method = header->compress;
	nritems = le32_to_cpu(header->nritems);
	for (i = 0; i < nritems; i++) {
		item = &cluster->items[i];

		if (le64_to_cpu(item->bytenr) == BTRFS_SUPER_INFO_OFFSET)
			break;
		bytenr += le32_to_cpu(item->size);
		if (fseek(mdres->in, le32_to_cpu(item->size), SEEK_CUR)) {
			fprintf(stderr, "Error seeking: %d\n", errno);
			return -EIO;
		}
	}

	if (!item || le64_to_cpu(item->bytenr) != BTRFS_SUPER_INFO_OFFSET) {
		fprintf(stderr, "Error: didn't find the super block in the first cluster\n");
		return -EINVAL;
	}

	buffer = malloc(le32_to_cpu(item->size));
	if (!buffer) {
		fprintf(stderr, "Error allocating buffer\n");
		return -ENOMEM;
	}

	ret = fread(buffer, le32_to_cpu(item->size), 1, mdres->in);
	if (ret != 1) {
		fprintf(stderr, "Error reading buffer: %d\n", errno);
		free(buffer);
		return -EIO;
	}

	if (mdres->compress_method == COMPRESS_ZLIB) {
		size_t size = MAX_PENDING_SIZE * 2;
		u8 *tmp;

		tmp = malloc(MAX_PENDING_SIZE * 2);
		if (!tmp) {
			free(buffer);
			return -ENOMEM;
		}
		ret = uncompress(tmp, (unsigned long *)&size,
				 buffer, le32_to_cpu(item->size));
		if (ret != Z_OK) {
			fprintf(stderr, "Error decompressing %d\n", ret);
			free(buffer);
			free(tmp);
			return -EIO;
		}
		free(buffer);
		buffer = tmp;
	}

	pthread_mutex_lock(&mdres->mutex);
	super = (struct btrfs_super_block *)buffer;
	chunk_root_bytenr = btrfs_super_chunk_root(super);
	mdres->leafsize = btrfs_super_leafsize(super);
	memcpy(mdres->fsid, super->fsid, BTRFS_FSID_SIZE);
	memcpy(mdres->uuid, super->dev_item.uuid,
	       BTRFS_UUID_SIZE);
	mdres->devid = le64_to_cpu(super->dev_item.devid);
	free(buffer);
	pthread_mutex_unlock(&mdres->mutex);

	return search_for_chunk_blocks(mdres, chunk_root_bytenr, 0);
}

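/*
 * Core of the restore path.  Reads clusters from the image (file or stdin),
 * queues them for the worker threads, and waits for each batch to complete.
 * When fixup_offset is set (multi-device restore) the target filesystem is
 * opened so tree blocks can be written through its chunk mapping instead of
 * at their raw offsets.
 */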
static int __restore_metadump(const char *input, FILE *out, int old_restore,
			      int num_threads, int fixup_offset,
			      const char *target, int multi_devices)
{
	struct meta_cluster *cluster = NULL;
	struct meta_cluster_header *header;
	struct mdrestore_struct mdrestore;
	struct btrfs_fs_info *info = NULL;
	u64 bytenr = 0;
	FILE *in = NULL;
	int ret = 0;

	if (!strcmp(input, "-")) {
		in = stdin;
	} else {
		in = fopen(input, "r");
		if (!in) {
			perror("unable to open metadump image");
			return 1;
		}
	}

	/* NOTE: open with write mode */
	if (fixup_offset) {
		BUG_ON(!target);
		info = open_ctree_fs_info(target, 0, 0,
					  OPEN_CTREE_WRITES |
					  OPEN_CTREE_RESTORE |
					  OPEN_CTREE_PARTIAL);
		if (!info) {
			fprintf(stderr, "%s: open ctree failed\n", __func__);
			ret = -EIO;
			goto failed_open;
		}
	}

	cluster = malloc(BLOCK_SIZE);
	if (!cluster) {
		fprintf(stderr, "Error allocating cluster\n");
		ret = -ENOMEM;
		goto failed_info;
	}

	ret = mdrestore_init(&mdrestore, in, out, old_restore, num_threads,
			     fixup_offset, info, multi_devices);
	if (ret) {
		fprintf(stderr, "Error initializing mdrestore %d\n", ret);
		goto failed_cluster;
	}

	if (!multi_devices && !old_restore) {
		ret = build_chunk_tree(&mdrestore, cluster);
		if (ret)
			goto out;
	}

	if (in != stdin && fseek(in, 0, SEEK_SET)) {
		fprintf(stderr, "Error seeking %d\n", errno);
		goto out;
	}

	while (1) {
		ret = fread(cluster, BLOCK_SIZE, 1, in);
		if (!ret)
			break;

		header = &cluster->header;
		if (le64_to_cpu(header->magic) != HEADER_MAGIC ||
		    le64_to_cpu(header->bytenr) != bytenr) {
			fprintf(stderr, "bad header in metadump image\n");
			ret = -EIO;
			break;
		}
		ret = add_cluster(cluster, &mdrestore, &bytenr);
		if (ret) {
			fprintf(stderr, "Error adding cluster\n");
			break;
		}

		ret = wait_for_worker(&mdrestore);
		if (ret) {
			fprintf(stderr, "One of the threads errored out %d\n",
				ret);
			break;
		}
	}
out:
	mdrestore_destroy(&mdrestore, num_threads);
failed_cluster:
	free(cluster);
failed_info:
	if (fixup_offset && info)
		close_ctree(info->chunk_root);
failed_open:
	if (in != stdin)
		fclose(in);
	return ret;
}

static int restore_metadump(const char *input, FILE *out, int old_restore,
			    int num_threads, int multi_devices)
{
	return __restore_metadump(input, out, old_restore, num_threads, 0, NULL,
				  multi_devices);
}

static int fixup_metadump(const char *input, FILE *out, int num_threads,
			  const char *target)
{
	return __restore_metadump(input, out, 0, num_threads, 1, target, 1);
}

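/*
 * For a multi-device restore: look up the dev item for cur_devid in the
 * chunk tree of the already-restored filesystem, build a super block for
 * that device from it, and write the super block (plus its backups) to
 * other_dev.
 */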
static int update_disk_super_on_device(struct btrfs_fs_info *info,
				       const char *other_dev, u64 cur_devid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_path path;
	struct btrfs_dev_item *dev_item;
	struct btrfs_super_block *disk_super;
	char dev_uuid[BTRFS_UUID_SIZE];
	char fs_uuid[BTRFS_UUID_SIZE];
	u64 devid, type, io_align, io_width;
	u64 sector_size, total_bytes, bytes_used;
	char *buf;
	int fp;
	int ret;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = cur_devid;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, info->chunk_root, &key, &path, 0, 0);
	if (ret) {
		fprintf(stderr, "search key fails\n");
		exit(1);
	}

	leaf = path.nodes[0];
	dev_item = btrfs_item_ptr(leaf, path.slots[0],
				  struct btrfs_dev_item);

	devid = btrfs_device_id(leaf, dev_item);
	if (devid != cur_devid) {
		printk("devid %llu mismatch with %llu\n", devid, cur_devid);
		exit(1);
	}

	type = btrfs_device_type(leaf, dev_item);
	io_align = btrfs_device_io_align(leaf, dev_item);
	io_width = btrfs_device_io_width(leaf, dev_item);
	sector_size = btrfs_device_sector_size(leaf, dev_item);
	total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, (unsigned long)btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, (unsigned long)btrfs_device_fsid(dev_item), BTRFS_UUID_SIZE);

	btrfs_release_path(&path);

	printk("update disk super on %s devid=%llu\n", other_dev, devid);

	/* update other devices' super block */
	fp = open(other_dev, O_CREAT | O_RDWR, 0600);
	if (fp < 0) {
		fprintf(stderr, "could not open %s\n", other_dev);
		exit(1);
	}

	buf = malloc(BTRFS_SUPER_INFO_SIZE);
	if (!buf) {
		ret = -ENOMEM;
		close(fp);
		return ret;
	}

	memcpy(buf, info->super_copy, BTRFS_SUPER_INFO_SIZE);

	disk_super = (struct btrfs_super_block *)buf;
	dev_item = &disk_super->dev_item;

	btrfs_set_stack_device_type(dev_item, type);
	btrfs_set_stack_device_id(dev_item, devid);
	btrfs_set_stack_device_total_bytes(dev_item, total_bytes);
	btrfs_set_stack_device_bytes_used(dev_item, bytes_used);
	btrfs_set_stack_device_io_align(dev_item, io_align);
	btrfs_set_stack_device_io_width(dev_item, io_width);
	btrfs_set_stack_device_sector_size(dev_item, sector_size);
	memcpy(dev_item->uuid, dev_uuid, BTRFS_UUID_SIZE);
	memcpy(dev_item->fsid, fs_uuid, BTRFS_UUID_SIZE);
	csum_block((u8 *)buf, BTRFS_SUPER_INFO_SIZE);

	ret = pwrite64(fp, buf, BTRFS_SUPER_INFO_SIZE, BTRFS_SUPER_INFO_OFFSET);
	if (ret != BTRFS_SUPER_INFO_SIZE) {
		ret = -EIO;
		goto out;
	}

	write_backup_supers(fp, (u8 *)buf);

out:
	free(buf);
	close(fp);
	return 0;
}

static void print_usage(void)
|
|
|
|
{
|
|
|
|
fprintf(stderr, "usage: btrfs-image [options] source target\n");
|
|
|
|
fprintf(stderr, "\t-r \trestore metadump image\n");
|
|
|
|
fprintf(stderr, "\t-c value\tcompression level (0 ~ 9)\n");
|
|
|
|
fprintf(stderr, "\t-t value\tnumber of threads (1 ~ 32)\n");
|
2013-03-27 20:55:41 +00:00
|
|
|
fprintf(stderr, "\t-o \tdon't mess with the chunk tree when restoring\n");
|
2013-06-22 05:32:44 +00:00
|
|
|
fprintf(stderr, "\t-s \tsanitize file names, use once to just use garbage, use twice if you want crc collisions\n");
|
2013-03-28 14:26:09 +00:00
|
|
|
fprintf(stderr, "\t-w \twalk all trees instead of using extent tree, do this if your extent tree is broken\n");
|
2014-06-19 01:46:03 +00:00
|
|
|
fprintf(stderr, "\t-m \trestore for multiple devices\n");
|
2008-11-20 14:52:48 +00:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
int main(int argc, char *argv[])
|
|
|
|
{
|
|
|
|
char *source;
|
|
|
|
char *target;
|
2014-02-20 01:30:51 +00:00
|
|
|
u64 num_threads = 0;
|
|
|
|
u64 compress_level = 0;
|
2008-11-20 14:52:48 +00:00
|
|
|
int create = 1;
|
2013-03-27 20:55:41 +00:00
|
|
|
int old_restore = 0;
|
2013-03-28 14:26:09 +00:00
|
|
|
int walk_trees = 0;
|
2013-06-22 05:32:45 +00:00
|
|
|
int multi_devices = 0;
|
2008-11-20 14:52:48 +00:00
|
|
|
int ret;
|
2013-03-22 14:52:07 +00:00
|
|
|
int sanitize = 0;
|
2013-06-22 05:32:45 +00:00
|
|
|
int dev_cnt = 0;
|
2014-06-24 03:16:12 +00:00
|
|
|
int usage_error = 0;
|
2008-11-20 14:52:48 +00:00
|
|
|
FILE *out;
|
|
|
|
|
|
|
|
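/* parse options: -r/-m select restore modes, -c/-t tune compression and threads, -o/-s/-w adjust dump/restore behaviour */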
while (1) {
|
2013-06-22 05:32:45 +00:00
|
|
|
int c = getopt(argc, argv, "rc:t:oswm");
|
2008-11-20 14:52:48 +00:00
|
|
|
if (c < 0)
|
|
|
|
break;
|
|
|
|
switch (c) {
|
|
|
|
case 'r':
|
|
|
|
create = 0;
|
|
|
|
break;
|
|
|
|
case 't':
|
2014-02-20 01:30:51 +00:00
|
|
|
num_threads = arg_strtou64(optarg);
|
|
|
|
if (num_threads > 32)
|
2008-11-20 14:52:48 +00:00
|
|
|
print_usage();
|
|
|
|
break;
|
|
|
|
case 'c':
|
2014-02-20 01:30:51 +00:00
|
|
|
compress_level = arg_strtou64(optarg);
|
|
|
|
if (compress_level > 9)
|
2008-11-20 14:52:48 +00:00
|
|
|
print_usage();
|
|
|
|
break;
|
2013-03-27 20:55:41 +00:00
|
|
|
case 'o':
|
|
|
|
old_restore = 1;
|
|
|
|
break;
|
2013-03-22 14:52:07 +00:00
|
|
|
case 's':
|
|
|
|
sanitize++;
|
|
|
|
break;
|
2013-03-28 14:26:09 +00:00
|
|
|
case 'w':
|
|
|
|
walk_trees = 1;
|
|
|
|
break;
|
2013-06-22 05:32:45 +00:00
|
|
|
case 'm':
|
|
|
|
create = 0;
|
|
|
|
multi_devices = 1;
|
|
|
|
break;
|
2008-11-20 14:52:48 +00:00
|
|
|
default:
|
|
|
|
print_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
argc = argc - optind;
|
2014-06-30 03:54:12 +00:00
|
|
|
set_argv0(argv);
|
|
|
|
if (check_argc_min(argc, 2))
|
|
|
|
print_usage();
|
|
|
|
|
2013-06-22 05:32:45 +00:00
|
|
|
dev_cnt = argc - 1;
|
|
|
|
|
2014-06-24 03:16:12 +00:00
|
|
|
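/* sanity-check the option combination against the chosen mode (create vs. restore) */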
if (create) {
|
|
|
|
if (old_restore) {
|
|
|
|
fprintf(stderr, "Usage error: create and restore cannot be used at the same time\n");
|
|
|
|
usage_error++;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (walk_trees || sanitize || compress_level) {
|
|
|
|
fprintf(stderr, "Usage error: use -w, -s, -c options for restore makes no sense\n");
|
|
|
|
usage_error++;
|
|
|
|
}
|
|
|
|
if (multi_devices && dev_cnt < 2) {
|
|
|
|
fprintf(stderr, "Usage error: not enough devices specified for -m option\n");
|
|
|
|
usage_error++;
|
|
|
|
}
|
|
|
|
if (!multi_devices && dev_cnt != 1) {
|
|
|
|
fprintf(stderr, "Usage error: accepts only 1 device without -m option\n");
|
|
|
|
usage_error++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (usage_error)
|
2008-11-20 14:52:48 +00:00
|
|
|
print_usage();
|
2013-06-22 05:32:45 +00:00
|
|
|
|
2008-11-20 14:52:48 +00:00
|
|
|
source = argv[optind];
|
|
|
|
target = argv[optind + 1];
|
|
|
|
|
|
|
|
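/* a target of "-" streams the created image to stdout */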
if (create && !strcmp(target, "-")) {
|
|
|
|
out = stdout;
|
|
|
|
} else {
|
|
|
|
out = fopen(target, "w+");
|
|
|
|
if (!out) {
|
|
|
|
perror("unable to create target file");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
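/* default the worker count to the number of online CPUs when compression is requested */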
if (num_threads == 0 && compress_level > 0) {
|
|
|
|
num_threads = sysconf(_SC_NPROCESSORS_ONLN);
|
|
|
|
/* sysconf() can return -1; num_threads is u64, so compare as signed */
if ((long)num_threads <= 0)
|
|
|
|
num_threads = 1;
|
|
|
|
}
|
|
|
|
|
2014-07-07 01:56:51 +00:00
|
|
|
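/* when creating a dump, warn if the source is still mounted; a live filesystem may yield an inconsistent image */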
if (create) {
|
|
|
|
ret = check_mounted(source);
|
|
|
|
if (ret < 0) {
|
|
|
|
fprintf(stderr, "Could not check mount status: %s\n",
|
|
|
|
strerror(-ret));
|
|
|
|
exit(1);
|
|
|
|
} else if (ret)
|
|
|
|
fprintf(stderr,
|
|
|
|
"WARNING: The device is mounted. Make sure the filesystem is quiescent.\n");
|
|
|
|
|
2008-11-20 14:52:48 +00:00
|
|
|
ret = create_metadump(source, out, num_threads,
|
2013-03-22 14:52:07 +00:00
|
|
|
compress_level, sanitize, walk_trees);
|
2014-07-07 01:56:51 +00:00
|
|
|
} else {
|
2013-06-22 05:32:45 +00:00
|
|
|
ret = restore_metadump(source, out, old_restore, 1,
|
|
|
|
multi_devices);
|
2014-07-07 01:56:51 +00:00
|
|
|
}
|
Btrfs-progs: enhance btrfs-image to restore image onto multiple disks
This adds a 'btrfs-image -m' option, which let us restore an image that
is built from a btrfs of multiple disks onto several disks altogether.
This aims to address the following case,
$ mkfs.btrfs -m raid0 sda sdb
$ btrfs-image sda image.file
$ btrfs-image -r image.file sdc
---------
so we can only restore metadata onto sdc, and another thing is we can
only mount sdc with degraded mode as we don't provide informations of
another disk. And, it's built as RAID0 and we have only one disk,
so after mount sdc we'll get into readonly mode.
This is just annoying for people(like me) who're trying to restore image
but turn to find they cannot make it work.
So this'll make your life easier, just tap
$ btrfs-image -m image.file sdc sdd
---------
then you get everything about metadata done, the same offset with that of
the originals(of course, you need offer enough disk size, at least the disk
size of the original disks).
Besides, this also works with raid5 and raid6 metadata image.
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
2013-06-22 05:32:45 +00:00
|
|
|
if (ret) {
|
|
|
|
printk("%s failed (%s)\n", (create) ? "create" : "restore",
|
|
|
|
strerror(errno));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* extended support for multiple devices */
|
|
|
|
if (!create && multi_devices) {
|
|
|
|
struct btrfs_fs_info *info;
|
|
|
|
u64 total_devs;
|
|
|
|
int i;
|
|
|
|
|
2013-10-28 18:28:43 +00:00
|
|
|
info = open_ctree_fs_info(target, 0, 0,
|
|
|
|
OPEN_CTREE_PARTIAL |
|
|
|
|
OPEN_CTREE_RESTORE);
|
2013-06-22 05:32:45 +00:00
|
|
|
if (!info) {
|
|
|
|
int e = errno;
|
|
|
|
fprintf(stderr, "unable to open %s error = %s\n",
|
|
|
|
target, strerror(e));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
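/* the image records how many devices the original filesystem had; exactly that many must be supplied */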
total_devs = btrfs_super_num_devices(info->super_copy);
|
|
|
|
if (total_devs != dev_cnt) {
|
|
|
|
printk("it needs %llu devices but has only %d\n",
|
|
|
|
total_devs, dev_cnt);
|
|
|
|
close_ctree(info->chunk_root);
|
|
|
|
goto out;
|
|
|
|
}
|
2008-11-20 14:52:48 +00:00
|
|
|
|
2013-06-22 05:32:45 +00:00
|
|
|
/* update super block on other disks */
|
|
|
|
for (i = 2; i <= dev_cnt; i++) {
|
|
|
|
ret = update_disk_super_on_device(info,
|
|
|
|
argv[optind + i], (u64)i);
|
|
|
|
if (ret) {
|
|
|
|
printk("update disk super failed devid=%d (error=%d)\n",
|
|
|
|
i, ret);
|
|
|
|
close_ctree(info->chunk_root);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
close_ctree(info->chunk_root);
|
|
|
|
|
|
|
|
/* fix metadata block to map correct chunk */
|
|
|
|
ret = fixup_metadump(source, out, 1, target);
|
|
|
|
if (ret) {
|
|
|
|
fprintf(stderr, "fix metadump failed (error=%d)\n",
|
|
|
|
ret);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2014-06-24 03:16:45 +00:00
|
|
|
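/* cleanup: flush stdout, or close the output file and delete a partially written image on failure */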
if (out == stdout) {
|
2008-11-20 14:52:48 +00:00
|
|
|
fflush(out);
|
2014-06-24 03:16:45 +00:00
|
|
|
} else {
|
2008-11-20 14:52:48 +00:00
|
|
|
fclose(out);
|
2014-06-24 03:16:45 +00:00
|
|
|
if (ret && create) {
|
|
|
|
int unlink_ret;
|
|
|
|
|
|
|
|
unlink_ret = unlink(target);
|
|
|
|
if (unlink_ret)
|
|
|
|
fprintf(stderr,
|
|
|
|
"unlink output file failed : %s\n",
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
}
|
2008-11-20 14:52:48 +00:00
|
|
|
|
2013-09-04 15:22:30 +00:00
|
|
|
return !!ret;
|
2008-11-20 14:52:48 +00:00
|
|
|
}
|