btrfs-progs/send-utils.c

/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <uuid/uuid.h>
#include <limits.h>
#include <errno.h>
#include "ctree.h"
#include "send-utils.h"
#include "ioctl.h"
#include "btrfs-list.h"

static int btrfs_subvolid_resolve_sub(int fd, char *path, size_t *path_len,
				      u64 subvol_id);
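
/*
 * Resolve the root (subvolume) id of the subvolume at @sub_path, relative
 * to the filesystem mount fd @mnt_fd.  Returns 0 and sets @root_id on
 * success, or a negative errno on failure.
 */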
static int btrfs_get_root_id_by_sub_path(int mnt_fd, const char *sub_path,
					 u64 *root_id)
{
	int ret;
	int subvol_fd;

	subvol_fd = openat(mnt_fd, sub_path, O_RDONLY);
	if (subvol_fd < 0) {
		ret = -errno;
		fprintf(stderr, "ERROR: open %s failed. %s\n", sub_path,
			strerror(-ret));
		return ret;
	}

	ret = btrfs_list_get_path_rootid(subvol_fd, root_id);
	close(subvol_fd);
	return ret;
}
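
/*
 * Search the root tree for the ROOT_ITEM of @root_id and copy the raw item
 * into @buf.  The number of bytes copied is returned in @read_len.
 * Returns 0 on success, -EOVERFLOW if @buf_len is too small, -ENOENT if no
 * ROOT_ITEM was found, or a negative errno on ioctl failure.
 */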
static int btrfs_read_root_item_raw(int mnt_fd, u64 root_id, size_t buf_len,
				    u32 *read_len, void *buf)
{
	int ret;
	struct btrfs_ioctl_search_args args;
	struct btrfs_ioctl_search_key *sk = &args.key;
	struct btrfs_ioctl_search_header *sh;
	unsigned long off = 0;
	int found = 0;
	int i;

	*read_len = 0;
	memset(&args, 0, sizeof(args));

	sk->tree_id = BTRFS_ROOT_TREE_OBJECTID;
	/*
	 * there may be more than one ROOT_ITEM key if there are
	 * snapshots pending deletion, we have to loop through
	 * them.
	 */
	sk->min_objectid = root_id;
	sk->max_objectid = root_id;
	sk->max_type = BTRFS_ROOT_ITEM_KEY;
	sk->min_type = BTRFS_ROOT_ITEM_KEY;
	sk->max_offset = (u64)-1;
	sk->max_transid = (u64)-1;
	sk->nr_items = 4096;

	while (1) {
		ret = ioctl(mnt_fd, BTRFS_IOC_TREE_SEARCH, &args);
		if (ret < 0) {
			ret = -errno;
			fprintf(stderr,
				"ERROR: can't perform the search - %s\n",
				strerror(-ret));
			return ret;
		}
		/* the ioctl returns the number of items it found in nr_items */
		if (sk->nr_items == 0)
			break;

		off = 0;
		for (i = 0; i < sk->nr_items; i++) {
			struct btrfs_root_item *item;

			sh = (struct btrfs_ioctl_search_header *)(args.buf +
								  off);
			off += sizeof(*sh);
			item = (struct btrfs_root_item *)(args.buf + off);
			off += sh->len;

			sk->min_objectid = sh->objectid;
			sk->min_type = sh->type;
			sk->min_offset = sh->offset;

			if (sh->objectid > root_id)
				break;

			if (sh->objectid == root_id &&
			    sh->type == BTRFS_ROOT_ITEM_KEY) {
				if (sh->len > buf_len) {
					/* btrfs-progs is too old for kernel */
					fprintf(stderr,
						"ERROR: buf for read_root_item_raw() is too small, get newer btrfs tools!\n");
					return -EOVERFLOW;
				}
				memcpy(buf, item, sh->len);
				*read_len = sh->len;
				found = 1;
			}
		}
		if (sk->min_offset < (u64)-1)
			sk->min_offset++;
		else
			break;

		if (sk->min_type != BTRFS_ROOT_ITEM_KEY ||
		    sk->min_objectid != root_id)
			break;
	}

	return found ? 0 : -ENOENT;
}

/*
 * Read a root item from the tree. In case we detect a root item smaller than
 * sizeof(root_item), we know it's an old version of the root structure and
 * initialize all new fields to zero. The same happens if we detect mismatching
 * generation numbers as then we know the root was once mounted with an older
 * kernel that was not aware of the root item structure change.
 */
static int btrfs_read_root_item(int mnt_fd, u64 root_id,
				struct btrfs_root_item *item)
{
	int ret;
	u32 read_len;

	ret = btrfs_read_root_item_raw(mnt_fd, root_id, sizeof(*item),
				       &read_len, item);
	if (ret)
		return ret;

	if (read_len < sizeof(*item) ||
	    btrfs_root_generation(item) != btrfs_root_generation_v2(item))
		memset(&item->generation_v2, 0,
		       sizeof(*item) - offsetof(struct btrfs_root_item,
						generation_v2));

	return 0;
}

#ifdef BTRFS_COMPAT_SEND_NO_UUID_TREE
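/*
 * Insert @si into the rb-tree @root, keyed according to @type (received
 * uuid + stransid, uuid, root id or path).  Returns NULL on success or the
 * existing node if an entry with the same key is already present.
 */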
static struct rb_node *tree_insert(struct rb_root *root,
				   struct subvol_info *si,
				   enum subvol_search_type type)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct subvol_info *entry;
	__s64 comp;

	while (*p) {
		parent = *p;
		if (type == subvol_search_by_received_uuid) {
			entry = rb_entry(parent, struct subvol_info,
					 rb_received_node);
			comp = memcmp(entry->received_uuid, si->received_uuid,
				      BTRFS_UUID_SIZE);
			if (!comp) {
				if (entry->stransid < si->stransid)
					comp = -1;
				else if (entry->stransid > si->stransid)
					comp = 1;
				else
					comp = 0;
			}
		} else if (type == subvol_search_by_uuid) {
			entry = rb_entry(parent, struct subvol_info,
					 rb_local_node);
			comp = memcmp(entry->uuid, si->uuid, BTRFS_UUID_SIZE);
		} else if (type == subvol_search_by_root_id) {
			entry = rb_entry(parent, struct subvol_info,
					 rb_root_id_node);
			comp = entry->root_id - si->root_id;
		} else if (type == subvol_search_by_path) {
			entry = rb_entry(parent, struct subvol_info,
					 rb_path_node);
			comp = strcmp(entry->path, si->path);
		} else {
			BUG();
		}

		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	if (type == subvol_search_by_received_uuid) {
		rb_link_node(&si->rb_received_node, parent, p);
		rb_insert_color(&si->rb_received_node, root);
	} else if (type == subvol_search_by_uuid) {
		rb_link_node(&si->rb_local_node, parent, p);
		rb_insert_color(&si->rb_local_node, root);
	} else if (type == subvol_search_by_root_id) {
		rb_link_node(&si->rb_root_id_node, parent, p);
		rb_insert_color(&si->rb_root_id_node, root);
	} else if (type == subvol_search_by_path) {
		rb_link_node(&si->rb_path_node, parent, p);
		rb_insert_color(&si->rb_path_node, root);
	}
	return NULL;
}
#endif
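
/*
 * Resolve the path of subvolume @subvol_id relative to the filesystem root
 * and store it in @path, a buffer of @path_len bytes (typically PATH_MAX).
 * Returns 0 on success, -EOVERFLOW if the buffer is too small, or another
 * negative value if a lookup fails.
 */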
int btrfs_subvolid_resolve(int fd, char *path, size_t path_len, u64 subvol_id)
{
	if (path_len < 1)
		return -EOVERFLOW;
	path[0] = '\0';
	path_len--;
	path[path_len] = '\0';
	return btrfs_subvolid_resolve_sub(fd, path, &path_len, subvol_id);
}
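
/*
 * Recursive helper for btrfs_subvolid_resolve(): walk the ROOT_BACKREF
 * chain of @subvol_id up to the top-level subvolume and append each path
 * component to @path, decrementing @path_len as space is consumed.
 */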
static int btrfs_subvolid_resolve_sub(int fd, char *path, size_t *path_len,
				      u64 subvol_id)
{
	int ret;
	struct btrfs_ioctl_search_args search_arg;
	struct btrfs_ioctl_ino_lookup_args ino_lookup_arg;
	struct btrfs_ioctl_search_header *search_header;
	struct btrfs_root_ref *backref_item;

	if (subvol_id == BTRFS_FS_TREE_OBJECTID) {
		if (*path_len < 1)
			return -EOVERFLOW;
		*path = '\0';
		(*path_len)--;
		return 0;
	}

	memset(&search_arg, 0, sizeof(search_arg));
	search_arg.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;
	search_arg.key.min_objectid = subvol_id;
	search_arg.key.max_objectid = subvol_id;
	search_arg.key.min_type = BTRFS_ROOT_BACKREF_KEY;
	search_arg.key.max_type = BTRFS_ROOT_BACKREF_KEY;
	search_arg.key.max_offset = (u64)-1;
	search_arg.key.max_transid = (u64)-1;
	search_arg.key.nr_items = 1;
	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH, &search_arg);
	if (ret) {
		fprintf(stderr,
			"ioctl(BTRFS_IOC_TREE_SEARCH, subvol_id %llu) ret=%d, error: %s\n",
			(unsigned long long)subvol_id, ret, strerror(errno));
		return ret;
	}

	if (search_arg.key.nr_items < 1) {
		fprintf(stderr,
			"failed to lookup subvol_id %llu!\n",
			(unsigned long long)subvol_id);
		return -ENOENT;
	}

	search_header = (struct btrfs_ioctl_search_header *)search_arg.buf;
	backref_item = (struct btrfs_root_ref *)(search_header + 1);
	if (search_header->offset != BTRFS_FS_TREE_OBJECTID) {
		int sub_ret;

		sub_ret = btrfs_subvolid_resolve_sub(fd, path, path_len,
						     search_header->offset);
		if (sub_ret)
			return sub_ret;
		if (*path_len < 1)
			return -EOVERFLOW;
		strcat(path, "/");
		(*path_len)--;
	}

	if (btrfs_stack_root_ref_dirid(backref_item) !=
	    BTRFS_FIRST_FREE_OBJECTID) {
		int len;

		memset(&ino_lookup_arg, 0, sizeof(ino_lookup_arg));
		ino_lookup_arg.treeid = search_header->offset;
		ino_lookup_arg.objectid =
			btrfs_stack_root_ref_dirid(backref_item);
		ret = ioctl(fd, BTRFS_IOC_INO_LOOKUP, &ino_lookup_arg);
		if (ret) {
			fprintf(stderr,
				"ioctl(BTRFS_IOC_INO_LOOKUP) ret=%d, error: %s\n",
				ret, strerror(errno));
			return ret;
		}

		len = strlen(ino_lookup_arg.name);
		if (*path_len < len)
			return -EOVERFLOW;
		strcat(path, ino_lookup_arg.name);
		(*path_len) -= len;
	}

	if (*path_len < btrfs_stack_root_ref_name_len(backref_item))
		return -EOVERFLOW;
	strncat(path, (char *)(backref_item + 1),
		btrfs_stack_root_ref_name_len(backref_item));
	(*path_len) -= btrfs_stack_root_ref_name_len(backref_item);
	return 0;
}

#ifdef BTRFS_COMPAT_SEND_NO_UUID_TREE
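/*
 * Count the number of bytes in @buf that are equal to @b; used below to
 * test whether a UUID is all zeroes.
 */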
static int count_bytes(void *buf, int len, char b)
{
	int cnt = 0;
	int i;

	for (i = 0; i < len; i++) {
		if (((char *)buf)[i] == b)
			cnt++;
	}
	return cnt;
}
void subvol_uuid_search_add(struct subvol_uuid_search *s,
			    struct subvol_info *si)
{
	int cnt;

	tree_insert(&s->root_id_subvols, si, subvol_search_by_root_id);
	tree_insert(&s->path_subvols, si, subvol_search_by_path);

	cnt = count_bytes(si->uuid, BTRFS_UUID_SIZE, 0);
	if (cnt != BTRFS_UUID_SIZE)
		tree_insert(&s->local_subvols, si, subvol_search_by_uuid);
	cnt = count_bytes(si->received_uuid, BTRFS_UUID_SIZE, 0);
	if (cnt != BTRFS_UUID_SIZE)
		tree_insert(&s->received_subvols, si,
			    subvol_search_by_received_uuid);
}
static struct subvol_info *tree_search(struct rb_root *root,
				       u64 root_id, const u8 *uuid,
				       u64 stransid, const char *path,
				       enum subvol_search_type type)
{
	struct rb_node *n = root->rb_node;
	struct subvol_info *entry;
	__s64 comp;

	while (n) {
		if (type == subvol_search_by_received_uuid) {
			entry = rb_entry(n, struct subvol_info,
					 rb_received_node);
			comp = memcmp(entry->received_uuid, uuid,
				      BTRFS_UUID_SIZE);
			if (!comp) {
				if (entry->stransid < stransid)
					comp = -1;
				else if (entry->stransid > stransid)
					comp = 1;
				else
					comp = 0;
			}
		} else if (type == subvol_search_by_uuid) {
			entry = rb_entry(n, struct subvol_info, rb_local_node);
			comp = memcmp(entry->uuid, uuid, BTRFS_UUID_SIZE);
		} else if (type == subvol_search_by_root_id) {
			entry = rb_entry(n, struct subvol_info,
					 rb_root_id_node);
			comp = entry->root_id - root_id;
		} else if (type == subvol_search_by_path) {
			entry = rb_entry(n, struct subvol_info, rb_path_node);
			comp = strcmp(entry->path, path);
		} else {
			BUG();
		}

		if (comp < 0)
			n = n->rb_left;
		else if (comp > 0)
			n = n->rb_right;
		else
			return entry;
	}

	return NULL;
}

/*
 * This function is only called if the kernel doesn't support the uuid tree.
 */
static struct subvol_info *subvol_uuid_search_old(struct subvol_uuid_search *s,
				       u64 root_id, const u8 *uuid, u64 transid,
				       const char *path,
				       enum subvol_search_type type)
{
	struct rb_root *root;

	if (type == subvol_search_by_received_uuid)
		root = &s->received_subvols;
	else if (type == subvol_search_by_uuid)
		root = &s->local_subvols;
	else if (type == subvol_search_by_root_id)
		root = &s->root_id_subvols;
	else if (type == subvol_search_by_path)
		root = &s->path_subvols;
	else
		return NULL;

	return tree_search(root, root_id, uuid, transid, path, type);
}
#else
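/*
 * Without the compat code for kernels that lack a uuid tree there is no
 * in-memory cache to maintain; just free the entry the caller hands in.
 */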
void subvol_uuid_search_add(struct subvol_uuid_search *s,
			    struct subvol_info *si)
{
	if (si) {
		free(si->path);
		free(si);
	}
}
#endif
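
/*
 * Look up a subvolume by received uuid, uuid, root id or path, as selected
 * by @type.  On success a freshly allocated subvol_info describing the
 * subvolume (including its resolved path) is returned and must be freed by
 * the caller; NULL is returned if the lookup fails.
 */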
struct subvol_info *subvol_uuid_search(struct subvol_uuid_search *s,
				       u64 root_id, const u8 *uuid, u64 transid,
				       const char *path,
				       enum subvol_search_type type)
{
	int ret = 0;
	struct btrfs_root_item root_item;
	struct subvol_info *info = NULL;

#ifdef BTRFS_COMPAT_SEND_NO_UUID_TREE
	if (!s->uuid_tree_existed)
		return subvol_uuid_search_old(s, root_id, uuid, transid,
					      path, type);
#endif

	switch (type) {
	case subvol_search_by_received_uuid:
		ret = btrfs_lookup_uuid_received_subvol_item(s->mnt_fd, uuid,
							     &root_id);
		break;
	case subvol_search_by_uuid:
		ret = btrfs_lookup_uuid_subvol_item(s->mnt_fd, uuid, &root_id);
		break;
	case subvol_search_by_root_id:
		break;
	case subvol_search_by_path:
		ret = btrfs_get_root_id_by_sub_path(s->mnt_fd, path, &root_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto out;

	ret = btrfs_read_root_item(s->mnt_fd, root_id, &root_item);
	if (ret)
		goto out;

	info = calloc(1, sizeof(*info));
	info->root_id = root_id;
	memcpy(info->uuid, root_item.uuid, BTRFS_UUID_SIZE);
	memcpy(info->received_uuid, root_item.received_uuid, BTRFS_UUID_SIZE);
	memcpy(info->parent_uuid, root_item.parent_uuid, BTRFS_UUID_SIZE);
	info->ctransid = btrfs_root_ctransid(&root_item);
	info->otransid = btrfs_root_otransid(&root_item);
	info->stransid = btrfs_root_stransid(&root_item);
	info->rtransid = btrfs_root_rtransid(&root_item);
	if (type == subvol_search_by_path) {
		info->path = strdup(path);
	} else {
		info->path = malloc(PATH_MAX);
		ret = btrfs_subvolid_resolve(s->mnt_fd, info->path,
					     PATH_MAX, root_id);
	}

out:
	if (ret && info) {
		free(info->path);
		free(info);
		info = NULL;
	}

	return info;
}

#ifdef BTRFS_COMPAT_SEND_NO_UUID_TREE
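/*
 * Check whether the filesystem has a uuid tree by searching the root tree
 * for its ROOT_ITEM.  Returns 1 if present, 0 if not, or a negative value
 * on ioctl error.
 */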
static int is_uuid_tree_supported(int fd)
{
	int ret;
	struct btrfs_ioctl_search_args args;
	struct btrfs_ioctl_search_key *sk = &args.key;

	memset(&args, 0, sizeof(args));

	sk->tree_id = BTRFS_ROOT_TREE_OBJECTID;
	sk->min_objectid = BTRFS_UUID_TREE_OBJECTID;
	sk->max_objectid = BTRFS_UUID_TREE_OBJECTID;
	sk->max_type = BTRFS_ROOT_ITEM_KEY;
	sk->min_type = BTRFS_ROOT_ITEM_KEY;
	sk->max_offset = (u64)-1;
	sk->max_transid = (u64)-1;
	sk->nr_items = 1;

	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);
	if (ret < 0)
		return ret;

	/* the ioctl returns the number of items it found in nr_items */
	if (sk->nr_items == 0)
		return 0;

	return 1;
}

/*
 * This function is mainly used to read all root items.  It is only needed
 * with older kernels that do not support the uuid tree yet.
 */
int subvol_uuid_search_init(int mnt_fd, struct subvol_uuid_search *s)
{
	int ret;
	struct btrfs_ioctl_search_args args;
	struct btrfs_ioctl_search_key *sk = &args.key;
	struct btrfs_ioctl_search_header *sh;
	struct btrfs_root_item *root_item_ptr;
	struct btrfs_root_item root_item = {};
	struct subvol_info *si = NULL;
	int root_item_valid = 0;
	unsigned long off = 0;
	int i;
	int e;
	char *path;

	s->mnt_fd = mnt_fd;

	s->root_id_subvols = RB_ROOT;
	s->local_subvols = RB_ROOT;
	s->received_subvols = RB_ROOT;
	s->path_subvols = RB_ROOT;

	ret = is_uuid_tree_supported(mnt_fd);
	if (ret < 0) {
		fprintf(stderr,
			"ERROR: check if we support uuid tree fails - %s\n",
			strerror(errno));
		return ret;
	} else if (ret) {
		/* uuid tree is supported */
		s->uuid_tree_existed = 1;
		return 0;
	}

	memset(&args, 0, sizeof(args));

	sk->tree_id = BTRFS_ROOT_TREE_OBJECTID;
	sk->max_objectid = (u64)-1;
	sk->max_offset = (u64)-1;
	sk->max_transid = (u64)-1;
	sk->min_type = BTRFS_ROOT_ITEM_KEY;
	sk->max_type = BTRFS_ROOT_BACKREF_KEY;
	sk->nr_items = 4096;

	while (1) {
		ret = ioctl(mnt_fd, BTRFS_IOC_TREE_SEARCH, &args);
		e = errno;
		if (ret < 0) {
			fprintf(stderr, "ERROR: can't perform the search - %s\n",
				strerror(e));
			return ret;
		}
		if (sk->nr_items == 0)
			break;

		off = 0;

		for (i = 0; i < sk->nr_items; i++) {
			sh = (struct btrfs_ioctl_search_header *)(args.buf +
								  off);
			off += sizeof(*sh);

			if ((sh->objectid != 5 &&
			     sh->objectid < BTRFS_FIRST_FREE_OBJECTID) ||
			    sh->objectid > BTRFS_LAST_FREE_OBJECTID)
				goto skip;

			if (sh->type == BTRFS_ROOT_ITEM_KEY) {
				/* older kernels don't have uuids+times */
				if (sh->len < sizeof(root_item)) {
					root_item_valid = 0;
					goto skip;
				}
				root_item_ptr = (struct btrfs_root_item *)
						(args.buf + off);
				memcpy(&root_item, root_item_ptr,
				       sizeof(root_item));
				root_item_valid = 1;
			} else if (sh->type == BTRFS_ROOT_BACKREF_KEY ||
				   root_item_valid) {
				if (!root_item_valid)
					goto skip;

				path = btrfs_list_path_for_root(mnt_fd,
								sh->objectid);
				if (!path)
					path = strdup("");
				if (IS_ERR(path)) {
					ret = PTR_ERR(path);
					fprintf(stderr, "ERROR: unable to "
							"resolve path "
							"for root %llu\n",
						sh->objectid);
					goto out;
				}

				si = calloc(1, sizeof(*si));
				si->root_id = sh->objectid;
				memcpy(si->uuid, root_item.uuid,
				       BTRFS_UUID_SIZE);
				memcpy(si->parent_uuid, root_item.parent_uuid,
				       BTRFS_UUID_SIZE);
				memcpy(si->received_uuid,
				       root_item.received_uuid,
				       BTRFS_UUID_SIZE);
				si->ctransid = btrfs_root_ctransid(&root_item);
				si->otransid = btrfs_root_otransid(&root_item);
				si->stransid = btrfs_root_stransid(&root_item);
				si->rtransid = btrfs_root_rtransid(&root_item);
				si->path = path;
				subvol_uuid_search_add(s, si);
				root_item_valid = 0;
			} else {
				goto skip;
			}

skip:
			off += sh->len;

			/*
			 * record the mins in sk so we can make sure the
			 * next search doesn't repeat this root
			 */
			sk->min_objectid = sh->objectid;
			sk->min_offset = sh->offset;
			sk->min_type = sh->type;
		}
		sk->nr_items = 4096;
		if (sk->min_offset < (u64)-1)
			sk->min_offset++;
		else if (sk->min_objectid < (u64)-1) {
			sk->min_objectid++;
			sk->min_offset = 0;
			sk->min_type = 0;
		} else
			break;
	}

out:
	return ret;
}
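
/*
 * Tear down the lookup trees that subvol_uuid_search_init() may have built
 * in compat mode and free the cached subvol_info entries.
 */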
void subvol_uuid_search_finit(struct subvol_uuid_search *s)
{
	struct rb_root *root = &s->root_id_subvols;
	struct rb_node *node;

	if (!s->uuid_tree_existed)
		return;

	while ((node = rb_first(root))) {
		struct subvol_info *entry =
			rb_entry(node, struct subvol_info, rb_root_id_node);

		free(entry->path);
		rb_erase(node, root);
		free(entry);
	}

	s->root_id_subvols = RB_ROOT;
	s->local_subvols = RB_ROOT;
	s->received_subvols = RB_ROOT;
	s->path_subvols = RB_ROOT;
}
#else
int subvol_uuid_search_init(int mnt_fd, struct subvol_uuid_search *s)
{
	s->mnt_fd = mnt_fd;

	return 0;
}

void subvol_uuid_search_finit(struct subvol_uuid_search *s)
{
}
#endif
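
/*
 * Concatenate @p1 and @p2 into @out with exactly one '/' between them.
 * @out must be able to hold PATH_MAX bytes; -ENAMETOOLONG is returned if
 * the result would not fit.
 */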
int path_cat_out(char *out, const char *p1, const char *p2)
{
	int p1_len = strlen(p1);
	int p2_len = strlen(p2);

	if (p1_len + p2_len + 2 >= PATH_MAX)
		return -ENAMETOOLONG;

	if (p1_len && p1[p1_len - 1] == '/')
		p1_len--;
	if (p2_len && p2[p2_len - 1] == '/')
		p2_len--;
	sprintf(out, "%.*s/%.*s", p1_len, p1, p2_len, p2);

	return 0;
}
__attribute__((deprecated))
char *path_cat(const char *p1, const char *p2)
{
	int p1_len = strlen(p1);
	int p2_len = strlen(p2);
	char *new = malloc(p1_len + p2_len + 2);

	path_cat_out(new, p1, p2);

	return new;
}
int path_cat3_out(char *out, const char *p1, const char *p2, const char *p3)
{
	int p1_len = strlen(p1);
	int p2_len = strlen(p2);
	int p3_len = strlen(p3);

	if (p1_len + p2_len + p3_len + 3 >= PATH_MAX)
		return -ENAMETOOLONG;

	if (p1_len && p1[p1_len - 1] == '/')
		p1_len--;
	if (p2_len && p2[p2_len - 1] == '/')
		p2_len--;
	if (p3_len && p3[p3_len - 1] == '/')
		p3_len--;
	sprintf(out, "%.*s/%.*s/%.*s", p1_len, p1, p2_len, p2, p3_len, p3);

	return 0;
}
__attribute__((deprecated))
char *path_cat3(const char *p1, const char *p2, const char *p3)
{
	int p1_len = strlen(p1);
	int p2_len = strlen(p2);
	int p3_len = strlen(p3);
	char *new = malloc(p1_len + p2_len + p3_len + 3);

	path_cat3_out(new, p1, p2, p3);

	return new;
}