/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#define _XOPEN_SOURCE 600
#define __USE_XOPEN2K
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <uuid/uuid.h>
#include <fcntl.h>
#include <unistd.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "math.h"

struct stripe {
	struct btrfs_device *dev;
	u64 physical;
};

static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}

#define is_parity_stripe(x) ( ((x) == BTRFS_RAID5_P_STRIPE) || ((x) == BTRFS_RAID6_Q_STRIPE) )

static LIST_HEAD(fs_uuids);

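/*
 * Search a device list for the device matching both devid and uuid.
 * Returns NULL if no such device is on the list.
 */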
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

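/* Find the btrfs_fs_devices entry whose fsid matches the given fsid. */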
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

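/*
 * Add a scanned device to the in-memory list of filesystems and their
 * devices.  A new btrfs_fs_devices entry is created for an unknown fsid,
 * and an already known device just has its path refreshed.
 */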
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->fd = -1;
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		device->label = kstrdup(disk_super->label, GFP_NOFS);
		if (!device->label) {
			kfree(device->name);
			kfree(device);
			return -ENOMEM;
		}
		device->total_devs = btrfs_super_num_devices(disk_super);
		device->super_bytes_used = btrfs_super_bytes_used(disk_super);
		device->total_bytes =
			btrfs_stack_device_total_bytes(&disk_super->dev_item);
		device->bytes_used =
			btrfs_stack_device_bytes_used(&disk_super->dev_item);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name, path)) {
		char *name = strdup(path);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

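/*
 * Close every open device fd in the given fs_devices (flushing it and
 * dropping the page cache first), free the device entries, and repeat
 * for any chained seed fs_devices before freeing the fs_devices itself.
 */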
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_device *device;

again:
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		if (device->fd != -1) {
			fsync(device->fd);
			if (posix_fadvise(device->fd, 0, 0, POSIX_FADV_DONTNEED))
				fprintf(stderr, "Warning, could not drop caches\n");
			close(device->fd);
			device->fd = -1;
		}
		device->writeable = 0;
		list_del(&device->dev_list);
		/* free the memory */
		free(device->name);
		free(device->label);
		free(device);
	}

	seed_devices = fs_devices->seed;
	fs_devices->seed = NULL;
	if (seed_devices) {
		struct btrfs_fs_devices *orig;

		orig = fs_devices;
		fs_devices = seed_devices;
		list_del(&orig->list);
		free(orig);
		goto again;
	} else {
		list_del(&fs_devices->list);
		free(fs_devices);
	}

	return 0;
}

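/*
 * Open an fd for every device on the list with the given open(2) flags
 * and remember the fds of the latest and lowest devids.  On failure all
 * devices are closed again.
 */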
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags)
{
	int fd;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->name) {
			printk("no name for device %llu, skip it now\n", device->devid);
			continue;
		}

		fd = open(device->name, flags);
		if (fd < 0) {
			ret = -errno;
			goto fail;
		}

		if (posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED))
			fprintf(stderr, "Warning, could not drop caches\n");

		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = fd;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = fd;
		device->fd = fd;
		if (flags & O_RDWR)
			device->writeable = 1;
	}
	return 0;
fail:
	btrfs_close_devices(fs_devices);
	return ret;
}

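/*
 * Read the superblock from an already opened fd and register the device
 * via device_list_add().  *total_devs is the number of devices recorded
 * in the superblock (forced to 1 for metadump images).
 */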
int btrfs_scan_one_device(int fd, const char *path,
			  struct btrfs_fs_devices **fs_devices_ret,
			  u64 *total_devs, u64 super_offset, int super_recover)
{
	struct btrfs_super_block *disk_super;
	char *buf;
	int ret;
	u64 devid;

	buf = malloc(4096);
	if (!buf) {
		ret = -ENOMEM;
		goto error;
	}
	disk_super = (struct btrfs_super_block *)buf;
	ret = btrfs_read_dev_super(fd, disk_super, super_offset, super_recover);
	if (ret < 0) {
		ret = -EIO;
		goto error_brelse;
	}
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_METADUMP)
		*total_devs = 1;
	else
		*total_devs = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	free(buf);
error:
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(path);
	return ret;
}

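/*
 * Find a free hole on the device and record it as a dev extent item that
 * points back at the chunk it belongs to.  *start returns the physical
 * offset chosen on the device.
 */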
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset,
				  u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

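/*
 * Find the logical offset right after the last chunk item owned by
 * 'objectid' in the chunk tree, i.e. where the next chunk can start.
 */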
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

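/*
 * Look up the highest existing device item and return the next unused
 * devid (1 if the tree has no device items yet).
 */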
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

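/*
 * Write the current in-memory fields of a device back into its device
 * item in the chunk tree.
 */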
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

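/*
 * Append a (disk key, chunk item) pair to the sys_chunk_array held in the
 * superblock copy, so system chunks can be found before the chunk tree is
 * readable.
 */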
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
	    > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

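/*
 * Convert a per-stripe size into the number of logical bytes a chunk of
 * the given RAID profile provides: mirrored profiles give one stripe
 * worth, RAID10 num_stripes/sub_stripes, RAID5/6 lose one or two parity
 * stripes, everything else scales with num_stripes.
 */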
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else if (type & BTRFS_BLOCK_GROUP_RAID5)
		return calc_size * (num_stripes - 1);
	else if (type & BTRFS_BLOCK_GROUP_RAID6)
		return calc_size * (num_stripes - 2);
	else
		return calc_size * num_stripes;
}

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO, add a way to store the preferred stripe size */
	return BTRFS_STRIPE_LEN;
}

/*
 * btrfs_device_avail_bytes - count bytes available for alloc_chunk
 *
 * It is not equal to "device->total_bytes - device->bytes_used".
 * We do not allocate any chunk in 1M at beginning of device, and not
 * allowed to allocate any chunk before alloc_start if it is specified.
 * So search holes from max(1M, alloc_start) to device->total_bytes.
 */
static int btrfs_device_avail_bytes(struct btrfs_trans_handle *trans,
				    struct btrfs_device *device,
				    u64 *avail_bytes)
{
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct extent_buffer *l;
	u64 search_start = root->fs_info->alloc_start;
	u64 search_end = device->total_bytes;
	u64 extent_end = 0;
	u64 free_bytes = 0;
	int ret;
	int slot = 0;

	search_start = max(BTRFS_BLOCK_RESERVED_1M_FOR_SUPER, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = root->fs_info->alloc_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	path->reada = 2;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			break;
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;
		if (key.offset > search_end)
			break;
		if (key.offset > search_start)
			free_bytes += key.offset - search_start;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
		if (search_start > search_end)
			break;
next:
		path->slots[0]++;
		cond_resched();
	}

	if (search_start < search_end)
		free_bytes += search_end - search_start;

	*avail_bytes = free_bytes;
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
			- sizeof(struct btrfs_item)		\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

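/*
 * Allocate a new chunk of the given block group type: pick stripe counts
 * and sizes for the RAID profile, reserve a dev extent on each chosen
 * device, insert the chunk item, and add the mapping to the cache tree.
 * *start and *num_bytes return the logical range of the new chunk.
 */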
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 8 * 1024 * 1024;
	u64 min_free;
	u64 max_chunk_size = 4 * calc_size;
	u64 avail = 0;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int max_stripes = 0;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;
	u64 offset;

	if (list_empty(dev_list)) {
		return -ENOSPC;
	}

	if (type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		    BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_DUP)) {
		if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
			calc_size = 8 * 1024 * 1024;
			max_chunk_size = calc_size * 2;
			min_stripe_size = 1 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS_SYS_CHUNK;
		} else if (type & BTRFS_BLOCK_GROUP_DATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 10 * calc_size;
			min_stripe_size = 64 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
			calc_size = 1024 * 1024 * 1024;
			max_chunk_size = 4 * calc_size;
			min_stripe_size = 32 * 1024 * 1024;
			max_stripes = BTRFS_MAX_DEVS(chunk_root);
		}
	}
	if (type & BTRFS_BLOCK_GROUP_RAID1) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & BTRFS_BLOCK_GROUP_DUP) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID5)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
		stripe_len = find_raid56_stripe_len(num_stripes - 1,
				    btrfs_super_stripesize(info->super_copy));
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID6)) {
		num_stripes = btrfs_super_num_devices(info->super_copy);
		if (num_stripes > max_stripes)
			num_stripes = max_stripes;
		if (num_stripes < 3)
			return -ENOSPC;
		min_stripes = 3;
		stripe_len = find_raid56_stripe_len(num_stripes - 2,
				    btrfs_super_stripesize(info->super_copy));
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
	    max_chunk_size) {
		calc_size = max_chunk_size;
		calc_size /= num_stripes;
		calc_size /= stripe_len;
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, calc_size, min_stripe_size);

	calc_size /= stripe_len;
	calc_size *= stripe_len;
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		ret = btrfs_device_avail_bytes(trans, device, &avail);
		if (ret)
			return ret;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &offset);
	if (ret)
		return ret;
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = offset;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);
	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = *num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
			    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}

	kfree(chunk);
	return ret;
}

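/*
 * Like btrfs_alloc_chunk(), but creates a chunk of exactly num_bytes with
 * a single stripe on the first device in the list; *start returns the
 * chunk's logical offset.
 */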
int btrfs_alloc_data_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *extent_root, u64 *start,
			   u64 num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head *dev_list = &info->fs_devices->devices;
	struct list_head *cur;
	struct map_lookup *map;
	u64 calc_size = 8 * 1024 * 1024;
	int num_stripes = 1;
	int sub_stripes = 0;
	int ret;
	int index;
	int stripe_len = BTRFS_STRIPE_LEN;
	struct btrfs_key key;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	calc_size = num_bytes;

	index = 0;
	cur = dev_list->next;
	device = list_entry(cur, struct btrfs_device, dev_list);

	while (index < num_stripes) {
		struct btrfs_stripe *stripe;

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		index++;
	}

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	map->ce.start = key.offset;
	map->ce.size = num_bytes;

	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
	BUG_ON(ret);

	kfree(chunk);
	return ret;
}

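/*
 * Return how many copies of the data at 'logical' exist, based on the
 * RAID profile of the chunk that contains it (1 if the mapping is
 * missing or inconsistent, after warning on stderr).
 */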
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	int ret;

	ce = search_cache_extent(&map_tree->cache_tree, logical);
	if (!ce) {
		fprintf(stderr, "No mapping for %llu-%llu\n",
			(unsigned long long)logical,
			(unsigned long long)logical+len);
		return 1;
	}
	if (ce->start > logical || ce->start + ce->size < logical) {
		fprintf(stderr, "Invalid mapping for %llu-%llu, got "
			"%llu-%llu\n", (unsigned long long)logical,
			(unsigned long long)logical+len,
			(unsigned long long)ce->start,
			(unsigned long long)ce->start + ce->size);
		return 1;
	}
	map = container_of(ce, struct map_lookup, ce);

	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	return ret;
}

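/*
 * Walk the mapping cache from *logical and return the start and size of
 * the next metadata chunk, or -ENOENT when there is none.
 */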
int btrfs_next_metadata(struct btrfs_mapping_tree *map_tree, u64 *logical,
			u64 *size)
{
	struct cache_extent *ce;
	struct map_lookup *map;

	ce = search_cache_extent(&map_tree->cache_tree, *logical);

	while (ce) {
		ce = next_cache_extent(ce);
		if (!ce)
			return -ENOENT;

		map = container_of(ce, struct map_lookup, ce);
		if (map->type & BTRFS_BLOCK_GROUP_METADATA) {
			*logical = ce->start;
			*size = ce->size;
			return 0;
		}
	}

	return -ENOENT;
}

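/*
 * Reverse map a physical offset (optionally restricted to one devid)
 * inside a chunk back to the logical addresses that reference it.
 * Returns the array of addresses in *logical along with its length and
 * the stripe length used for the mapping.
 */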
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	ce = search_cache_extent(&map_tree->cache_tree, chunk_start);
	BUG_ON(!ce);
	map = container_of(ce, struct map_lookup, ce);

	length = ce->size;
	rmap_len = map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = ce->size / (map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = ce->size / map->num_stripes;
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			      BTRFS_BLOCK_GROUP_RAID6)) {
		length = ce->size / nr_data_stripes(map);
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = (physical - map->stripes[i].physical) /
			    map->stripe_len;

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = (stripe_nr * map->num_stripes + i) /
				    map->sub_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = ce->start + stripe_nr * rmap_len;
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	return 0;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_multi_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

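/*
 * Convenience wrapper around __btrfs_map_block() that does not ask for
 * the chunk type.
 */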
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num,
		    u64 **raid_map_ret)
{
	return __btrfs_map_block(map_tree, rw, logical, length, NULL,
				 multi_ret, mirror_num, raid_map_ret);
}

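/*
 * Map a logical address to the physical stripes that hold it.  *length is
 * clamped to what fits in one stripe, and *multi_ret lists the stripes to
 * read or write (all of them for RAID5/6 writes or recovery, in which
 * case *raid_map_ret describes the parity layout).
 */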
int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
|
|
|
|
u64 logical, u64 *length, u64 *type,
|
2009-07-11 17:12:37 +00:00
|
|
|
struct btrfs_multi_bio **multi_ret, int mirror_num,
|
|
|
|
u64 **raid_map_ret)
|
2008-03-24 19:03:18 +00:00
|
|
|
{
|
|
|
|
struct cache_extent *ce;
|
|
|
|
struct map_lookup *map;
|
|
|
|
u64 offset;
|
2008-03-25 20:50:20 +00:00
|
|
|
u64 stripe_offset;
|
|
|
|
u64 stripe_nr;
|
2009-07-11 17:12:37 +00:00
|
|
|
u64 *raid_map = NULL;
|
2008-04-09 20:28:12 +00:00
|
|
|
int stripes_allocated = 8;
|
2008-04-16 15:14:21 +00:00
|
|
|
int stripes_required = 1;
|
2008-03-25 20:50:20 +00:00
|
|
|
int stripe_index;
|
2008-04-09 20:28:12 +00:00
|
|
|
int i;
|
|
|
|
struct btrfs_multi_bio *multi = NULL;
|
|
|
|
|
|
|
|
if (multi_ret && rw == READ) {
|
|
|
|
stripes_allocated = 1;
|
|
|
|
}
|
|
|
|
again:
|
2013-07-03 13:25:15 +00:00
|
|
|
ce = search_cache_extent(&map_tree->cache_tree, logical);
|
2011-08-26 13:51:36 +00:00
|
|
|
if (!ce) {
|
2013-12-12 10:41:07 +00:00
|
|
|
kfree(multi);
|
2011-08-26 13:51:36 +00:00
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
if (ce->start > logical || ce->start + ce->size < logical) {
|
2013-12-12 10:41:07 +00:00
|
|
|
kfree(multi);
|
2011-08-26 13:51:36 +00:00
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2008-04-09 20:28:12 +00:00
|
|
|
if (multi_ret) {
|
|
|
|
multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
|
|
|
|
GFP_NOFS);
|
|
|
|
if (!multi)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2008-03-24 19:03:18 +00:00
|
|
|
map = container_of(ce, struct map_lookup, ce);
|
|
|
|
offset = logical - ce->start;
|
2008-03-25 20:50:20 +00:00
|
|
|
|
2008-04-16 15:14:21 +00:00
|
|
|
if (rw == WRITE) {
|
|
|
|
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
|
|
|
|
BTRFS_BLOCK_GROUP_DUP)) {
|
|
|
|
stripes_required = map->num_stripes;
|
|
|
|
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
|
|
|
|
stripes_required = map->sub_stripes;
|
|
|
|
}
|
|
|
|
}
|
2009-07-11 17:12:37 +00:00
|
|
|
if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)
|
|
|
|
&& multi_ret && ((rw & WRITE) || mirror_num > 1) && raid_map_ret) {
|
|
|
|
/* RAID[56] write or recovery. Return all stripes */
|
|
|
|
stripes_required = map->num_stripes;
|
|
|
|
|
|
|
|
/* Only allocate the map if we've already got a large enough multi_ret */
|
|
|
|
if (stripes_allocated >= stripes_required) {
|
|
|
|
raid_map = kmalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
|
|
|
|
if (!raid_map) {
|
|
|
|
kfree(multi);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-04-09 20:28:12 +00:00
|
|
|
/* if our multi bio struct is too small, back off and try again */
|
2009-07-11 17:12:37 +00:00
|
|
|
if (multi_ret && stripes_allocated < stripes_required) {
|
|
|
|
stripes_allocated = stripes_required;
|
2008-04-09 20:28:12 +00:00
|
|
|
kfree(multi);
|
2013-02-25 22:54:38 +00:00
|
|
|
multi = NULL;
|
2008-04-09 20:28:12 +00:00
|
|
|
goto again;
|
|
|
|
}
|
2008-03-25 20:50:20 +00:00
|
|
|
stripe_nr = offset;
|
|
|
|
/*
|
|
|
|
* stripe_nr counts the total number of stripes we have to stride
|
|
|
|
* to get to this block
|
|
|
|
*/
|
|
|
|
stripe_nr = stripe_nr / map->stripe_len;
|
|
|
|
|
|
|
|
stripe_offset = stripe_nr * map->stripe_len;
|
|
|
|
BUG_ON(offset < stripe_offset);
|
|
|
|
|
|
|
|
/* stripe_offset is the offset of this block in its stripe*/
|
|
|
|
stripe_offset = offset - stripe_offset;
|
|
|
|
|
2008-04-09 20:28:12 +00:00
|
|
|
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
|
2009-07-11 17:12:37 +00:00
|
|
|
BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
|
2008-04-16 15:14:21 +00:00
|
|
|
BTRFS_BLOCK_GROUP_RAID10 |
|
2008-04-09 20:28:12 +00:00
|
|
|
BTRFS_BLOCK_GROUP_DUP)) {
|
|
|
|
/* we limit the length of each bio to what fits in a stripe */
|
|
|
|
*length = min_t(u64, ce->size - offset,
|
|
|
|
map->stripe_len - stripe_offset);
|
|
|
|
} else {
|
|
|
|
*length = ce->size - offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!multi_ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
multi->num_stripes = 1;
|
|
|
|
stripe_index = 0;
|
2008-04-03 20:35:48 +00:00
|
|
|
if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
|
|
|
|
if (rw == WRITE)
|
2008-04-09 20:28:12 +00:00
|
|
|
multi->num_stripes = map->num_stripes;
|
2008-04-09 20:28:12 +00:00
|
|
|
else if (mirror_num)
|
|
|
|
stripe_index = mirror_num - 1;
|
2008-04-09 20:28:12 +00:00
|
|
|
else
|
2008-04-03 20:35:48 +00:00
|
|
|
stripe_index = stripe_nr % map->num_stripes;
|
2008-04-16 15:14:21 +00:00
|
|
|
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
|
|
|
|
int factor = map->num_stripes / map->sub_stripes;
|
|
|
|
|
|
|
|
stripe_index = stripe_nr % factor;
|
|
|
|
stripe_index *= map->sub_stripes;
|
|
|
|
|
|
|
|
if (rw == WRITE)
|
|
|
|
multi->num_stripes = map->sub_stripes;
|
|
|
|
else if (mirror_num)
|
|
|
|
stripe_index += mirror_num - 1;
|
|
|
|
|
|
|
|
stripe_nr = stripe_nr / factor;
|
2008-04-03 20:35:48 +00:00
|
|
|
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
|
2008-04-09 20:28:12 +00:00
|
|
|
if (rw == WRITE)
|
|
|
|
multi->num_stripes = map->num_stripes;
|
2008-04-09 20:28:12 +00:00
|
|
|
else if (mirror_num)
|
|
|
|
stripe_index = mirror_num - 1;
|
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6)) {

		if (raid_map) {
			int rot;
			u64 tmp;
			u64 raid56_full_stripe_start;
			u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len;

			/*
			 * align the start of our data stripe in the logical
			 * address space
			 */
			raid56_full_stripe_start = offset / full_stripe_len;
			raid56_full_stripe_start *= full_stripe_len;

			/* get the data stripe number */
			stripe_nr = raid56_full_stripe_start / map->stripe_len;
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/* Work out the disk rotation on this stripe-set */
			rot = stripe_nr % map->num_stripes;

			/* Fill in the logical address of each stripe */
			tmp = stripe_nr * nr_data_stripes(map);

			for (i = 0; i < nr_data_stripes(map); i++)
				raid_map[(i+rot) % map->num_stripes] =
					ce->start + (tmp + i) * map->stripe_len;

			raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE;
			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
				raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE;

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
			multi->num_stripes = map->num_stripes;
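			/*
			 * Layout example (illustrative, assuming 4 devices
			 * with RAID6, i.e. 2 data stripes plus P and Q):
			 * with rot == 1 the loop above produces
			 * raid_map = { Q, data0, data1, P }; rot advances
			 * with stripe_nr, so the parity slots rotate across
			 * the devices from one full stripe to the next.
			 */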
		} else {
			stripe_index = stripe_nr % nr_data_stripes(map);
			stripe_nr = stripe_nr / nr_data_stripes(map);

			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = stripe_nr % map->num_stripes;
		stripe_nr = stripe_nr / map->num_stripes;
	}
	BUG_ON(stripe_index >= map->num_stripes);

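	/*
	 * Translate the logical position into per-device byte addresses:
	 * each returned stripe is that device's chunk start, plus the whole
	 * stripes already walked on the device (stripe_nr * stripe_len),
	 * plus the offset inside the current stripe.
	 */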
	for (i = 0; i < multi->num_stripes; i++) {
		multi->stripes[i].physical =
			map->stripes[stripe_index].physical + stripe_offset +
			stripe_nr * map->stripe_len;
		multi->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	*multi_ret = multi;

	if (type)
		*type = map->type;

	if (raid_map) {
		sort_parity_stripes(multi, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	return 0;
}

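/*
 * Look up a device by devid and uuid, starting with this filesystem's own
 * device list and then walking any seed filesystems chained behind it; a
 * NULL fsid matches devices from any of those lists.
 */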
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

struct btrfs_device *
btrfs_find_device_by_devid(struct btrfs_fs_devices *fs_devices,
			   u64 devid, int instance)
{
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *dev;
	int num_found = 0;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid && num_found++ == instance)
			return dev;
	}
	return NULL;
}

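/*
 * A chunk is treated as read-only when any of the devices backing its
 * stripes is not writeable (for example a missing device filled in by
 * fill_missing_device()), since the full stripe set can no longer be
 * written.
 */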
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct cache_extent *ce;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	/*
	 * During chunk recovering, we may fail to find block group's
	 * corresponding chunk, we will rebuild it later
	 */
	ce = search_cache_extent(&map_tree->cache_tree, chunk_offset);
	if (!root->fs_info->is_chunk_recover)
		BUG_ON(!ce);
	else
		return 0;

	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}

	return readonly;
}

static struct btrfs_device *fill_missing_device(u64 devid)
{
	struct btrfs_device *device;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	device->devid = devid;
	device->fd = -1;
	return device;
}

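/*
 * Turn an on-disk chunk item into an in-memory map_lookup and insert it
 * into the logical->physical mapping tree, keyed by the chunk's logical
 * start (key->offset) and covering btrfs_chunk_length() bytes; stripes
 * whose device cannot be found get a placeholder "missing" device.
 */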
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct cache_extent *ce;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	ce = search_cache_extent(&map_tree->cache_tree, logical);

	/* already mapped? */
	if (ce && ce->start <= logical && ce->start + ce->size > logical) {
		return 0;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	map->ce.start = logical;
	map->ce.size = length;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);

	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev) {
			map->stripes[i].dev = fill_missing_device(devid);
			printf("warning, device %llu is missing\n",
			       (unsigned long long)devid);
		}

	}
	ret = insert_cache_extent(&map_tree->cache_tree, &map->ce);
	BUG_ON(ret);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

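/*
 * Make sure the seed filesystem identified by fsid is open: if it is not
 * already chained onto this filesystem's seed list, look it up among the
 * scanned filesystems, open its devices read-only and link it in.
 */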
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_open_devices(fs_devices, O_RDONLY);
	if (ret)
		goto out;

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret)
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device) {
		printk("warning devid %llu not found already\n",
		       (unsigned long long)devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		device->fd = -1;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	return ret;
}

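/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs describing the system
 * chunks; it is walked below with a plain pointer, advancing by the size
 * of the disk key and then by btrfs_chunk_item_size(num_stripes) for the
 * variable-sized chunk item.
 */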
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 len = 0;
	u8 *ptr;
	u8 *array_end;
	int ret = 0;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, sizeof(*super_copy));
	array_end = ((u8 *)super_copy->sys_chunk_array) +
		    btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
	ptr = super_copy->sys_chunk_array;

	while (ptr < array_end) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr - (u8 *)super_copy);
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(root, leaf, dev_item);
			BUG_ON(ret);
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			BUG_ON(ret);
		}
		path->slots[0]++;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

struct list_head *btrfs_scanned_uuids(void)
{
	return &fs_uuids;
}
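/*
 * Read-modify-write helper: read the stripe-sized buffer 'eb' from disk
 * and overlay whatever part of 'orig_eb' overlaps it, so a partially
 * covered stripe keeps its old contents outside the overlap.
 */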

static int rmw_eb(struct btrfs_fs_info *info,
		  struct extent_buffer *eb, struct extent_buffer *orig_eb)
{
	int ret;
	unsigned long orig_off = 0;
	unsigned long dest_off = 0;
	unsigned long copy_len = eb->len;

	ret = read_whole_eb(info, eb, 0);
	if (ret)
		return ret;

	if (eb->start + eb->len <= orig_eb->start ||
	    eb->start >= orig_eb->start + orig_eb->len)
		return 0;
	/*
	 * | ----- orig_eb ------- |
	 *         | ----- stripe ------- |
	 *         | ----- orig_eb ------- |
	 *              | ----- orig_eb ------- |
	 */
	if (eb->start > orig_eb->start)
		orig_off = eb->start - orig_eb->start;
	if (orig_eb->start > eb->start)
		dest_off = orig_eb->start - eb->start;

	if (copy_len > orig_eb->len - orig_off)
		copy_len = orig_eb->len - orig_off;
	if (copy_len > eb->len - dest_off)
		copy_len = eb->len - dest_off;

	memcpy(eb->data + dest_off, orig_eb->data + orig_off, copy_len);
	return 0;
}
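
/*
 * Carve the metadata buffer 'orig_eb' into one stripe_len-sized extent
 * buffer per data stripe, at the logical addresses recorded in raid_map;
 * stripes only partially covered by orig_eb go through rmw_eb() so the
 * untouched bytes are preserved.
 */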
static void split_eb_for_raid56(struct btrfs_fs_info *info,
				struct extent_buffer *orig_eb,
				struct extent_buffer **ebs,
				u64 stripe_len, u64 *raid_map,
				int num_stripes)
{
	struct extent_buffer *eb;
	u64 start = orig_eb->start;
	u64 this_eb_start;
	int i;
	int ret;

	for (i = 0; i < num_stripes; i++) {
		if (raid_map[i] >= BTRFS_RAID5_P_STRIPE)
			break;

		eb = malloc(sizeof(struct extent_buffer) + stripe_len);
		if (!eb)
			BUG();
		memset(eb, 0, sizeof(struct extent_buffer) + stripe_len);

		eb->start = raid_map[i];
		eb->len = stripe_len;
		eb->refs = 1;
		eb->flags = 0;
		eb->fd = -1;
		eb->dev_bytenr = (u64)-1;

		this_eb_start = raid_map[i];

		if (start > this_eb_start ||
		    start + orig_eb->len < this_eb_start + stripe_len) {
			ret = rmw_eb(info, eb, orig_eb);
			BUG_ON(ret);
		} else {
			memcpy(eb->data, orig_eb->data + eb->start - start, stripe_len);
		}
		ebs[i] = eb;
	}
}

int write_raid56_with_parity(struct btrfs_fs_info *info,
			     struct extent_buffer *eb,
			     struct btrfs_multi_bio *multi,
			     u64 stripe_len, u64 *raid_map)
{
	struct extent_buffer **ebs, *p_eb = NULL, *q_eb = NULL;
	int i;
	int j;
	int ret;
	int alloc_size = eb->len;

	ebs = kmalloc(sizeof(*ebs) * multi->num_stripes, GFP_NOFS);
	BUG_ON(!ebs);

	if (stripe_len > alloc_size)
		alloc_size = stripe_len;

	split_eb_for_raid56(info, eb, ebs, stripe_len, raid_map,
			    multi->num_stripes);

	for (i = 0; i < multi->num_stripes; i++) {
		struct extent_buffer *new_eb;
		if (raid_map[i] < BTRFS_RAID5_P_STRIPE) {
			ebs[i]->dev_bytenr = multi->stripes[i].physical;
			ebs[i]->fd = multi->stripes[i].dev->fd;
			multi->stripes[i].dev->total_ios++;
			BUG_ON(ebs[i]->start != raid_map[i]);
			continue;
		}
		new_eb = kmalloc(sizeof(*eb) + alloc_size, GFP_NOFS);
		BUG_ON(!new_eb);
		new_eb->dev_bytenr = multi->stripes[i].physical;
		new_eb->fd = multi->stripes[i].dev->fd;
		multi->stripes[i].dev->total_ios++;
		new_eb->len = stripe_len;

		if (raid_map[i] == BTRFS_RAID5_P_STRIPE)
			p_eb = new_eb;
		else if (raid_map[i] == BTRFS_RAID6_Q_STRIPE)
			q_eb = new_eb;
	}

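	/*
	 * Compute the parity stripes: with a Q stripe present (RAID6) the
	 * P/Q pair is generated by raid6_gen_syndrome() over all data
	 * stripes; with RAID5 only P is needed and it is simply the XOR of
	 * every data stripe, word by word.
	 */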
	if (q_eb) {
		void **pointers;

		pointers = kmalloc(sizeof(*pointers) * multi->num_stripes,
				   GFP_NOFS);
		BUG_ON(!pointers);
		ebs[multi->num_stripes - 2] = p_eb;
		ebs[multi->num_stripes - 1] = q_eb;

		for (i = 0; i < multi->num_stripes; i++)
			pointers[i] = ebs[i]->data;

		raid6_gen_syndrome(multi->num_stripes, stripe_len, pointers);
		kfree(pointers);
	} else {
		ebs[multi->num_stripes - 1] = p_eb;
		memcpy(p_eb->data, ebs[0]->data, stripe_len);
		for (j = 1; j < multi->num_stripes - 1; j++) {
			for (i = 0; i < stripe_len; i += sizeof(unsigned long)) {
				*(unsigned long *)(p_eb->data + i) ^=
					*(unsigned long *)(ebs[j]->data + i);
			}
		}
	}

	for (i = 0; i < multi->num_stripes; i++) {
		ret = write_extent_to_disk(ebs[i]);
		BUG_ON(ret);
		if (ebs[i] != eb)
			kfree(ebs[i]);
	}

	kfree(ebs);

	return 0;
}