Dynamic chunk allocation

This commit is contained in:
Chris Mason 2008-03-24 15:03:58 -04:00 committed by David Woodhouse
parent 510be29677
commit d12d4c7203
6 changed files with 223 additions and 94 deletions

12
ctree.h
View File

@ -419,10 +419,20 @@ struct btrfs_block_group_item {
__le64 flags; __le64 flags;
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
/*
 * Aggregate space accounting for one allocation profile
 * (BTRFS_BLOCK_GROUP_DATA / _METADATA / _SYSTEM).  All block groups
 * carrying the same flags share one btrfs_space_info, which
 * do_chunk_alloc() consults to decide when a new chunk is needed.
 */
struct btrfs_space_info {
u64 flags; /* which BTRFS_BLOCK_GROUP_* profile this entry tracks */
u64 total_bytes; /* sum of the sizes of all block groups in this profile */
u64 bytes_used; /* bytes consumed by allocated extents (update_block_group) */
u64 bytes_pinned; /* bytes pinned until transaction commit (update_pinned_extents) */
int full; /* set once chunk allocation for this profile returned -ENOSPC */
struct list_head list; /* link into fs_info->space_info */
};
struct btrfs_block_group_cache { struct btrfs_block_group_cache {
struct cache_extent cache; struct cache_extent cache;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_block_group_item item; struct btrfs_block_group_item item;
struct btrfs_space_info *space_info;
u64 pinned; u64 pinned;
u64 flags; u64 flags;
int cached; int cached;
@ -466,7 +476,7 @@ struct btrfs_fs_info {
struct list_head dirty_cowonly_roots; struct list_head dirty_cowonly_roots;
struct list_head devices; struct list_head devices;
struct list_head *last_device; struct list_head space_info;
int fp; int fp;
int force_system_allocs; int force_system_allocs;
void *priv_data; void *priv_data;

View File

@ -449,7 +449,7 @@ struct btrfs_root *open_ctree_fd(int fp, u64 sb_bytenr)
mutex_init(&fs_info->fs_mutex); mutex_init(&fs_info->fs_mutex);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
INIT_LIST_HEAD(&fs_info->devices); INIT_LIST_HEAD(&fs_info->devices);
fs_info->last_device = &fs_info->devices; INIT_LIST_HEAD(&fs_info->space_info);
__setup_root(4096, 4096, 4096, 4096, tree_root, __setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID); fs_info, BTRFS_ROOT_TREE_OBJECTID);

View File

@ -37,6 +37,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root); btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root); btrfs_root *extent_root);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
u64 type, u64 chunk_tree, u64 chunk_objectid,
u64 size);
static int cache_block_group(struct btrfs_root *root, static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group) struct btrfs_block_group_cache *block_group)
@ -168,16 +172,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{ {
if ((bits & BLOCK_GROUP_DATA) && return (cache->flags & bits);
(cache->flags & BTRFS_BLOCK_GROUP_DATA))
return 1;
if ((bits & BLOCK_GROUP_METADATA) &&
(cache->flags & BTRFS_BLOCK_GROUP_METADATA))
return 1;
if ((bits & BLOCK_GROUP_SYSTEM) &&
(cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
return 1;
return 0;
} }
static int noinline find_search_start(struct btrfs_root *root, static int noinline find_search_start(struct btrfs_root *root,
@ -270,6 +265,18 @@ static u64 div_factor(u64 num, int factor)
return num; return num;
} }
/*
 * Translate on-disk block group type flags (BTRFS_BLOCK_GROUP_*) into
 * the in-memory extent-state bits (BLOCK_GROUP_*) used to tag entries
 * in the block group cache tree.
 */
static int block_group_state_bits(u64 flags)
{
	int state = 0;

	state |= (flags & BTRFS_BLOCK_GROUP_DATA) ? BLOCK_GROUP_DATA : 0;
	state |= (flags & BTRFS_BLOCK_GROUP_METADATA) ? BLOCK_GROUP_METADATA : 0;
	state |= (flags & BTRFS_BLOCK_GROUP_SYSTEM) ? BLOCK_GROUP_SYSTEM : 0;

	return state;
}
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache struct btrfs_block_group_cache
*hint, u64 search_start, *hint, u64 search_start,
@ -296,7 +303,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
if (!owner) if (!owner)
factor = 8; factor = 8;
bit = data; bit = block_group_state_bits(data);
if (search_start) { if (search_start) {
struct btrfs_block_group_cache *shint; struct btrfs_block_group_cache *shint;
@ -344,10 +351,15 @@ again:
free_check = cache->key.offset; free_check = cache->key.offset;
else else
free_check = div_factor(cache->key.offset, factor); free_check = div_factor(cache->key.offset, factor);
if (used + cache->pinned < free_check) { if (used + cache->pinned < free_check) {
found_group = cache; found_group = cache;
goto found; goto found;
} }
if (full_search) {
printk("failed on cache %Lu used %Lu total %Lu\n",
cache->key.objectid, used, cache->key.offset);
}
cond_resched(); cond_resched();
} }
if (!full_search) { if (!full_search) {
@ -966,6 +978,58 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
return werr; return werr;
} }
/*
 * Look up the btrfs_space_info whose flags exactly match @flags on
 * info->space_info.  Returns NULL when no entry for that profile has
 * been created yet.
 */
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *cur;

	for (cur = info->space_info.next; cur != &info->space_info;
	     cur = cur->next) {
		struct btrfs_space_info *entry;

		entry = list_entry(cur, struct btrfs_space_info, list);
		if (entry->flags == flags)
			return entry;
	}
	return NULL;
}
/*
 * Ensure the allocation profile @flags has headroom for @alloc_bytes
 * more bytes, allocating a fresh chunk and registering it as a new
 * (initially empty) block group once usage crosses a fraction of the
 * profile's current capacity.  Returns 0 both on success and when the
 * profile is already known to be full; BUGs on unexpected failures.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 alloc_bytes,
u64 flags)
{
struct btrfs_space_info *space_info;
u64 thresh;
u64 start;
u64 num_bytes;
int ret;
space_info = __find_space_info(extent_root->fs_info, flags);
/* callers must have populated space_info for this profile already */
BUG_ON(!space_info);
/* a previous -ENOSPC latched here; no point retrying every call */
if (space_info->full)
return 0;
/* div_factor(total, 7) — presumably ~70% of capacity; TODO confirm */
thresh = div_factor(space_info->total_bytes, 7);
if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
thresh)
return 0;
ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
if (ret == -ENOSPC) {
/* no device had room: remember so we stop asking */
printk("space info full %Lu\n", flags);
space_info->full = 1;
return 0;
}
BUG_ON(ret);
/* bytes_used starts at 0; the chunk tree objectid names the owner */
ret = btrfs_make_block_group(trans, extent_root, 0, flags,
extent_root->fs_info->chunk_root->root_key.objectid,
start, num_bytes);
BUG_ON(ret);
return 0;
}
static int update_block_group(struct btrfs_trans_handle *trans, static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc, u64 bytenr, u64 num_bytes, int alloc,
@ -995,8 +1059,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
num_bytes = min(total, cache->key.offset - byte_in_group); num_bytes = min(total, cache->key.offset - byte_in_group);
if (alloc) { if (alloc) {
old_val += num_bytes; old_val += num_bytes;
cache->space_info->bytes_used += num_bytes;
} else { } else {
old_val -= num_bytes; old_val -= num_bytes;
cache->space_info->bytes_used -= num_bytes;
if (mark_free) { if (mark_free) {
set_extent_dirty(&info->free_space_cache, set_extent_dirty(&info->free_space_cache,
bytenr, bytenr + num_bytes - 1, bytenr, bytenr + num_bytes - 1,
@ -1009,6 +1075,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
} }
return 0; return 0;
} }
static int update_pinned_extents(struct btrfs_root *root, static int update_pinned_extents(struct btrfs_root *root,
u64 bytenr, u64 num, int pin) u64 bytenr, u64 num, int pin)
{ {
@ -1030,9 +1097,11 @@ static int update_pinned_extents(struct btrfs_root *root,
(bytenr - cache->key.objectid)); (bytenr - cache->key.objectid));
if (pin) { if (pin) {
cache->pinned += len; cache->pinned += len;
cache->space_info->bytes_pinned += len;
fs_info->total_pinned += len; fs_info->total_pinned += len;
} else { } else {
cache->pinned -= len; cache->pinned -= len;
cache->space_info->bytes_pinned -= len;
fs_info->total_pinned -= len; fs_info->total_pinned -= len;
} }
bytenr += len; bytenr += len;
@ -1456,7 +1525,7 @@ check_failed:
goto new_group; goto new_group;
} }
if (!(data & BLOCK_GROUP_DATA)) { if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
block_group = btrfs_lookup_block_group(info, ins->objectid); block_group = btrfs_lookup_block_group(info, ins->objectid);
if (block_group) if (block_group)
trans->block_group = block_group; trans->block_group = block_group;
@ -1515,12 +1584,26 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_extent_ref *ref; struct btrfs_extent_ref *ref;
struct btrfs_key keys[2]; struct btrfs_key keys[2];
if (data) if (data) {
data = BLOCK_GROUP_DATA; data = BTRFS_BLOCK_GROUP_DATA;
else if (info->force_system_allocs || root == root->fs_info->chunk_root) } else if (root == root->fs_info->chunk_root ||
data = BLOCK_GROUP_SYSTEM; info->force_system_allocs) {
else data = BTRFS_BLOCK_GROUP_SYSTEM;
data = BLOCK_GROUP_METADATA; } else {
data = BTRFS_BLOCK_GROUP_METADATA;
}
if (root->ref_cows) {
if (data != BTRFS_BLOCK_GROUP_METADATA) {
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes,
BTRFS_BLOCK_GROUP_METADATA);
BUG_ON(ret);
}
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes, data);
BUG_ON(ret);
}
WARN_ON(num_bytes < root->sectorsize); WARN_ON(num_bytes < root->sectorsize);
if (ops && ops->alloc_extent) { if (ops && ops->alloc_extent) {
@ -2064,6 +2147,34 @@ error:
return ret; return ret;
} }
/*
 * Fold @total_bytes / @bytes_used into the space_info entry for @flags,
 * creating the entry on first use.  The (possibly new) entry is handed
 * back through @space_info.  Returns 0 on success or -ENOMEM.
 */
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *entry;

	entry = __find_space_info(info, flags);
	if (!entry) {
		/* first block group of this profile: build a fresh entry */
		entry = kmalloc(sizeof(*entry), GFP_NOFS);
		if (!entry)
			return -ENOMEM;

		entry->flags = flags;
		entry->total_bytes = total_bytes;
		entry->bytes_used = bytes_used;
		entry->bytes_pinned = 0;
		entry->full = 0;
		list_add(&entry->list, &info->space_info);

		*space_info = entry;
		return 0;
	}

	entry->total_bytes += total_bytes;
	entry->bytes_used += bytes_used;
	/* used bytes exceeding capacity means the accounting went wrong */
	WARN_ON(entry->total_bytes < entry->bytes_used);
	*space_info = entry;
	return 0;
}
int btrfs_read_block_groups(struct btrfs_root *root) int btrfs_read_block_groups(struct btrfs_root *root)
{ {
struct btrfs_path *path; struct btrfs_path *path;
@ -2071,6 +2182,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
int bit; int bit;
struct btrfs_block_group_cache *cache; struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *info = root->fs_info; struct btrfs_fs_info *info = root->fs_info;
struct btrfs_space_info *space_info;
struct extent_io_tree *block_group_cache; struct extent_io_tree *block_group_cache;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_key found_key; struct btrfs_key found_key;
@ -2121,6 +2233,12 @@ int btrfs_read_block_groups(struct btrfs_root *root)
bit = BLOCK_GROUP_METADATA; bit = BLOCK_GROUP_METADATA;
} }
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
&space_info);
BUG_ON(ret);
cache->space_info = space_info;
/* use EXTENT_LOCKED to prevent merging */ /* use EXTENT_LOCKED to prevent merging */
set_extent_bits(block_group_cache, found_key.objectid, set_extent_bits(block_group_cache, found_key.objectid,
found_key.objectid + found_key.offset - 1, found_key.objectid + found_key.offset - 1,
@ -2138,33 +2256,13 @@ error:
return ret; return ret;
} }
/*
 * Insert a block group item into the extent tree, then flush the
 * pending extent insertions/deletions that the insert may have queued.
 * Returns the insert error first, otherwise the pending-extent error,
 * otherwise 0.
 */
static int btrfs_insert_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_block_group_item *bi)
{
int ret;
int pending_ret;
struct btrfs_root *extent_root;
extent_root = root->fs_info->extent_root;
ret = btrfs_insert_item(trans, extent_root, key, bi, sizeof(*bi));
/* NOTE(review): return value of finish_current_insert is ignored here */
finish_current_insert(trans, extent_root);
pending_ret = del_pending_extents(trans, extent_root);
if (ret)
return ret;
if (pending_ret)
return pending_ret;
return 0;
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans, int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used, struct btrfs_root *root, u64 bytes_used,
u64 type, u64 chunk_tree, u64 chunk_objectid, u64 type, u64 chunk_tree, u64 chunk_objectid,
u64 size) u64 size)
{ {
int ret; int ret;
int bit; int bit = 0;
struct btrfs_root *extent_root; struct btrfs_root *extent_root;
struct btrfs_block_group_cache *cache; struct btrfs_block_group_cache *cache;
struct extent_io_tree *block_group_cache; struct extent_io_tree *block_group_cache;
@ -2172,7 +2270,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
extent_root = root->fs_info->extent_root; extent_root = root->fs_info->extent_root;
block_group_cache = &root->fs_info->block_group_cache; block_group_cache = &root->fs_info->block_group_cache;
cache = malloc(sizeof(*cache)); cache = kmalloc(sizeof(*cache), GFP_NOFS);
BUG_ON(!cache); BUG_ON(!cache);
cache->key.objectid = chunk_objectid; cache->key.objectid = chunk_objectid;
cache->key.offset = size; cache->key.offset = size;
@ -2185,6 +2283,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->flags = type; cache->flags = type;
btrfs_set_block_group_flags(&cache->item, type); btrfs_set_block_group_flags(&cache->item, type);
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
&cache->space_info);
BUG_ON(ret);
if (type & BTRFS_BLOCK_GROUP_DATA) { if (type & BTRFS_BLOCK_GROUP_DATA) {
bit = BLOCK_GROUP_DATA; bit = BLOCK_GROUP_DATA;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
@ -2197,7 +2299,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
bit | EXTENT_LOCKED, GFP_NOFS); bit | EXTENT_LOCKED, GFP_NOFS);
set_state_private(block_group_cache, chunk_objectid, set_state_private(block_group_cache, chunk_objectid,
(unsigned long)cache); (unsigned long)cache);
ret = btrfs_insert_block_group(trans, root, &cache->key, &cache->item);
ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
sizeof(cache->item));
BUG_ON(ret);
finish_current_insert(trans, extent_root);
ret = del_pending_extents(trans, extent_root);
BUG_ON(ret); BUG_ON(ret);
return 0; return 0;
} }

12
list.h
View File

@ -279,7 +279,7 @@ static inline void list_splice_init(struct list_head *list,
* @head: the head for your list. * @head: the head for your list.
*/ */
#define list_for_each(pos, head) \ #define list_for_each(pos, head) \
for (pos = (head)->next; prefetch(pos->next), pos != (head); \ for (pos = (head)->next; pos != (head); \
pos = pos->next) pos = pos->next)
/** /**
@ -301,7 +301,7 @@ static inline void list_splice_init(struct list_head *list,
* @head: the head for your list. * @head: the head for your list.
*/ */
#define list_for_each_prev(pos, head) \ #define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ for (pos = (head)->prev; pos != (head); \
pos = pos->prev) pos = pos->prev)
/** /**
@ -322,7 +322,7 @@ static inline void list_splice_init(struct list_head *list,
*/ */
#define list_for_each_entry(pos, head, member) \ #define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \ for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_entry(pos->member.next, typeof(*pos), member))
/** /**
@ -333,7 +333,7 @@ static inline void list_splice_init(struct list_head *list,
*/ */
#define list_for_each_entry_reverse(pos, head, member) \ #define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \ for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member)) pos = list_entry(pos->member.prev, typeof(*pos), member))
/** /**
@ -358,7 +358,7 @@ static inline void list_splice_init(struct list_head *list,
*/ */
#define list_for_each_entry_continue(pos, head, member) \ #define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \ for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_entry(pos->member.next, typeof(*pos), member))
/** /**
@ -370,7 +370,7 @@ static inline void list_splice_init(struct list_head *list,
* Iterate over list of given type, continuing from current position. * Iterate over list of given type, continuing from current position.
*/ */
#define list_for_each_entry_from(pos, head, member) \ #define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \ for (; &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member)) pos = list_entry(pos->member.next, typeof(*pos), member))
/** /**

View File

@ -129,7 +129,7 @@ check_pending:
btrfs_release_path(root, path); btrfs_release_path(root, path);
BUG_ON(*start < search_start); BUG_ON(*start < search_start);
if (*start + num_bytes >= search_end) { if (*start + num_bytes > search_end) {
ret = -ENOSPC; ret = -ENOSPC;
goto error; goto error;
} }
@ -157,8 +157,9 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
return -ENOMEM; return -ENOMEM;
ret = find_free_dev_extent(trans, device, path, num_bytes, start); ret = find_free_dev_extent(trans, device, path, num_bytes, start);
if (ret) if (ret) {
goto err; goto err;
}
key.objectid = device->devid; key.objectid = device->devid;
key.offset = *start; key.offset = *start;
@ -212,22 +213,6 @@ error:
return ret; return ret;
} }
/*
 * Round-robin helper: return the device following @last on the list
 * anchored at @head, wrapping past the list head so the anchor itself
 * is never returned.  Returns NULL when the list is empty.
 */
static struct btrfs_device *next_device(struct list_head *head,
					struct list_head *last)
{
	struct list_head *candidate;

	if (list_empty(head))
		return NULL;

	candidate = last->next;
	if (candidate == head)
		candidate = candidate->next;

	return list_entry(candidate, struct btrfs_device, dev_list);
}
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path, static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
u64 *objectid) u64 *objectid)
{ {
@ -395,48 +380,74 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 *start, struct btrfs_root *extent_root, u64 *start,
u64 *num_bytes, u32 type) u64 *num_bytes, u64 type)
{ {
u64 dev_offset; u64 dev_offset;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
struct btrfs_stripe *stripes; struct btrfs_stripe *stripes;
struct btrfs_device *device = NULL; struct btrfs_device *device = NULL;
struct btrfs_chunk *chunk; struct btrfs_chunk *chunk;
struct list_head private_devs;
struct list_head *dev_list = &extent_root->fs_info->devices; struct list_head *dev_list = &extent_root->fs_info->devices;
struct list_head *last_dev = extent_root->fs_info->last_device; struct list_head *cur;
struct map_lookup *map; struct map_lookup *map;
u64 physical; u64 physical;
u64 calc_size; u64 calc_size = 1024 * 1024 * 1024;
int num_stripes; u64 avail;
u64 max_avail = 0;
int num_stripes = 1;
int looped = 0;
int ret; int ret;
int index = 0; int index;
struct btrfs_key key; struct btrfs_key key;
if (list_empty(dev_list))
return -ENOSPC;
again:
INIT_LIST_HEAD(&private_devs);
cur = dev_list->next;
index = 0;
/* build a private list of devices we will allocate from */
while(index < num_stripes) {
device = list_entry(cur, struct btrfs_device, dev_list);
avail = device->total_bytes - device->bytes_used;
cur = cur->next;
if (avail > max_avail)
max_avail = avail;
if (avail >= calc_size) {
list_move_tail(&device->dev_list, &private_devs);
index++;
}
if (cur == dev_list)
break;
}
if (index < num_stripes) {
list_splice(&private_devs, dev_list);
if (!looped && max_avail > 0) {
looped = 1;
calc_size = max_avail;
goto again;
}
return -ENOSPC;
}
ret = find_next_chunk(chunk_root, &key.objectid); ret = find_next_chunk(chunk_root, &key.objectid);
if (ret) if (ret)
return ret; return ret;
num_stripes = 1;
chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS); chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
if (!chunk) if (!chunk)
return -ENOMEM; return -ENOMEM;
stripes = &chunk->stripe; stripes = &chunk->stripe;
*num_bytes = calc_size;
index = 0;
while(index < num_stripes) { while(index < num_stripes) {
device = next_device(dev_list, last_dev); BUG_ON(list_empty(&private_devs));
BUG_ON(!device); cur = private_devs.next;
last_dev = &device->dev_list; device = list_entry(cur, struct btrfs_device, dev_list);
extent_root->fs_info->last_device = last_dev; list_move_tail(&device->dev_list, dev_list);
if (index == 0) {
int mask = device->io_align;
calc_size = (device->total_bytes * 95) / 100;
calc_size = device->total_bytes - calc_size;
calc_size = (calc_size / mask) * mask;
*num_bytes = calc_size;
}
ret = btrfs_alloc_dev_extent(trans, device, ret = btrfs_alloc_dev_extent(trans, device,
key.objectid, key.objectid,
@ -452,6 +463,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
physical = dev_offset; physical = dev_offset;
index++; index++;
} }
BUG_ON(!list_empty(&private_devs));
/* key.objectid was set above */ /* key.objectid was set above */
key.offset = *num_bytes; key.offset = *num_bytes;
@ -613,19 +625,18 @@ static int read_one_dev(struct btrfs_root *root, struct btrfs_key *key,
int ret; int ret;
devid = btrfs_device_id(leaf, dev_item); devid = btrfs_device_id(leaf, dev_item);
if (btrfs_find_device(root, devid)) device = btrfs_find_device(root, devid);
return 0; if (!device) {
device = kmalloc(sizeof(*device), GFP_NOFS);
device = kmalloc(sizeof(*device), GFP_NOFS); if (!device)
if (!device) return -ENOMEM;
return -ENOMEM; list_add(&device->dev_list, &root->fs_info->devices);
}
fill_device_from_item(leaf, dev_item, device); fill_device_from_item(leaf, dev_item, device);
device->dev_root = root->fs_info->dev_root; device->dev_root = root->fs_info->dev_root;
device->fd = 0; device->fd = 0;
list_add(&device->dev_list, &root->fs_info->devices);
memcpy(&device->dev_key, key, sizeof(*key)); memcpy(&device->dev_key, key, sizeof(*key));
ret = btrfs_open_device(device); ret = btrfs_open_device(device);
if (ret) { if (ret) {
kfree(device); kfree(device);

View File

@ -71,5 +71,5 @@ int btrfs_read_sys_array(struct btrfs_root *root);
int btrfs_read_chunk_tree(struct btrfs_root *root); int btrfs_read_chunk_tree(struct btrfs_root *root);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 *start, struct btrfs_root *extent_root, u64 *start,
u64 *num_bytes, u32 type); u64 *num_bytes, u64 type);
#endif #endif