2008-05-13 17:48:58 +00:00
|
|
|
|
2008-01-04 15:36:26 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2007 Oracle. All rights reserved.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public
|
|
|
|
* License v2 as published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public
|
|
|
|
* License along with this program; if not, write to the
|
|
|
|
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
|
|
* Boston, MA 021110-1307, USA.
|
|
|
|
*/
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <unistd.h>
|
2017-07-25 20:51:34 +00:00
|
|
|
#include <stdbool.h>
|
2008-01-04 15:36:26 +00:00
|
|
|
#include "kerncompat.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/extent_io.h"
|
2019-06-20 13:30:57 +00:00
|
|
|
#include "kernel-lib/list.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/ctree.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/volumes.h"
|
2019-06-19 23:46:21 +00:00
|
|
|
#include "common/utils.h"
|
2019-06-19 22:44:36 +00:00
|
|
|
#include "common/internal.h"
|
2008-01-04 15:36:26 +00:00
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
/*
 * Initialize an extent_io_tree: empty state and buffer caches, an empty
 * LRU list, and a default cache-size cap of 1/4 of total system memory.
 */
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	/* LRU list used to pick eviction victims when the cache is full */
	INIT_LIST_HEAD(&tree->lru);
	tree->cache_size = 0;
	/* Default cap: a quarter of total memory */
	tree->max_cache_size = (u64)total_memory() / 4;
}
|
|
|
|
|
|
|
|
/*
 * Initialize @tree like extent_io_tree_init() but with an explicit
 * upper bound on the extent buffer cache size instead of the default.
 */
void extent_io_tree_init_cache_max(struct extent_io_tree *tree,
				   u64 max_cache_size)
{
	extent_io_tree_init(tree);
	tree->max_cache_size = max_cache_size;
}
|
|
|
|
|
|
|
|
static struct extent_state *alloc_extent_state(void)
|
|
|
|
{
|
|
|
|
struct extent_state *state;
|
|
|
|
|
|
|
|
state = malloc(sizeof(*state));
|
|
|
|
if (!state)
|
|
|
|
return NULL;
|
2013-07-03 13:25:15 +00:00
|
|
|
state->cache_node.objectid = 0;
|
2008-01-04 15:36:26 +00:00
|
|
|
state->refs = 1;
|
|
|
|
state->state = 0;
|
2013-01-30 22:50:26 +00:00
|
|
|
state->xprivate = 0;
|
2008-01-04 15:36:26 +00:00
|
|
|
return state;
|
|
|
|
}
|
|
|
|
|
2013-07-03 13:25:13 +00:00
|
|
|
static void btrfs_free_extent_state(struct extent_state *state)
|
2008-01-04 15:36:26 +00:00
|
|
|
{
|
|
|
|
state->refs--;
|
|
|
|
BUG_ON(state->refs < 0);
|
|
|
|
if (state->refs == 0)
|
|
|
|
free(state);
|
|
|
|
}
|
|
|
|
|
2013-07-03 13:25:13 +00:00
|
|
|
static void free_extent_state_func(struct cache_extent *cache)
|
2008-01-04 15:36:26 +00:00
|
|
|
{
|
|
|
|
struct extent_state *es;
|
2013-07-03 13:25:13 +00:00
|
|
|
|
|
|
|
es = container_of(cache, struct extent_state, cache_node);
|
|
|
|
btrfs_free_extent_state(es);
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:51:34 +00:00
|
|
|
static void free_extent_buffer_final(struct extent_buffer *eb);
|
2013-07-03 13:25:13 +00:00
|
|
|
/*
 * Tear down @tree: release every extent buffer still on the LRU, then free
 * all remaining extent states.  A buffer with a non-zero refcount at this
 * point is a leak: it is reported and forced out of the cache.
 */
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	struct extent_buffer *eb;

	while(!list_empty(&tree->lru)) {
		eb = list_entry(tree->lru.next, struct extent_buffer, lru);
		if (eb->refs) {
			/* Still referenced: report the leak, then drop it */
			fprintf(stderr,
				"extent buffer leak: start %llu len %u\n",
				(unsigned long long)eb->start, eb->len);
			free_extent_buffer_nocache(eb);
		} else {
			free_extent_buffer_final(eb);
		}
	}

	/* Free all extent states still held by the state tree */
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}
|
|
|
|
|
|
|
|
/* Sync the embedded cache_node's [start, size) with the state's range. */
static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	/* end is inclusive, hence the + 1 */
	state->cache_node.size = state->end + 1 - state->start;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Utility function to look for merge candidates inside a given range.
|
|
|
|
* Any extents with matching state are merged together into a single
|
|
|
|
* extent in the tree. Extents with EXTENT_IO in their state field are
|
|
|
|
* not merged
|
|
|
|
*/
|
2008-03-04 16:16:54 +00:00
|
|
|
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	/* States with I/O bits set are never merged */
	if (state->state & EXTENT_IOBITS)
		return 0;

	/* Absorb the previous state if it is adjacent with identical bits */
	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	/* Then fold @state into the next state if adjacent and identical */
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			/* @state is consumed by the merge and freed here */
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* insert an extent_state struct into the tree. 'bits' are set on the
|
|
|
|
* struct before it is inserted.
|
|
|
|
*/
|
2008-03-04 16:16:54 +00:00
|
|
|
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	/* Set the requested bits and the range before inserting */
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	/* Coalesce with adjacent states carrying the same bits */
	merge_state(tree, state);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* split a given extent state struct in two, inserting the preallocated
|
|
|
|
* struct 'prealloc' as the newly created second half. 'split' indicates an
|
|
|
|
* offset inside 'orig' where it should be split.
|
|
|
|
*/
|
2008-03-04 16:16:54 +00:00
|
|
|
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	/* prealloc takes [orig->start, split - 1]; orig keeps [split, end] */
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* clear some bits on a range in the tree.
|
|
|
|
*/
|
2008-03-04 16:16:54 +00:00
|
|
|
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	/* Remember which of the requested bits were actually set */
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		/* No bits left: remove the state from the tree entirely */
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		/* Otherwise it may now be mergeable with its neighbors */
		merge_state(tree, state);
	}
	return ret;
}
|
|
|
|
|
2018-10-01 14:46:13 +00:00
|
|
|
/*
|
|
|
|
* extent_buffer_bitmap_set - set an area of a bitmap
|
|
|
|
* @eb: the extent buffer
|
|
|
|
* @start: offset of the bitmap item in the extent buffer
|
|
|
|
* @pos: bit number of the first bit
|
|
|
|
* @len: number of bits to set
|
|
|
|
*/
|
|
|
|
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	/* Bits available in the first (possibly partial) byte */
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	/* Fill whole bytes, one at a time */
	while (len >= bits_to_set) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	/* Set the remaining bits in the last partial byte */
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* extent_buffer_bitmap_clear - clear an area of a bitmap
|
|
|
|
* @eb: the extent buffer
|
|
|
|
* @start: offset of the bitmap item in the extent buffer
|
|
|
|
* @pos: bit number of the first bit
|
|
|
|
* @len: number of bits to clear
|
|
|
|
*/
|
|
|
|
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	/* Bits available in the first (possibly partial) byte */
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	/* Clear whole bytes, one at a time */
	while (len >= bits_to_clear) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	/* Clear the remaining bits in the last partial byte */
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}
|
|
|
|
|
2008-01-04 15:36:26 +00:00
|
|
|
/*
|
2013-06-24 07:09:15 +00:00
|
|
|
* clear some bits on a range in the tree.
|
2008-01-04 15:36:26 +00:00
|
|
|
*/
|
2017-02-09 16:42:02 +00:00
|
|
|
/*
 * Clear @bits on the range [@start, @end] in @tree, splitting states that
 * straddle a boundary.  Returns the OR of the bits that were actually set
 * on the range (non-negative), or -ENOMEM on allocation failure.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	/* Keep a preallocated state on hand in case a split is needed */
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			/* Avoid wrapping past the top of the address space */
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	/* State lies fully inside the range: clear the bits and advance */
	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* set some bits on a range in the tree.
|
|
|
|
*/
|
2017-02-09 16:42:02 +00:00
|
|
|
/*
 * Set @bits on the range [@start, @end] in @tree, inserting new states for
 * holes and splitting states that straddle a boundary.  Returns 0 on
 * success or -ENOMEM on allocation failure.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;
again:
	/* Keep a preallocated state on hand for an insert or split */
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		/* Nothing overlaps: insert a fresh state for the range */
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		/* Avoid wrapping past the top of the address space */
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start -1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	state->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;
search_again:
	if (start > end)
		goto out;
	goto again;
}
|
|
|
|
|
2017-02-09 16:42:02 +00:00
|
|
|
/* Mark the range [start, end] dirty in @tree. */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}
|
|
|
|
|
2017-02-09 16:42:02 +00:00
|
|
|
/* Clear the dirty bit on the range [start, end] in @tree. */
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
/*
 * Find the first extent state at or after @start that has any of @bits set.
 * On success the state's range is stored in *start_ret/*end_ret and 0 is
 * returned; 1 is returned when no matching state exists.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;

	/* Walk forward from the first state ending after @start */
	for (node = search_cache_extent(&tree->state, start); node;
	     node = next_cache_extent(node)) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			return 0;
		}
	}
	return 1;
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
/*
 * Test whether @bits are set on the range [@start, @end].  With @filled
 * non-zero, every byte of the range must be covered by states carrying the
 * bits; otherwise any overlapping state with the bits is enough.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		/* A gap before this state breaks full coverage */
		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;
		if (state->state & bits) {
			bitset = 1;
			/* Any overlap is enough when not requiring coverage */
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			/* Ran out of states before covering the range */
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
/*
 * Attach @private to the extent state that begins exactly at @start.
 * Returns 0 on success, -ENOENT if no state starts at that offset.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct extent_state *state;
	struct cache_extent *node;

	node = search_cache_extent(&tree->state, start);
	if (!node)
		return -ENOENT;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start)
		return -ENOENT;
	state->xprivate = private;
	return 0;
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
/*
 * Read back the private value from the extent state that begins exactly at
 * @start.  Returns 0 on success, -ENOENT if no state starts at that offset.
 */
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct extent_state *state;
	struct cache_extent *node;

	node = search_cache_extent(&tree->state, start);
	if (!node)
		return -ENOENT;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start)
		return -ENOENT;
	*private = state->xprivate;
	return 0;
}
|
|
|
|
|
2018-03-30 05:48:55 +00:00
|
|
|
/*
 * Allocate and initialize an extent buffer for [@bytenr, @bytenr + @blocksize).
 * Header and data payload come from one allocation.  Returns NULL on OOM.
 * The buffer starts with one reference and is NOT inserted into any cache.
 */
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	/* One allocation covers the struct plus the trailing data bytes */
	eb = calloc(1, sizeof(struct extent_buffer) + blocksize);
	if (!eb)
		return NULL;

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->fd = -1;
	eb->dev_bytenr = (u64)-1;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	INIT_LIST_HEAD(&eb->recow);
	INIT_LIST_HEAD(&eb->lru);
	/* NOTE(review): calloc already zeroed the data; this zeroing looks
	 * redundant unless memset_extent_buffer has side effects — verify */
	memset_extent_buffer(eb, 0, 0, blocksize);

	return eb;
}
|
|
|
|
|
2014-10-10 20:57:07 +00:00
|
|
|
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
|
|
|
|
{
|
|
|
|
struct extent_buffer *new;
|
|
|
|
|
2018-03-30 05:48:55 +00:00
|
|
|
new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
|
2016-07-28 00:04:30 +00:00
|
|
|
if (!new)
|
2014-10-10 20:57:07 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
copy_extent_buffer(new, src, 0, 0, src->len);
|
|
|
|
new->flags |= EXTENT_BUFFER_DUMMY;
|
|
|
|
|
|
|
|
return new;
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:51:34 +00:00
|
|
|
/*
 * Actually release @eb: unlink it from the LRU and, unless it is a dummy
 * buffer (never cached), remove it from the cache tree and adjust the
 * accounted cache size.  Must only be called with a zero refcount.
 */
static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	list_del_init(&eb->lru);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		/* cache_size accounting must never go negative */
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb);
}
|
|
|
|
|
|
|
|
/*
 * Drop one reference on @eb.  When the count hits zero, dummy buffers and
 * callers passing @free_now are freed immediately; cached buffers are kept
 * around for future lookups and reclaimed later by the cache trimmer.
 */
static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		/*
		 * A dirty eb at refcount zero can legitimately happen after
		 * an aborted transaction, so only warn instead of BUG_ON.
		 */
		if (eb->flags & EXTENT_DIRTY) {
			warning(
"dirty eb leak (aborted trans): start %llu len %u",
				eb->start, eb->len);
		}
		list_del_init(&eb->recow);
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}
|
|
|
|
|
2017-07-25 20:51:34 +00:00
|
|
|
/*
 * Drop a reference on @eb; the buffer may stay in the cache for reuse.
 * Pass the bool parameter as `false` (stdbool is available) rather than
 * the bare integer 0 for clarity.
 */
void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, false);
}
|
|
|
|
|
|
|
|
/*
 * Drop a reference on @eb and free it immediately when the count reaches
 * zero, bypassing the cache.  Uses the `true` bool literal instead of 1.
 */
void free_extent_buffer_nocache(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, true);
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
|
2008-01-04 15:36:26 +00:00
|
|
|
u64 bytenr, u32 blocksize)
|
|
|
|
{
|
|
|
|
struct extent_buffer *eb = NULL;
|
|
|
|
struct cache_extent *cache;
|
|
|
|
|
2013-07-03 13:25:15 +00:00
|
|
|
cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
|
2013-07-03 13:25:13 +00:00
|
|
|
if (cache && cache->start == bytenr &&
|
|
|
|
cache->size == blocksize) {
|
2008-01-04 15:36:26 +00:00
|
|
|
eb = container_of(cache, struct extent_buffer, cache_node);
|
|
|
|
list_move_tail(&eb->lru, &tree->lru);
|
|
|
|
eb->refs++;
|
|
|
|
}
|
|
|
|
return eb;
|
|
|
|
}
|
|
|
|
|
2008-03-04 16:16:54 +00:00
|
|
|
struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
|
2008-01-04 15:36:26 +00:00
|
|
|
u64 start)
|
|
|
|
{
|
|
|
|
struct extent_buffer *eb = NULL;
|
|
|
|
struct cache_extent *cache;
|
|
|
|
|
2013-07-03 13:25:15 +00:00
|
|
|
cache = search_cache_extent(&tree->cache, start);
|
2008-01-04 15:36:26 +00:00
|
|
|
if (cache) {
|
|
|
|
eb = container_of(cache, struct extent_buffer, cache_node);
|
|
|
|
list_move_tail(&eb->lru, &tree->lru);
|
|
|
|
eb->refs++;
|
|
|
|
}
|
|
|
|
return eb;
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:51:34 +00:00
|
|
|
/*
 * Walk the LRU from coldest to hottest, freeing unreferenced buffers until
 * the cache drops to at most 90% of its configured maximum.
 */
static void trim_extent_buffer_cache(struct extent_io_tree *tree)
{
	struct extent_buffer *eb, *tmp;

	list_for_each_entry_safe(eb, tmp, &tree->lru, lru) {
		/* Only buffers nobody holds a reference to may be evicted */
		if (eb->refs == 0)
			free_extent_buffer_final(eb);
		if (tree->cache_size <= ((tree->max_cache_size * 9) / 10))
			break;
	}
}
|
|
|
|
|
2018-03-30 05:48:55 +00:00
|
|
|
/*
 * Get the extent buffer for [@bytenr, @bytenr + @blocksize): return the
 * cached buffer on an exact hit, otherwise drop any partially overlapping
 * buffer, allocate a new one, insert it into the cache, and possibly trim
 * the cache back under its size cap.  Returns NULL on failure.
 */
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		/* Exact hit: take a reference and refresh LRU position */
		eb = container_of(cache, struct extent_buffer, cache_node);
		list_move_tail(&eb->lru, &tree->lru);
		eb->refs++;
	} else {
		int ret;

		/* A partially overlapping buffer must be released first */
		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			free(eb);
			return NULL;
		}
		list_add_tail(&eb->lru, &tree->lru);
		tree->cache_size += blocksize;
		/* Evict cold buffers once the cache exceeds its cap */
		if (tree->cache_size >= tree->max_cache_size)
			trim_extent_buffer_cache(tree);
	}
	return eb;
}
|
|
|
|
|
btrfs-progs: disk-io: Verify the bytenr passed in is mapped for read_tree_block()
[BUG]
For a fuzzed image, `btrfs check` will segfault at open_ctree() stage:
$ btrfs check --mode=lowmem issue_207.raw
Opening filesystem to check...
extent_io.c:665: free_extent_buffer_internal: BUG_ON `eb->refs < 0` triggered, value 1
btrfs(+0x6bf67)[0x56431d278f67]
btrfs(+0x6c16e)[0x56431d27916e]
btrfs(alloc_extent_buffer+0x45)[0x56431d279db5]
btrfs(read_tree_block+0x59)[0x56431d2848f9]
btrfs(btrfs_setup_all_roots+0x29c)[0x56431d28535c]
btrfs(+0x78903)[0x56431d285903]
btrfs(open_ctree_fs_info+0x90)[0x56431d285b60]
btrfs(+0x45a01)[0x56431d252a01]
btrfs(main+0x94)[0x56431d2220c4]
/usr/lib/libc.so.6(__libc_start_main+0xf3)[0x7f6e28519153]
btrfs(_start+0x2e)[0x56431d22235e]
[CAUSE]
The fuzzed image has a strange log root bytenr:
log_root 61440
log_root_transid 0
In fact, the log_root seems to be fuzzed, as its transid is 0, which is
invalid.
Note that range [61440, 77824) covers the physical offset of the primary
super block.
The bug is caused by the following sequence:
1. cache for tree block [64K, 68K) is created by open_ctree()
__open_ctree_fd()
|- btrfs_setup_chunk_tree_and_device_map()
|- btrfs_read_sys_array()
|- sb = btrfs_find_create_tree_block()
|- free_extent_buffer(sb)
This created an extent buffer [64K, 68K) in fs_info->extent_cache, then
reduce the refcount of that eb back to 0, but not freed yet.
2. Try to read that corrupted log root
__open_ctree_fd()
|- btrfs_setup_chunk_tree_and_device_map()
|- btrfs_setup_all_roots()
|- find_and_setup_log_root()
|- read_tree_block()
|- btrfs_find_create_tree_block()
|- alloc_extent_buffer()
The final alloc_extent_buffer() will try to free that cached eb
[64K, 68K), since it doesn't match with current search.
And since that cached eb is already released (refcount == 0), the
extra free_extent_buffer() will cause above BUG_ON().
[FIX]
Here we fix it through a more comprehensive method, instead of simply
verifying log_root_transid, here we just don't pollute eb cache when
reading sys chunk array.
So that we won't have an eb cache [64K, 68K), and will error out at
logical mapping phase.
Issue: #207
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2019-12-18 01:19:39 +00:00
|
|
|
/*
|
|
|
|
* Allocate a dummy extent buffer which won't be inserted into extent buffer
|
|
|
|
* cache.
|
|
|
|
*
|
|
|
|
* This mostly allows super block read write using existing eb infrastructure
|
|
|
|
* without pulluting the eb cache.
|
|
|
|
*
|
|
|
|
* This is especially important to avoid injecting eb->start == SZ_64K, as
|
|
|
|
* fuzzed image could have invalid tree bytenr covers super block range,
|
|
|
|
* and cause ref count underflow.
|
|
|
|
*/
|
|
|
|
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
|
|
|
|
u64 bytenr, u32 blocksize)
|
|
|
|
{
|
|
|
|
struct extent_buffer *ret;
|
|
|
|
|
|
|
|
ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
|
|
|
|
if (!ret)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ret->flags |= EXTENT_BUFFER_DUMMY;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-07-11 17:12:37 +00:00
|
|
|
int read_extent_from_disk(struct extent_buffer *eb,
|
|
|
|
unsigned long offset, unsigned long len)
|
2008-01-04 15:36:26 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2009-07-11 17:12:37 +00:00
|
|
|
ret = pread(eb->fd, eb->data + offset, len, eb->dev_bytenr);
|
2014-10-15 23:14:19 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
ret = -errno;
|
2008-01-04 15:36:26 +00:00
|
|
|
goto out;
|
2014-10-15 23:14:19 +00:00
|
|
|
}
|
2009-07-11 17:12:37 +00:00
|
|
|
if (ret != len) {
|
2008-01-04 15:36:26 +00:00
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int write_extent_to_disk(struct extent_buffer *eb)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
ret = pwrite(eb->fd, eb->data, eb->len, eb->dev_bytenr);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
if (ret != eb->len) {
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-04-04 13:57:50 +00:00
|
|
|
/*
 * Read @bytes of data at logical @offset into @buf, chunk mapping each
 * piece through btrfs_map_block().
 *
 * @info:   filesystem to read from
 * @buf:    destination buffer, at least @bytes large
 * @offset: logical start offset
 * @bytes:  total number of bytes to read
 * @mirror: mirror number to pass to btrfs_map_block()
 *
 * Returns 0 on success, the negative pread() result on read failure, or
 * -EIO on mapping failure, bad device fd, or short read.
 */
int read_data_from_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
			u64 bytes, int mirror)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = bytes;
	u64 read_len;
	u64 total_read = 0;
	int ret;

	while (bytes_left) {
		read_len = bytes_left;
		/*
		 * btrfs_map_block() trims read_len to the mapped stripe and
		 * fills @multi with the physical stripe locations.
		 */
		ret = btrfs_map_block(info, READ, offset, &read_len, &multi,
				      mirror, NULL);
		if (ret) {
			fprintf(stderr, "Couldn't map the block %Lu\n",
				offset);
			return -EIO;
		}
		/* Only the first stripe is read; @mirror selected it above */
		device = multi->stripes[0].dev;

		/* Never read past what the caller asked for */
		read_len = min(bytes_left, read_len);
		if (device->fd <= 0) {
			kfree(multi);
			return -EIO;
		}

		ret = pread(device->fd, buf + total_read, read_len,
			    multi->stripes[0].physical);
		/* Mapping data consumed; free before checking the result */
		kfree(multi);
		if (ret < 0) {
			fprintf(stderr, "Error reading %Lu, %d\n", offset,
				ret);
			return ret;
		}
		if (ret != read_len) {
			fprintf(stderr, "Short read for %Lu, read %d, "
				"read_len %Lu\n", offset, ret, read_len);
			return -EIO;
		}

		/* Advance to the next chunk-mapped piece */
		bytes_left -= read_len;
		offset += read_len;
		total_read += read_len;
	}

	return 0;
}
|
|
|
|
|
Btrfs-progs: enhance btrfs-image to restore image onto multiple disks
This adds a 'btrfs-image -m' option, which let us restore an image that
is built from a btrfs of multiple disks onto several disks altogether.
This aims to address the following case,
$ mkfs.btrfs -m raid0 sda sdb
$ btrfs-image sda image.file
$ btrfs-image -r image.file sdc
---------
so we can only restore metadata onto sdc, and another thing is we can
only mount sdc in degraded mode as we don't provide information about
another disk. And, it's built as RAID0 and we have only one disk,
so after mount sdc we'll get into readonly mode.
This is just annoying for people(like me) who're trying to restore image
but turn to find they cannot make it work.
So this'll make your life easier, just tap
$ btrfs-image -m image.file sdc sdd
---------
then you get everything about metadata done, the same offset with that of
the originals(of course, you need offer enough disk size, at least the disk
size of the original disks).
Besides, this also works with raid5 and raid6 metadata image.
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
2013-06-22 05:32:45 +00:00
|
|
|
/*
 * Write @bytes of data at logical @offset from @buf, chunk mapping each
 * piece through btrfs_map_block() and writing every stripe copy.
 *
 * @info:   filesystem to write to
 * @buf:    source buffer, at least @bytes large
 * @offset: logical start offset
 * @bytes:  total number of bytes to write
 * @mirror: mirror number to pass to btrfs_map_block()
 *
 * RAID5/6 mapped ranges (raid_map != NULL) are routed through
 * write_raid56_with_parity(); plain stripes are pwrite()n to every
 * stripe device.
 *
 * Returns 0 on success, a negative error on failure.
 */
int write_data_to_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
		       u64 bytes, int mirror)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = bytes;
	u64 this_len;
	u64 total_write = 0;
	u64 *raid_map = NULL;
	u64 dev_bytenr;
	int dev_nr;
	int ret = 0;

	while (bytes_left > 0) {
		this_len = bytes_left;
		dev_nr = 0;

		ret = btrfs_map_block(info, WRITE, offset, &this_len, &multi,
				      mirror, &raid_map);
		if (ret) {
			fprintf(stderr, "Couldn't map the block %Lu\n",
				offset);
			return -EIO;
		}

		if (raid_map) {
			/* RAID5/6: build a temporary eb and write with parity */
			struct extent_buffer *eb;
			u64 stripe_len = this_len;

			this_len = min(this_len, bytes_left);
			this_len = min(this_len, (u64)info->nodesize);

			eb = malloc(sizeof(struct extent_buffer) + this_len);
			if (!eb) {
				fprintf(stderr, "cannot allocate memory for eb\n");
				ret = -ENOMEM;
				goto out;
			}

			memset(eb, 0, sizeof(struct extent_buffer) + this_len);
			eb->start = offset;
			eb->len = this_len;

			memcpy(eb->data, buf + total_write, this_len);
			ret = write_raid56_with_parity(info, eb, multi,
						       stripe_len, raid_map);
			BUG_ON(ret);

			free(eb);
			kfree(raid_map);
			raid_map = NULL;
		} else while (dev_nr < multi->num_stripes) {
			/* Plain stripes: write the same data to every copy */
			device = multi->stripes[dev_nr].dev;
			if (device->fd <= 0) {
				kfree(multi);
				return -EIO;
			}

			dev_bytenr = multi->stripes[dev_nr].physical;
			this_len = min(this_len, bytes_left);
			dev_nr++;

			ret = pwrite(device->fd, buf + total_write, this_len, dev_bytenr);
			if (ret != this_len) {
				if (ret < 0) {
					fprintf(stderr, "Error writing to "
						"device %d\n", errno);
					/*
					 * Return a negative error code,
					 * consistent with the rest of the
					 * error paths in this file.
					 */
					ret = -errno;
					kfree(multi);
					return ret;
				} else {
					fprintf(stderr, "Short write\n");
					kfree(multi);
					return -EIO;
				}
			}
		}

		BUG_ON(bytes_left < this_len);

		bytes_left -= this_len;
		offset += this_len;
		total_write += this_len;

		kfree(multi);
		multi = NULL;
	}
	return 0;

out:
	/* Error exit from the raid_map path: @multi is still allocated here */
	kfree(multi);
	kfree(raid_map);
	return ret;
}
|
|
|
|
|
2008-01-04 15:36:26 +00:00
|
|
|
int set_extent_buffer_dirty(struct extent_buffer *eb)
|
|
|
|
{
|
2020-04-14 01:34:04 +00:00
|
|
|
struct extent_io_tree *tree = &eb->fs_info->extent_cache;
|
2008-01-04 15:36:26 +00:00
|
|
|
if (!(eb->flags & EXTENT_DIRTY)) {
|
|
|
|
eb->flags |= EXTENT_DIRTY;
|
2017-02-09 16:42:02 +00:00
|
|
|
set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
|
2008-01-04 15:36:26 +00:00
|
|
|
extent_buffer_get(eb);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int clear_extent_buffer_dirty(struct extent_buffer *eb)
|
|
|
|
{
|
2020-04-14 01:34:04 +00:00
|
|
|
struct extent_io_tree *tree = &eb->fs_info->extent_cache;
|
2008-01-04 15:36:26 +00:00
|
|
|
if (eb->flags & EXTENT_DIRTY) {
|
|
|
|
eb->flags &= ~EXTENT_DIRTY;
|
2017-02-09 16:42:02 +00:00
|
|
|
clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
|
2008-01-04 15:36:26 +00:00
|
|
|
free_extent_buffer(eb);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-17 07:59:33 +00:00
|
|
|
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
|
2008-01-04 15:36:26 +00:00
|
|
|
unsigned long start, unsigned long len)
|
|
|
|
{
|
|
|
|
return memcmp(eb->data + start, ptrv, len);
|
|
|
|
}
|
|
|
|
|
2019-06-17 07:59:33 +00:00
|
|
|
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
|
2008-01-04 15:36:26 +00:00
|
|
|
unsigned long start, unsigned long len)
|
|
|
|
{
|
|
|
|
memcpy(dst, eb->data + start, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void write_extent_buffer(struct extent_buffer *eb, const void *src,
|
|
|
|
unsigned long start, unsigned long len)
|
|
|
|
{
|
|
|
|
memcpy(eb->data + start, src, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
|
|
|
|
unsigned long dst_offset, unsigned long src_offset,
|
|
|
|
unsigned long len)
|
|
|
|
{
|
|
|
|
memcpy(dst->data + dst_offset, src->data + src_offset, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
|
|
|
|
unsigned long src_offset, unsigned long len)
|
|
|
|
{
|
|
|
|
memmove(dst->data + dst_offset, dst->data + src_offset, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void memset_extent_buffer(struct extent_buffer *eb, char c,
|
|
|
|
unsigned long start, unsigned long len)
|
|
|
|
{
|
|
|
|
memset(eb->data + start, c, len);
|
|
|
|
}
|
2015-09-30 03:51:45 +00:00
|
|
|
|
|
|
|
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
|
|
|
|
unsigned long nr)
|
|
|
|
{
|
2016-07-15 19:12:48 +00:00
|
|
|
return le_test_bit(nr, (u8 *)eb->data + start);
|
2015-09-30 03:51:45 +00:00
|
|
|
}
|