2008-01-04 16:29:55 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2007 Oracle. All rights reserved.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public
|
|
|
|
* License v2 as published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public
|
|
|
|
* License along with this program; if not, write to the
|
|
|
|
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
|
|
|
* Boston, MA 021110-1307, USA.
|
|
|
|
*/
|
|
|
|
|
2017-02-22 07:50:32 +00:00
|
|
|
/*
|
|
|
|
* Btrfs convert design:
|
|
|
|
*
|
|
|
|
* The overall design of btrfs convert is like the following:
|
|
|
|
*
|
|
|
|
* |<------------------Old fs----------------------------->|
|
|
|
|
* |<- used ->| |<- used ->| |<- used ->|
|
|
|
|
* ||
|
|
|
|
* \/
|
|
|
|
* |<---------------Btrfs fs------------------------------>|
|
|
|
|
* |<- Old data chunk ->|< new chunk (D/M/S)>|<- ODC ->|
|
|
|
|
* |<-Old-FE->| |<-Old-FE->|<- Btrfs extents ->|<-Old-FE->|
|
|
|
|
*
|
|
|
|
* ODC = Old data chunk, btrfs chunks containing old fs data
|
|
|
|
* Mapped 1:1 (logical address == device offset)
|
|
|
|
* Old-FE = file extents pointing to old fs.
|
|
|
|
*
|
|
|
|
* So old fs used space is (mostly) kept as is, while btrfs will insert
|
|
|
|
* its chunk (Data/Meta/Sys) into large enough free space.
|
|
|
|
* In this way, we can create different profiles for metadata/data for
|
|
|
|
* converted fs.
|
|
|
|
*
|
|
|
|
* We must reserve and relocate 3 ranges for btrfs:
|
|
|
|
* * [0, 1M) - area never used for any data except the first
|
|
|
|
* superblock
|
|
|
|
* * [btrfs_sb_offset(1), +64K) - 1st superblock backup copy
|
|
|
|
* * [btrfs_sb_offset(2), +64K) - 2nd, dtto
|
|
|
|
*
|
|
|
|
* Most work is spent handling corner cases around these reserved ranges.
|
|
|
|
*
|
|
|
|
* Detailed workflow is:
|
|
|
|
* 1) Scan old fs used space and calculate data chunk layout
|
|
|
|
* 1.1) Scan old fs
|
|
|
|
 * We can build a map of the used space of the old fs
|
|
|
|
*
|
|
|
|
* 1.2) Calculate data chunk layout - this is the hard part
|
2020-03-27 20:36:52 +00:00
|
|
|
* New data chunks must meet 3 conditions using result from 1.1
|
2017-02-22 07:50:32 +00:00
|
|
|
* a. Large enough to be a chunk
|
|
|
|
* b. Doesn't intersect reserved ranges
|
|
|
|
* c. Covers all the remaining old fs used space
|
|
|
|
*
|
|
|
|
* NOTE: This can be simplified if we don't need to handle backup supers
|
|
|
|
*
|
|
|
|
* 1.3) Calculate usable space for new btrfs chunks
|
|
|
|
* Btrfs chunk usable space must meet 3 conditions using result from 1.2
|
|
|
|
* a. Large enough to be a chunk
|
|
|
|
* b. Doesn't intersect reserved ranges
|
|
|
|
* c. Doesn't cover any data chunks in 1.1
|
|
|
|
*
|
|
|
|
* 2) Create basic btrfs filesystem structure
|
2018-11-26 17:01:42 +00:00
|
|
|
* Initial metadata and sys chunks are inserted in the first available
|
2017-02-22 07:50:32 +00:00
|
|
|
* space found in step 1.3
|
|
|
|
* Then insert all data chunks into the basic btrfs
|
|
|
|
*
|
|
|
|
* 3) Create convert image
|
|
|
|
* We need to relocate reserved ranges here.
|
|
|
|
* After this step, the convert image is done, and we can use the image
|
|
|
|
* as reflink source to create old files
|
|
|
|
*
|
|
|
|
* 4) Iterate old fs to create files
|
|
|
|
* We just reflink file extents from old fs to newly created files on
|
|
|
|
* btrfs.
|
|
|
|
*/
|
|
|
|
|
2012-09-20 21:26:28 +00:00
|
|
|
#include "kerncompat.h"
|
|
|
|
|
2008-01-04 16:29:55 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <unistd.h>
|
2015-03-09 10:56:04 +00:00
|
|
|
#include <getopt.h>
|
2017-07-12 20:05:38 +00:00
|
|
|
#include <pthread.h>
|
2017-02-23 07:46:46 +00:00
|
|
|
#include <stdbool.h>
|
2021-08-17 14:47:18 +00:00
|
|
|
#include <uuid/uuid.h>
|
2012-09-20 21:26:28 +00:00
|
|
|
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/ctree.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/disk-io.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/volumes.h"
|
2020-08-18 13:56:04 +00:00
|
|
|
#include "kernel-shared/transaction.h"
|
2019-06-19 23:46:21 +00:00
|
|
|
#include "common/utils.h"
|
2019-06-19 22:44:36 +00:00
|
|
|
#include "common/task-utils.h"
|
2019-07-01 18:54:39 +00:00
|
|
|
#include "common/path-utils.h"
|
2019-06-19 23:46:21 +00:00
|
|
|
#include "common/help.h"
|
2021-09-03 20:29:06 +00:00
|
|
|
#include "common/parse-utils.h"
|
2017-01-25 15:54:35 +00:00
|
|
|
#include "mkfs/common.h"
|
2017-01-25 16:09:10 +00:00
|
|
|
#include "convert/common.h"
|
2017-01-27 14:47:17 +00:00
|
|
|
#include "convert/source-fs.h"
|
2019-09-25 13:37:27 +00:00
|
|
|
#include "crypto/crc32c.h"
|
2019-06-19 23:46:21 +00:00
|
|
|
#include "common/fsfeatures.h"
|
2021-09-21 13:38:19 +00:00
|
|
|
#include "common/device-scan.h"
|
2015-06-21 16:23:19 +00:00
|
|
|
#include "common/box.h"
|
2021-04-29 17:52:15 +00:00
|
|
|
#include "common/open-utils.h"
|
2021-11-10 20:08:01 +00:00
|
|
|
#include "common/repair.h"
|
2016-08-23 15:21:30 +00:00
|
|
|
|
2017-08-22 16:30:43 +00:00
|
|
|
extern const struct btrfs_convert_operations ext2_convert_ops;
|
|
|
|
extern const struct btrfs_convert_operations reiserfs_convert_ops;
|
2017-01-30 09:11:22 +00:00
|
|
|
|
|
|
|
/*
 * Table of supported source filesystem backends; each entry is compiled
 * in only when the corresponding BTRFSCONVERT_* feature was enabled at
 * build time.
 */
static const struct btrfs_convert_operations *convert_operations[] = {
#if BTRFSCONVERT_EXT2
	&ext2_convert_ops,
#endif
#if BTRFSCONVERT_REISERFS
	&reiserfs_convert_ops,
#endif
};
|
|
|
|
|
2014-11-09 22:16:56 +00:00
|
|
|
static void *print_copied_inodes(void *p)
|
|
|
|
{
|
|
|
|
struct task_ctx *priv = p;
|
|
|
|
const char work_indicator[] = { '.', 'o', 'O', 'o' };
|
2017-01-30 09:33:01 +00:00
|
|
|
u64 count = 0;
|
2014-11-09 22:16:56 +00:00
|
|
|
|
|
|
|
task_period_start(priv->info, 1000 /* 1s */);
|
|
|
|
while (1) {
|
|
|
|
count++;
|
2017-07-12 20:05:38 +00:00
|
|
|
pthread_mutex_lock(&priv->mutex);
|
2021-08-19 20:48:22 +00:00
|
|
|
printf("Copy inodes [%c] [%10llu/%10llu]\r",
|
2017-01-30 09:33:01 +00:00
|
|
|
work_indicator[count % 4],
|
|
|
|
(unsigned long long)priv->cur_copy_inodes,
|
|
|
|
(unsigned long long)priv->max_copy_inodes);
|
2017-07-12 20:05:38 +00:00
|
|
|
pthread_mutex_unlock(&priv->mutex);
|
2014-11-09 22:16:56 +00:00
|
|
|
fflush(stdout);
|
|
|
|
task_period_wait(priv->info);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Task-framework completion hook for the inode copy progress task.
 * Terminates the \r-updated progress line so later output starts on a
 * fresh line.  Always returns 0.
 */
static int after_copied_inodes(void *p)
{
	putchar('\n');
	fflush(stdout);
	return 0;
}
|
|
|
|
|
2013-08-14 18:39:21 +00:00
|
|
|
/*
 * Copy all inodes of the source filesystem into the btrfs @root.
 *
 * Thin dispatcher to the source-fs specific backend; @convert_flags are
 * the CONVERT_FLAG_* bits and @p the progress reporting context.
 * Return value semantics are defined by the backend (0 on success,
 * presumably negative errno on failure — backend-dependent).
 */
static inline int copy_inodes(struct btrfs_convert_context *cctx,
			      struct btrfs_root *root, u32 convert_flags,
			      struct task_ctx *p)
{
	return cctx->convert_ops->copy_inodes(cctx, root, convert_flags, p);
}
|
|
|
|
|
|
|
|
/* Release source filesystem resources through the backend's close_fs hook. */
static inline void convert_close_fs(struct btrfs_convert_context *cctx)
{
	cctx->convert_ops->close_fs(cctx);
}
|
|
|
|
|
2016-10-04 14:24:00 +00:00
|
|
|
/*
 * Ask the source filesystem backend whether the fs is in a state safe
 * to convert (e.g. cleanly unmounted).  Return value semantics are
 * defined by the backend.
 */
static inline int convert_check_state(struct btrfs_convert_context *cctx)
{
	return cctx->convert_ops->check_state(cctx);
}
|
|
|
|
|
2016-08-23 15:38:55 +00:00
|
|
|
/*
 * Insert checksum items for an on-disk data extent.
 *
 * @disk_bytenr: start of the extent on disk
 * @num_bytes:   length of the extent
 *
 * Reads the extent back one sector at a time and feeds each sector to
 * btrfs_csum_file_block().  Returns 0 on success, negative errno
 * (-ENOMEM, or whatever the read/csum helpers return) on failure.
 */
static int csum_disk_extent(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 disk_bytenr, u64 num_bytes)
{
	const u32 sectorsize = root->fs_info->sectorsize;
	char *buf;
	u64 cur;
	int ret = 0;

	buf = malloc(sectorsize);
	if (!buf)
		return -ENOMEM;

	for (cur = 0; cur < num_bytes; cur += sectorsize) {
		ret = read_disk_extent(root, disk_bytenr + cur, sectorsize,
				       buf);
		if (ret)
			break;
		ret = btrfs_csum_file_block(trans, disk_bytenr + num_bytes,
					    disk_bytenr + cur, buf,
					    sectorsize);
		if (ret)
			break;
	}
	free(buf);
	return ret;
}
|
|
|
|
|
2016-08-23 15:42:01 +00:00
|
|
|
static int create_image_file_range(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct cache_tree *used,
|
|
|
|
struct btrfs_inode_item *inode,
|
|
|
|
u64 ino, u64 bytenr, u64 *ret_len,
|
2017-01-30 16:52:15 +00:00
|
|
|
u32 convert_flags)
|
2016-08-23 15:42:01 +00:00
|
|
|
{
|
|
|
|
struct cache_extent *cache;
|
2020-05-01 06:52:19 +00:00
|
|
|
struct btrfs_block_group *bg_cache;
|
2016-08-23 15:42:01 +00:00
|
|
|
u64 len = *ret_len;
|
|
|
|
u64 disk_bytenr;
|
|
|
|
int i;
|
|
|
|
int ret;
|
2017-01-30 16:52:15 +00:00
|
|
|
u32 datacsum = convert_flags & CONVERT_FLAG_DATACSUM;
|
2016-08-23 15:42:01 +00:00
|
|
|
|
2017-05-18 02:13:27 +00:00
|
|
|
if (bytenr != round_down(bytenr, root->fs_info->sectorsize)) {
|
2016-09-06 12:07:25 +00:00
|
|
|
error("bytenr not sectorsize aligned: %llu",
|
|
|
|
(unsigned long long)bytenr);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-05-18 02:13:27 +00:00
|
|
|
if (len != round_down(len, root->fs_info->sectorsize)) {
|
2016-09-06 12:07:25 +00:00
|
|
|
error("length not sectorsize aligned: %llu",
|
|
|
|
(unsigned long long)len);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2016-08-23 15:42:01 +00:00
|
|
|
len = min_t(u64, len, BTRFS_MAX_EXTENT_SIZE);
|
|
|
|
|
|
|
|
/*
|
2017-02-23 02:22:22 +00:00
|
|
|
* Skip reserved ranges first
|
2016-08-23 15:42:01 +00:00
|
|
|
*
|
|
|
|
* Or we will insert a hole into current image file, and later
|
|
|
|
* migrate block will fail as there is already a file extent.
|
|
|
|
*/
|
2017-02-23 02:22:22 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *reserved = &btrfs_reserved_ranges[i];
|
2016-08-23 15:42:01 +00:00
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
/*
|
|
|
|
* |-- reserved --|
|
|
|
|
* |--range---|
|
|
|
|
* or
|
|
|
|
* |---- reserved ----|
|
|
|
|
* |-- range --|
|
|
|
|
* Skip to reserved range end
|
|
|
|
*/
|
|
|
|
if (bytenr >= reserved->start && bytenr < range_end(reserved)) {
|
|
|
|
*ret_len = range_end(reserved) - bytenr;
|
2016-08-23 15:42:01 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2017-02-23 02:22:22 +00:00
|
|
|
* |---reserved---|
|
2016-08-23 15:42:01 +00:00
|
|
|
* |----range-------|
|
2017-02-23 02:22:22 +00:00
|
|
|
* Leading part may still create a file extent
|
2016-08-23 15:42:01 +00:00
|
|
|
*/
|
2017-02-23 02:22:22 +00:00
|
|
|
if (bytenr < reserved->start &&
|
|
|
|
bytenr + len >= range_end(reserved)) {
|
|
|
|
len = min_t(u64, len, reserved->start - bytenr);
|
2016-08-23 15:42:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
/* Check if we are going to insert regular file extent, or hole */
|
2016-08-23 15:42:01 +00:00
|
|
|
cache = search_cache_extent(used, bytenr);
|
|
|
|
if (cache) {
|
|
|
|
if (cache->start <= bytenr) {
|
|
|
|
/*
|
|
|
|
* |///////Used///////|
|
|
|
|
* |<--insert--->|
|
|
|
|
* bytenr
|
2017-02-23 02:22:22 +00:00
|
|
|
* Insert one real file extent
|
2016-08-23 15:42:01 +00:00
|
|
|
*/
|
|
|
|
len = min_t(u64, len, cache->start + cache->size -
|
|
|
|
bytenr);
|
|
|
|
disk_bytenr = bytenr;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* |//Used//|
|
|
|
|
* |<-insert-->|
|
|
|
|
* bytenr
|
2017-02-23 02:22:22 +00:00
|
|
|
* Insert one hole
|
2016-08-23 15:42:01 +00:00
|
|
|
*/
|
|
|
|
len = min(len, cache->start - bytenr);
|
|
|
|
disk_bytenr = 0;
|
|
|
|
datacsum = 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* |//Used//| |EOF
|
|
|
|
* |<-insert-->|
|
|
|
|
* bytenr
|
2017-02-23 02:22:22 +00:00
|
|
|
* Insert one hole
|
2016-08-23 15:42:01 +00:00
|
|
|
*/
|
|
|
|
disk_bytenr = 0;
|
|
|
|
datacsum = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (disk_bytenr) {
|
|
|
|
/* Check if the range is in a data block group */
|
|
|
|
bg_cache = btrfs_lookup_block_group(root->fs_info, bytenr);
|
2018-09-14 07:25:05 +00:00
|
|
|
if (!bg_cache) {
|
|
|
|
error("missing data block for bytenr %llu", bytenr);
|
2016-08-23 15:42:01 +00:00
|
|
|
return -ENOENT;
|
2018-09-14 07:25:05 +00:00
|
|
|
}
|
|
|
|
if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)) {
|
|
|
|
error(
|
|
|
|
"data bytenr %llu is covered by non-data block group %llu flags 0x%llu",
|
2020-05-01 06:52:17 +00:00
|
|
|
bytenr, bg_cache->start, bg_cache->flags);
|
2016-08-23 15:42:01 +00:00
|
|
|
return -EINVAL;
|
2018-09-14 07:25:05 +00:00
|
|
|
}
|
2016-08-23 15:42:01 +00:00
|
|
|
|
|
|
|
/* The extent should never cross block group boundary */
|
2020-05-01 06:52:17 +00:00
|
|
|
len = min_t(u64, len, bg_cache->start + bg_cache->length -
|
|
|
|
bytenr);
|
2016-08-23 15:42:01 +00:00
|
|
|
}
|
|
|
|
|
2017-05-18 02:13:27 +00:00
|
|
|
if (len != round_down(len, root->fs_info->sectorsize)) {
|
2016-09-06 12:07:25 +00:00
|
|
|
error("remaining length not sectorsize aligned: %llu",
|
|
|
|
(unsigned long long)len);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2016-08-23 15:42:01 +00:00
|
|
|
ret = btrfs_record_file_extent(trans, root, ino, inode, bytenr,
|
|
|
|
disk_bytenr, len);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2018-09-14 07:25:05 +00:00
|
|
|
if (datacsum) {
|
2016-08-23 15:42:01 +00:00
|
|
|
ret = csum_disk_extent(trans, root, bytenr, len);
|
2018-10-25 12:10:54 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
errno = -ret;
|
2018-09-14 07:25:05 +00:00
|
|
|
error(
|
2018-10-25 12:10:54 +00:00
|
|
|
"failed to calculate csum for bytenr %llu len %llu: %m",
|
|
|
|
bytenr, len);
|
|
|
|
}
|
2018-09-14 07:25:05 +00:00
|
|
|
}
|
2016-08-23 15:42:01 +00:00
|
|
|
*ret_len = len;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:42:23 +00:00
|
|
|
/*
|
|
|
|
* Relocate old fs data in one reserved ranges
|
|
|
|
*
|
|
|
|
* Since all old fs data in reserved range is not covered by any chunk nor
|
|
|
|
* data extent, we don't need to handle any reference but add new
|
|
|
|
* extent/reference, which makes codes more clear
|
|
|
|
*/
|
|
|
|
static int migrate_one_reserved_range(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct cache_tree *used,
|
|
|
|
struct btrfs_inode_item *inode, int fd,
|
2017-03-16 03:18:31 +00:00
|
|
|
u64 ino, const struct simple_range *range,
|
2017-01-30 16:52:15 +00:00
|
|
|
u32 convert_flags)
|
2016-08-23 15:42:23 +00:00
|
|
|
{
|
2017-02-23 02:22:22 +00:00
|
|
|
u64 cur_off = range->start;
|
|
|
|
u64 cur_len = range->len;
|
|
|
|
u64 hole_start = range->start;
|
2016-08-23 15:42:23 +00:00
|
|
|
u64 hole_len;
|
|
|
|
struct cache_extent *cache;
|
|
|
|
struct btrfs_key key;
|
|
|
|
struct extent_buffer *eb;
|
|
|
|
int ret = 0;
|
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
/*
|
|
|
|
* It's possible that there are holes in reserved range:
|
|
|
|
* |<---------------- Reserved range ---------------------->|
|
|
|
|
* |<- Old fs data ->| |<- Old fs data ->|
|
|
|
|
* So here we need to iterate through old fs used space and only
|
|
|
|
* migrate ranges that covered by old fs data.
|
|
|
|
*/
|
|
|
|
while (cur_off < range_end(range)) {
|
2017-07-27 15:47:21 +00:00
|
|
|
cache = search_cache_extent(used, cur_off);
|
2016-08-23 15:42:23 +00:00
|
|
|
if (!cache)
|
|
|
|
break;
|
|
|
|
cur_off = max(cache->start, cur_off);
|
2017-07-27 15:47:21 +00:00
|
|
|
if (cur_off >= range_end(range))
|
|
|
|
break;
|
2017-02-23 02:22:22 +00:00
|
|
|
cur_len = min(cache->start + cache->size, range_end(range)) -
|
2016-08-23 15:42:23 +00:00
|
|
|
cur_off;
|
2017-05-18 02:13:27 +00:00
|
|
|
BUG_ON(cur_len < root->fs_info->sectorsize);
|
2016-08-23 15:42:23 +00:00
|
|
|
|
|
|
|
/* reserve extent for the data */
|
|
|
|
ret = btrfs_reserve_extent(trans, root, cur_len, 0, 0, (u64)-1,
|
|
|
|
&key, 1);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
eb = malloc(sizeof(*eb) + cur_len);
|
|
|
|
if (!eb) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pread(fd, eb->data, cur_len, cur_off);
|
|
|
|
if (ret < cur_len) {
|
|
|
|
ret = (ret < 0 ? ret : -EIO);
|
|
|
|
free(eb);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
eb->start = key.objectid;
|
|
|
|
eb->len = key.offset;
|
2021-10-05 06:23:00 +00:00
|
|
|
eb->fs_info = root->fs_info;
|
2016-08-23 15:42:23 +00:00
|
|
|
|
|
|
|
/* Write the data */
|
2017-06-13 09:19:21 +00:00
|
|
|
ret = write_and_map_eb(root->fs_info, eb);
|
2016-08-23 15:42:23 +00:00
|
|
|
free(eb);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Now handle extent item and file extent things */
|
|
|
|
ret = btrfs_record_file_extent(trans, root, ino, inode, cur_off,
|
|
|
|
key.objectid, key.offset);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
/* Finally, insert csum items */
|
2017-01-30 16:52:15 +00:00
|
|
|
if (convert_flags & CONVERT_FLAG_DATACSUM)
|
2016-08-23 15:42:23 +00:00
|
|
|
ret = csum_disk_extent(trans, root, key.objectid,
|
|
|
|
key.offset);
|
|
|
|
|
|
|
|
/* Don't forget to insert hole */
|
|
|
|
hole_len = cur_off - hole_start;
|
|
|
|
if (hole_len) {
|
|
|
|
ret = btrfs_record_file_extent(trans, root, ino, inode,
|
|
|
|
hole_start, 0, hole_len);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cur_off += key.offset;
|
|
|
|
hole_start = cur_off;
|
2017-02-23 02:22:22 +00:00
|
|
|
cur_len = range_end(range) - cur_off;
|
2016-08-23 15:42:23 +00:00
|
|
|
}
|
2017-02-23 02:22:22 +00:00
|
|
|
/*
|
|
|
|
* Last hole
|
|
|
|
* |<---- reserved -------->|
|
|
|
|
* |<- Old fs data ->| |
|
|
|
|
* | Hole |
|
|
|
|
*/
|
|
|
|
if (range_end(range) - hole_start > 0)
|
2016-08-23 15:42:23 +00:00
|
|
|
ret = btrfs_record_file_extent(trans, root, ino, inode,
|
2017-02-23 02:22:22 +00:00
|
|
|
hole_start, 0, range_end(range) - hole_start);
|
2016-08-23 15:42:23 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:43:21 +00:00
|
|
|
/*
|
2017-08-22 16:30:43 +00:00
|
|
|
* Relocate the used source fs data in reserved ranges
|
2016-08-23 15:43:21 +00:00
|
|
|
*/
|
|
|
|
static int migrate_reserved_ranges(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_root *root,
|
|
|
|
struct cache_tree *used,
|
|
|
|
struct btrfs_inode_item *inode, int fd,
|
2017-01-30 16:52:15 +00:00
|
|
|
u64 ino, u64 total_bytes, u32 convert_flags)
|
2016-08-23 15:43:21 +00:00
|
|
|
{
|
2017-02-23 02:22:22 +00:00
|
|
|
int i;
|
2016-08-23 15:43:21 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *range = &btrfs_reserved_ranges[i];
|
2016-08-23 15:43:21 +00:00
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
if (range->start > total_bytes)
|
|
|
|
return ret;
|
|
|
|
ret = migrate_one_reserved_range(trans, root, used, inode, fd,
|
|
|
|
ino, range, convert_flags);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
2016-08-23 15:43:21 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:44:03 +00:00
|
|
|
/*
 * Helper for expand and merge extent_cache for wipe_one_reserved_range() to
 * handle wiping a range that exists in cache.
 *
 * Grow @entry until it is at least @min_stripe_size bytes long, either
 * backward (toward lower offsets, @backward != 0) or forward.  If the
 * growth would overlap the neighbouring cache extent, the two are merged
 * instead.  Returns 0 on success, -ENOSPC if there is no room before
 * offset 0 when growing backward.
 *
 * NOTE(review): @diff is an int holding a u64 difference; callers pass
 * min_stripe_size of SZ_32M/2 so it fits, but this silently truncates
 * for larger values — confirm callers never exceed INT_MAX.
 */
static int _expand_extent_cache(struct cache_tree *tree,
				struct cache_extent *entry,
				u64 min_stripe_size, int backward)
{
	struct cache_extent *ce;
	int diff;

	/* Already large enough, nothing to do */
	if (entry->size >= min_stripe_size)
		return 0;
	diff = min_stripe_size - entry->size;

	if (backward) {
		ce = prev_cache_extent(entry);
		if (!ce)
			goto expand_back;
		if (ce->start + ce->size >= entry->start - diff) {
			/* Directly merge with previous extent */
			ce->size = entry->start + entry->size - ce->start;
			remove_cache_extent(tree, entry);
			free(entry);
			return 0;
		}
expand_back:
		/* No overlap, normal extent */
		if (entry->start < diff) {
			/* Would underflow below offset 0 */
			error("cannot find space for data chunk layout");
			return -ENOSPC;
		}
		entry->start -= diff;
		entry->size += diff;
		return 0;
	}
	ce = next_cache_extent(entry);
	if (!ce)
		goto expand_after;
	if (entry->start + entry->size + diff >= ce->start) {
		/* Directly merge with next extent */
		entry->size = ce->start + ce->size - entry->start;
		remove_cache_extent(tree, ce);
		free(ce);
		return 0;
	}
expand_after:
	entry->size += diff;
	return 0;
}
|
|
|
|
|
2016-08-23 15:44:36 +00:00
|
|
|
/*
 * Remove one reserved range from given cache tree
 * if min_stripe_size is non-zero, it will ensure for split case,
 * all its split cache extent is no smaller than @min_stripe_size / 2.
 *
 * Three overlap cases are handled below: wipe at the head of an extent,
 * wipe strictly inside an extent (splitting it in two), and wipe at the
 * tail.  The BUG_ONs encode the simplifying assumptions: the wiped
 * range is BTRFS_STRIPE_LEN aligned and at most min_stripe_size / 2, so
 * the surviving pieces never need merging for the ensure_size case.
 */
static int wipe_one_reserved_range(struct cache_tree *tree,
				   u64 start, u64 len, u64 min_stripe_size,
				   int ensure_size)
{
	struct cache_extent *cache;
	int ret;

	BUG_ON(ensure_size && min_stripe_size == 0);
	/*
	 * The logical here is simplified to handle special cases only
	 * So we don't need to consider merge case for ensure_size
	 */
	BUG_ON(min_stripe_size && (min_stripe_size < len * 2 ||
	       min_stripe_size / 2 < BTRFS_STRIPE_LEN));

	/* Also, wipe range should already be aligned */
	BUG_ON(start != round_down(start, BTRFS_STRIPE_LEN) ||
	       start + len != round_up(start + len, BTRFS_STRIPE_LEN));

	/* From here on, min_stripe_size means the minimum piece size */
	min_stripe_size /= 2;

	cache = lookup_cache_extent(tree, start, len);
	if (!cache)
		return 0;

	if (start <= cache->start) {
		/*
		 * |--------cache---------|
		 * |-wipe-|
		 */
		BUG_ON(start + len <= cache->start);

		/*
		 * The wipe size is smaller than min_stripe_size / 2,
		 * so the result length should still meet min_stripe_size
		 * And no need to do alignment
		 */
		cache->size -= (start + len - cache->start);
		if (cache->size == 0) {
			/* Entire extent wiped away */
			remove_cache_extent(tree, cache);
			free(cache);
			return 0;
		}

		BUG_ON(ensure_size && cache->size < min_stripe_size);

		cache->start = start + len;
		return 0;
	} else if (start > cache->start && start + len < cache->start +
		   cache->size) {
		/*
		 * |-------cache-----|
		 *	|-wipe-|
		 * Split into a leading and a trailing piece.
		 */
		u64 old_start = cache->start;
		u64 old_len = cache->size;
		u64 insert_start = start + len;
		u64 insert_len;

		cache->size = start - cache->start;
		/* Expand the leading half part if needed */
		if (ensure_size && cache->size < min_stripe_size) {
			ret = _expand_extent_cache(tree, cache,
					min_stripe_size, 1);
			if (ret < 0)
				return ret;
		}

		/* And insert the new one */
		insert_len = old_start + old_len - start - len;
		ret = add_merge_cache_extent(tree, insert_start, insert_len);
		if (ret < 0)
			return ret;

		/* Expand the last half part if needed */
		if (ensure_size && insert_len < min_stripe_size) {
			/* Re-lookup: add_merge may have replaced the node */
			cache = lookup_cache_extent(tree, insert_start,
						    insert_len);
			if (!cache || cache->start != insert_start ||
			    cache->size != insert_len)
				return -ENOENT;
			ret = _expand_extent_cache(tree, cache,
					min_stripe_size, 0);
		}

		return ret;
	}
	/*
	 * |----cache-----|
	 *		|--wipe-|
	 * Wipe len should be small enough and no need to expand the
	 * remaining extent
	 */
	cache->size = start - cache->start;
	BUG_ON(ensure_size && cache->size < min_stripe_size);
	return 0;
}
|
|
|
|
|
2016-08-23 15:45:15 +00:00
|
|
|
/*
|
|
|
|
* Remove reserved ranges from given cache_tree
|
|
|
|
*
|
|
|
|
* It will remove the following ranges
|
|
|
|
* 1) 0~1M
|
|
|
|
* 2) 2nd superblock, +64K (make sure chunks are 64K aligned)
|
|
|
|
* 3) 3rd superblock, +64K
|
|
|
|
*
|
|
|
|
* @min_stripe must be given for safety check
|
|
|
|
* and if @ensure_size is given, it will ensure affected cache_extent will be
|
|
|
|
* larger than min_stripe_size
|
|
|
|
*/
|
|
|
|
static int wipe_reserved_ranges(struct cache_tree *tree, u64 min_stripe_size,
|
|
|
|
int ensure_size)
|
|
|
|
{
|
2017-02-23 02:22:22 +00:00
|
|
|
int i;
|
2016-08-23 15:45:15 +00:00
|
|
|
int ret;
|
|
|
|
|
2017-02-23 02:22:22 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *range = &btrfs_reserved_ranges[i];
|
2017-02-23 02:22:22 +00:00
|
|
|
|
|
|
|
ret = wipe_one_reserved_range(tree, range->start, range->len,
|
|
|
|
min_stripe_size, ensure_size);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
2016-08-23 15:45:15 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:45:54 +00:00
|
|
|
/*
 * Compute the data chunk layout and the free space available for new
 * btrfs chunks from the old fs used-space map (steps 1.2 and 1.3 of
 * the convert design described at the top of this file).
 *
 * Fills cctx->data_chunks (ranges that become 1:1 mapped data chunks)
 * and cctx->free_space (ranges usable for new meta/sys/data chunks),
 * both with btrfs reserved ranges wiped out.  Returns 0 on success,
 * negative errno on failure.
 */
static int calculate_available_space(struct btrfs_convert_context *cctx)
{
	struct cache_tree *used = &cctx->used_space;
	struct cache_tree *data_chunks = &cctx->data_chunks;
	struct cache_tree *free = &cctx->free_space;
	struct cache_extent *cache;
	u64 cur_off = 0;
	/*
	 * Twice the minimal chunk size, to allow later wipe_reserved_ranges()
	 * works without need to consider overlap
	 */
	u64 min_stripe_size = SZ_32M;
	int ret;

	/* Calculate data_chunks */
	for (cache = first_cache_extent(used); cache;
	     cache = next_cache_extent(cache)) {
		u64 cur_len;

		/* Already covered by the previous data chunk */
		if (cache->start + cache->size < cur_off)
			continue;
		/* Gap large enough, start a new data chunk here */
		if (cache->start > cur_off + min_stripe_size)
			cur_off = cache->start;
		cur_len = max(cache->start + cache->size - cur_off,
			      min_stripe_size);
		/* data chunks should never exceed device boundary */
		cur_len = min(cctx->total_bytes - cur_off, cur_len);
		ret = add_merge_cache_extent(data_chunks, cur_off, cur_len);
		if (ret < 0)
			goto out;
		cur_off += cur_len;
	}
	/*
	 * remove reserved ranges, so we won't ever bother relocating an old
	 * filesystem extent to other place.
	 */
	ret = wipe_reserved_ranges(data_chunks, min_stripe_size, 1);
	if (ret < 0)
		goto out;

	cur_off = 0;
	/*
	 * Calculate free space
	 * Always round up the start bytenr, to avoid metadata extent cross
	 * stripe boundary, as later mkfs_convert() won't have all the extent
	 * allocation check
	 */
	for (cache = first_cache_extent(data_chunks); cache;
	     cache = next_cache_extent(cache)) {
		if (cache->start < cur_off)
			continue;
		if (cache->start > cur_off) {
			/* The gap between data chunks is free space */
			u64 insert_start;
			u64 len;

			len = cache->start - round_up(cur_off,
						      BTRFS_STRIPE_LEN);
			insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);

			ret = add_merge_cache_extent(free, insert_start, len);
			if (ret < 0)
				goto out;
		}
		cur_off = cache->start + cache->size;
	}
	/* Don't forget the last range */
	if (cctx->total_bytes > cur_off) {
		u64 len = cctx->total_bytes - cur_off;
		u64 insert_start;

		insert_start = round_up(cur_off, BTRFS_STRIPE_LEN);

		ret = add_merge_cache_extent(free, insert_start, len);
		if (ret < 0)
			goto out;
	}

	/* Remove reserved bytes */
	ret = wipe_reserved_ranges(free, min_stripe_size, 0);
out:
	return ret;
}
|
|
|
|
|
2020-07-29 08:40:38 +00:00
|
|
|
static int copy_free_space_tree(struct btrfs_convert_context *cctx)
|
|
|
|
{
|
|
|
|
struct cache_tree *src = &cctx->free_space;
|
|
|
|
struct cache_tree *dst = &cctx->free_space_initial;
|
|
|
|
struct cache_extent *cache;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
for (cache = search_cache_extent(src, 0);
|
|
|
|
cache;
|
|
|
|
cache = next_cache_extent(cache)) {
|
|
|
|
ret = add_merge_cache_extent(dst, cache->start, cache->size);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
cctx->free_bytes_initial += cache->size;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:46:25 +00:00
|
|
|
/*
 * Read used space, and since we have the used space,
 * calculate data_chunks and free for later mkfs
 *
 * Runs the backend's read_used_space() scan, then derives the data
 * chunk layout / free space, and finally snapshots the initial free
 * space.  Note the first check is `if (ret)` (backend may use its own
 * convention) while the second is `if (ret < 0)`.
 * Returns 0 on success, an error code otherwise.
 */
static int convert_read_used_space(struct btrfs_convert_context *cctx)
{
	int ret;

	ret = cctx->convert_ops->read_used_space(cctx);
	if (ret)
		return ret;

	ret = calculate_available_space(cctx);
	if (ret < 0)
		return ret;

	return copy_free_space_tree(cctx);
}
|
|
|
|
|
2016-08-23 15:48:07 +00:00
|
|
|
/*
 * Create the fs image file of the old filesystem.
 *
 * This is completely fs independent as we have cctx->used, we only
 * need to create file extents pointing to all the used positions.
 *
 * @root:		root of the subvolume that will hold the image file
 * @cfg:		mkfs config, provides the total byte count
 * @cctx:		convert context, provides the used space cache
 * @fd:			fd of the device being converted
 * @size:		logical size of the image file to create
 * @name:		file name of the image inside @root
 * @convert_flags:	CONVERT_FLAG_* bits (data csum, inline data, ...)
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int create_image(struct btrfs_root *root,
			struct btrfs_mkfs_config *cfg,
			struct btrfs_convert_context *cctx, int fd,
			u64 size, char *name, u32 convert_flags)
{
	struct btrfs_inode_item buf;
	struct btrfs_trans_handle *trans;
	struct btrfs_path path;
	struct btrfs_key key;
	struct cache_extent *cache;
	struct cache_tree used_tmp;
	u64 cur;
	u64 ino;
	u64 flags = BTRFS_INODE_READONLY;
	int ret;

	/* Without data csums the image inode must carry NODATASUM */
	if (!(convert_flags & CONVERT_FLAG_DATACSUM))
		flags |= BTRFS_INODE_NODATASUM;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	cache_tree_init(&used_tmp);
	btrfs_init_path(&path);

	/* Allocate an inode number, create the inode and link it at "/name" */
	ret = btrfs_find_free_objectid(trans, root, BTRFS_FIRST_FREE_OBJECTID,
				       &ino);
	if (ret < 0) {
		errno = -ret;
		error("failed to find free objectid for root %llu: %m",
			root->root_key.objectid);
		goto out;
	}
	ret = btrfs_new_inode(trans, root, ino, 0400 | S_IFREG);
	if (ret < 0) {
		errno = -ret;
		error("failed to create new inode for root %llu: %m",
			root->root_key.objectid);
		goto out;
	}
	ret = btrfs_change_inode_flags(trans, root, ino, flags);
	if (ret < 0) {
		errno = -ret;
		error("failed to change inode flag for ino %llu root %llu: %m",
			ino, root->root_key.objectid);
		goto out;
	}
	ret = btrfs_add_link(trans, root, ino, BTRFS_FIRST_FREE_OBJECTID, name,
			     strlen(name), BTRFS_FT_REG_FILE, NULL, 1, 0);
	if (ret < 0) {
		errno = -ret;
		error("failed to link ino %llu to '/%s' in root %llu: %m",
			ino, name, root->root_key.objectid);
		goto out;
	}

	/* Read the freshly created inode item into @buf for later updates */
	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
	if (ret) {
		/* ret > 0 means the item was not found, which is unexpected */
		ret = (ret > 0 ? -ENOENT : ret);
		goto out;
	}
	read_extent_buffer(path.nodes[0], &buf,
			btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
			sizeof(buf));
	btrfs_release_path(&path);

	/*
	 * Create a new used space cache, which doesn't contain the reserved
	 * range
	 */
	for (cache = first_cache_extent(&cctx->used_space); cache;
	     cache = next_cache_extent(cache)) {
		ret = add_cache_extent(&used_tmp, cache->start, cache->size);
		if (ret < 0)
			goto out;
	}
	ret = wipe_reserved_ranges(&used_tmp, 0, 0);
	if (ret < 0)
		goto out;

	/*
	 * Start from 1M, as 0~1M is reserved, and create_image_file_range()
	 * can't handle bytenr 0(will consider it as a hole)
	 */
	cur = SZ_1M;
	while (cur < size) {
		u64 len = size - cur;

		/* @len is trimmed by the callee to the extent actually made */
		ret = create_image_file_range(trans, root, &used_tmp,
						&buf, ino, cur, &len,
						convert_flags);
		if (ret < 0)
			goto out;
		cur += len;
	}
	/* Handle the reserved ranges */
	/*
	 * NOTE(review): the return value of migrate_reserved_ranges() is
	 * stored in @ret but overwritten by the btrfs_search_slot() below
	 * before it is ever checked — confirm whether a failure here should
	 * abort.
	 */
	ret = migrate_reserved_ranges(trans, root, &cctx->used_space, &buf, fd,
			ino, cfg->num_bytes, convert_flags);

	/* Write the final size back into the on-disk inode item */
	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(trans, root, &key, &path, 0, 1);
	if (ret) {
		ret = (ret > 0 ? -ENOENT : ret);
		goto out;
	}
	btrfs_set_stack_inode_size(&buf, cfg->num_bytes);
	write_extent_buffer(path.nodes[0], &buf,
			btrfs_item_ptr_offset(path.nodes[0], path.slots[0]),
			sizeof(buf));
out:
	free_extent_cache_tree(&used_tmp);
	btrfs_release_path(&path);
	/* Commit even on error so the transaction handle is released */
	btrfs_commit_transaction(trans, root);
	return ret;
}
|
|
|
|
|
2016-08-23 15:48:44 +00:00
|
|
|
/*
 * Create a new subvolume root @root_objectid by copying @root's tree,
 * inserting the matching root item and creating its top-level directory.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int create_subvol(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, u64 root_objectid)
{
	struct extent_buffer *tmp;
	struct btrfs_root *new_root;
	struct btrfs_key key;
	struct btrfs_root_item root_item;
	int ret;

	ret = btrfs_copy_root(trans, root, root->node, &tmp,
			      root_objectid);
	if (ret)
		return ret;

	/* Point the new root item at the copied tree */
	memcpy(&root_item, &root->root_item, sizeof(root_item));
	btrfs_set_root_bytenr(&root_item, tmp->start);
	btrfs_set_root_level(&root_item, btrfs_header_level(tmp));
	btrfs_set_root_generation(&root_item, trans->transid);
	free_extent_buffer(tmp);

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&key, &root_item);
	/*
	 * Previously this return value was ignored and we carried on with a
	 * missing root item; propagate the failure instead.
	 */
	if (ret < 0)
		return ret;

	/* Re-read the just-inserted root (offset -1 finds the latest) */
	key.offset = (u64)-1;
	new_root = btrfs_read_fs_root(root->fs_info, &key);
	if (!new_root || IS_ERR(new_root)) {
		error("unable to fs read root: %lu", PTR_ERR(new_root));
		/* PTR_ERR(NULL) is 0, which would falsely report success */
		return new_root ? PTR_ERR(new_root) : -ENOENT;
	}

	ret = btrfs_make_root_dir(trans, new_root, BTRFS_FIRST_FREE_OBJECTID);

	return ret;
}
|
|
|
|
|
2016-08-23 15:49:08 +00:00
|
|
|
/*
|
|
|
|
* New make_btrfs() has handle system and meta chunks quite well.
|
|
|
|
* So only need to add remaining data chunks.
|
|
|
|
*/
|
|
|
|
static int make_convert_data_block_groups(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_mkfs_config *cfg,
|
|
|
|
struct btrfs_convert_context *cctx)
|
|
|
|
{
|
|
|
|
struct cache_tree *data_chunks = &cctx->data_chunks;
|
|
|
|
struct cache_extent *cache;
|
|
|
|
u64 max_chunk_size;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't create data chunk over 10% of the convert device
|
|
|
|
* And for single chunk, don't create chunk larger than 1G.
|
|
|
|
*/
|
|
|
|
max_chunk_size = cfg->num_bytes / 10;
|
2017-07-27 08:17:00 +00:00
|
|
|
max_chunk_size = min((u64)(SZ_1G), max_chunk_size);
|
2021-11-08 19:26:41 +00:00
|
|
|
max_chunk_size = round_down(max_chunk_size, fs_info->sectorsize);
|
2016-08-23 15:49:08 +00:00
|
|
|
|
|
|
|
for (cache = first_cache_extent(data_chunks); cache;
|
|
|
|
cache = next_cache_extent(cache)) {
|
|
|
|
u64 cur = cache->start;
|
|
|
|
|
|
|
|
while (cur < cache->start + cache->size) {
|
|
|
|
u64 len;
|
|
|
|
u64 cur_backup = cur;
|
|
|
|
|
|
|
|
len = min(max_chunk_size,
|
|
|
|
cache->start + cache->size - cur);
|
2019-10-30 12:22:27 +00:00
|
|
|
ret = btrfs_alloc_data_chunk(trans, fs_info, &cur_backup, len);
|
2016-08-23 15:49:08 +00:00
|
|
|
if (ret < 0)
|
|
|
|
break;
|
2017-06-13 09:19:35 +00:00
|
|
|
ret = btrfs_make_block_group(trans, fs_info, 0,
|
2018-01-24 02:30:28 +00:00
|
|
|
BTRFS_BLOCK_GROUP_DATA, cur, len);
|
2016-08-23 15:49:08 +00:00
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
cur += len;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-23 15:49:54 +00:00
|
|
|
/*
 * Init the temp btrfs to an operational status.
 *
 * It will fix the extent usage accounting(XXX: Do we really need?) and
 * insert needed data chunks, to ensure all old fs data extents are covered
 * by DATA chunks, preventing wrong chunks from being allocated.
 *
 * And also create convert image subvolume and relocation tree.
 * (XXX: Not need again?)
 * But the convert image subvolume is *NOT* linked to fs tree yet.
 */
static int init_btrfs(struct btrfs_mkfs_config *cfg, struct btrfs_root *root,
		      struct btrfs_convert_context *cctx, u32 convert_flags)
{
	struct btrfs_key location;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * Don't alloc any metadata/system chunk, as we don't want
	 * any meta/sys chunk allocated before all data chunks are inserted.
	 * Or we screw up the chunk layout just like the old implement.
	 */
	fs_info->avoid_sys_chunk_alloc = 1;
	fs_info->avoid_meta_chunk_alloc = 1;
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		error("unable to start transaction");
		ret = PTR_ERR(trans);
		goto err;
	}
	/* Recompute block group used bytes before inserting anything */
	ret = btrfs_fix_block_accounting(trans);
	if (ret)
		goto err;
	/* Cover all old fs data with 1:1 mapped DATA chunks */
	ret = make_convert_data_block_groups(trans, fs_info, cfg, cctx);
	if (ret)
		goto err;
	ret = btrfs_make_root_dir(trans, fs_info->tree_root,
				  BTRFS_ROOT_TREE_DIR_OBJECTID);
	if (ret)
		goto err;
	/* Register the fs root as the "default" subvolume */
	memcpy(&location, &root->root_key, sizeof(location));
	location.offset = (u64)-1;
	ret = btrfs_insert_dir_item(trans, fs_info->tree_root, "default", 7,
			btrfs_super_root_dir(fs_info->super_copy),
			&location, BTRFS_FT_DIR, 0);
	if (ret)
		goto err;
	ret = btrfs_insert_inode_ref(trans, fs_info->tree_root, "default", 7,
				location.objectid,
				btrfs_super_root_dir(fs_info->super_copy), 0);
	if (ret)
		goto err;
	btrfs_set_root_dirid(&fs_info->fs_root->root_item,
			     BTRFS_FIRST_FREE_OBJECTID);

	/* subvol for fs image file */
	ret = create_subvol(trans, root, CONV_IMAGE_SUBVOL_OBJECTID);
	if (ret < 0) {
		error("failed to create subvolume image root: %d", ret);
		goto err;
	}
	/* subvol for data relocation tree */
	ret = create_subvol(trans, root, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (ret < 0) {
		error("failed to create DATA_RELOC root: %d", ret);
		goto err;
	}

	/*
	 * NOTE(review): error paths jump past the commit, leaving the
	 * transaction open and the avoid_*_chunk_alloc flags set — confirm
	 * callers tear the fs down on failure.
	 */
	ret = btrfs_commit_transaction(trans, root);
	fs_info->avoid_sys_chunk_alloc = 0;
	fs_info->avoid_meta_chunk_alloc = 0;
err:
	return ret;
}
|
|
|
|
|
2016-08-23 15:50:16 +00:00
|
|
|
/*
|
|
|
|
* Migrate super block to its default position and zero 0 ~ 16k
|
|
|
|
*/
|
2016-12-08 13:56:20 +00:00
|
|
|
static int migrate_super_block(int fd, u64 old_bytenr)
|
2016-08-23 15:50:16 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2021-10-21 01:40:19 +00:00
|
|
|
struct btrfs_super_block super;
|
|
|
|
u8 result[BTRFS_CSUM_SIZE] = {};
|
2016-08-23 15:50:16 +00:00
|
|
|
u32 len;
|
|
|
|
u32 bytenr;
|
|
|
|
|
2021-10-21 01:40:19 +00:00
|
|
|
ret = pread(fd, &super, BTRFS_SUPER_INFO_SIZE, old_bytenr);
|
2016-12-08 13:56:20 +00:00
|
|
|
if (ret != BTRFS_SUPER_INFO_SIZE)
|
2016-08-23 15:50:16 +00:00
|
|
|
goto fail;
|
|
|
|
|
2021-10-21 01:40:19 +00:00
|
|
|
BUG_ON(btrfs_super_bytenr(&super) != old_bytenr);
|
|
|
|
btrfs_set_super_bytenr(&super, BTRFS_SUPER_INFO_OFFSET);
|
2016-08-23 15:50:16 +00:00
|
|
|
|
2021-10-21 01:40:19 +00:00
|
|
|
btrfs_csum_data(NULL, btrfs_super_csum_type(&super),
|
|
|
|
(u8 *)&super + BTRFS_CSUM_SIZE, result,
|
|
|
|
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
|
|
|
|
memcpy(&super.csum[0], result, BTRFS_CSUM_SIZE);
|
|
|
|
ret = pwrite(fd, &super , BTRFS_SUPER_INFO_SIZE,
|
2016-12-08 13:56:20 +00:00
|
|
|
BTRFS_SUPER_INFO_OFFSET);
|
|
|
|
if (ret != BTRFS_SUPER_INFO_SIZE)
|
2016-08-23 15:50:16 +00:00
|
|
|
goto fail;
|
|
|
|
|
|
|
|
ret = fsync(fd);
|
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
|
2021-10-21 01:40:19 +00:00
|
|
|
memset(&super, 0, BTRFS_SUPER_INFO_SIZE);
|
2016-08-23 15:50:16 +00:00
|
|
|
for (bytenr = 0; bytenr < BTRFS_SUPER_INFO_OFFSET; ) {
|
|
|
|
len = BTRFS_SUPER_INFO_OFFSET - bytenr;
|
2016-12-08 13:56:20 +00:00
|
|
|
if (len > BTRFS_SUPER_INFO_SIZE)
|
|
|
|
len = BTRFS_SUPER_INFO_SIZE;
|
2021-10-21 01:40:19 +00:00
|
|
|
ret = pwrite(fd, &super, len, bytenr);
|
2016-08-23 15:50:16 +00:00
|
|
|
if (ret != len) {
|
|
|
|
fprintf(stderr, "unable to zero fill device\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
bytenr += len;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
fsync(fd);
|
|
|
|
fail:
|
|
|
|
if (ret > 0)
|
|
|
|
ret = -1;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-08-14 18:39:21 +00:00
|
|
|
static int convert_open_fs(const char *devname,
|
|
|
|
struct btrfs_convert_context *cctx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(convert_operations); i++) {
|
|
|
|
int ret = convert_operations[i]->open_fs(cctx, devname);
|
|
|
|
|
|
|
|
if (ret == 0) {
|
|
|
|
cctx->convert_ops = convert_operations[i];
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-28 16:34:03 +00:00
|
|
|
error("no file system found to convert");
|
2013-08-14 18:39:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-01-30 16:52:15 +00:00
|
|
|
/*
 * Top-level conversion routine.
 *
 * Builds a temporary btrfs inside the free space of the source filesystem,
 * creates the image file preserving the old fs, copies all inodes, and
 * finally migrates the superblock to its default position (the point of no
 * easy return for the source fs layout).
 *
 * @devname:	   device to convert
 * @convert_flags: CONVERT_FLAG_* bits controlling csum/label/uuid handling
 * @nodesize:	   btrfs nodesize to use
 * @fslabel:	   label to set when CONVERT_FLAG_SET_LABEL is given
 * @progress:	   non-zero to show a progress task while copying inodes
 * @features:	   BTRFS_FEATURE_* incompat feature bits
 * @csum_type:	   checksum algorithm for the new fs
 * @fsid:	   user-supplied UUID string, empty to generate one
 *
 * Returns 0 on success, -1 on failure.
 */
static int do_convert(const char *devname, u32 convert_flags, u32 nodesize,
		const char *fslabel, int progress, u64 features, u16 csum_type,
		char fsid[BTRFS_UUID_UNPARSED_SIZE])
{
	int ret;
	int fd = -1;
	u32 blocksize;
	u64 total_bytes;
	struct btrfs_root *root;
	struct btrfs_root *image_root;
	struct btrfs_convert_context cctx;
	struct btrfs_key key;
	char subvol_name[SOURCE_FS_NAME_LEN + 8];
	struct task_ctx ctx;
	char features_buf[64];
	char fsid_str[BTRFS_UUID_UNPARSED_SIZE];
	struct btrfs_mkfs_config mkfs_cfg;
	bool btrfs_sb_committed = false;

	memset(&mkfs_cfg, 0, sizeof(mkfs_cfg));
	init_convert_context(&cctx);
	/* Detect the source fs and bind its convert backend */
	ret = convert_open_fs(devname, &cctx);
	if (ret)
		goto fail;
	/* A dirty source fs is only warned about, not fatal */
	ret = convert_check_state(&cctx);
	if (ret)
		warning(
	"source filesystem is not clean, running filesystem check is recommended");
	ret = convert_read_used_space(&cctx);
	if (ret)
		goto fail;

	ASSERT(cctx.total_bytes != 0);
	blocksize = cctx.blocksize;
	total_bytes = (u64)blocksize * (u64)cctx.block_count;
	if (blocksize < 4096) {
		error("block size is too small: %u < 4096", blocksize);
		goto fail;
	}
	if (blocksize != getpagesize())
		warning(
	"blocksize %u is not equal to the page size %u, converted filesystem won't mount on this system",
			blocksize, getpagesize());

	if (btrfs_check_nodesize(nodesize, blocksize, features))
		goto fail;
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		error("unable to open %s: %m", devname);
		goto fail;
	}
	btrfs_parse_fs_features_to_string(features_buf, features);
	if (features == BTRFS_MKFS_DEFAULT_FEATURES)
		strcat(features_buf, " (default)");

	/* Decide the new fs UUID: copied, generated, or user supplied */
	if (convert_flags & CONVERT_FLAG_COPY_FSID) {
		uuid_unparse(cctx.fs_uuid, mkfs_cfg.fs_uuid);
		if (!test_uuid_unique(mkfs_cfg.fs_uuid))
			warning("non-unique UUID (copy): %s", mkfs_cfg.fs_uuid);
	} else if (fsid[0] == 0) {
		uuid_t uuid;

		uuid_generate(uuid);
		uuid_unparse(uuid, mkfs_cfg.fs_uuid);
	} else {
		memcpy(mkfs_cfg.fs_uuid, fsid, BTRFS_UUID_UNPARSED_SIZE);
		if (!test_uuid_unique(mkfs_cfg.fs_uuid))
			warning("non-unique UUID (user set): %s", mkfs_cfg.fs_uuid);
	}

	/* Summarize source/target parameters before doing anything */
	printf("Source filesystem:\n");
	printf(" Type: %s\n", cctx.convert_ops->name);
	printf(" Label: %s\n", cctx.label);
	printf(" Blocksize: %u\n", blocksize);
	uuid_unparse(cctx.fs_uuid, fsid_str);
	printf(" UUID: %s\n", fsid_str);
	printf("Target filesystem:\n");
	printf(" Label: %s\n", fslabel);
	printf(" Blocksize: %u\n", blocksize);
	printf(" Nodesize: %u\n", nodesize);
	printf(" UUID: %s\n", mkfs_cfg.fs_uuid);
	printf(" Checksum: %s\n", btrfs_super_csum_name(csum_type));
	printf(" Features: %s\n", features_buf);
	printf(" Data csum: %s\n", (convert_flags & CONVERT_FLAG_DATACSUM) ? "yes" : "no");
	printf(" Inline data: %s\n", (convert_flags & CONVERT_FLAG_INLINE_DATA) ? "yes" : "no");
	printf(" Copy xattr: %s\n", (convert_flags & CONVERT_FLAG_XATTR) ? "yes" : "no");
	printf("Reported stats:\n");
	printf(" Total space: %12llu\n", cctx.total_bytes);
	printf(" Free space: %12llu (%.2f%%)\n", cctx.free_bytes_initial,
			100.0 * cctx.free_bytes_initial / cctx.total_bytes);
	printf(" Inode count: %12llu\n", cctx.inodes_count);
	printf(" Free inodes: %12llu\n", cctx.free_inodes_count);
	printf(" Block count: %12llu\n", cctx.block_count);

	mkfs_cfg.csum_type = csum_type;
	mkfs_cfg.label = cctx.label;
	mkfs_cfg.num_bytes = total_bytes;
	mkfs_cfg.nodesize = nodesize;
	mkfs_cfg.sectorsize = blocksize;
	mkfs_cfg.stripesize = blocksize;
	mkfs_cfg.features = features;

	/* Superblock goes to a temporary location; source fs stays intact */
	printf("Create initial btrfs filesystem\n");
	ret = make_convert_btrfs(fd, &mkfs_cfg, &cctx);
	if (ret) {
		errno = -ret;
		error("unable to create initial ctree: %m");
		goto fail;
	}

	root = open_ctree_fd(fd, devname, mkfs_cfg.super_bytenr,
			     OPEN_CTREE_WRITES | OPEN_CTREE_TEMPORARY_SUPER);
	if (!root) {
		error("unable to open ctree");
		goto fail;
	}
	ret = init_btrfs(&mkfs_cfg, root, &cctx, convert_flags);
	if (ret) {
		error("unable to setup the root tree: %d", ret);
		goto fail;
	}

	/* Look up the image subvolume created by init_btrfs() */
	printf("Create %s image file\n", cctx.convert_ops->name);
	snprintf(subvol_name, sizeof(subvol_name), "%s_saved",
			cctx.convert_ops->name);
	key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;
	image_root = btrfs_read_fs_root(root->fs_info, &key);
	if (!image_root) {
		error("unable to create image subvolume");
		goto fail;
	}
	ret = create_image(image_root, &mkfs_cfg, &cctx, fd,
			      mkfs_cfg.num_bytes, "image",
			      convert_flags);
	if (ret) {
		error("failed to create %s/image: %d", subvol_name, ret);
		goto fail;
	}

	/* Copy every inode from the source fs into the new btrfs */
	printf("Create btrfs metadata\n");
	ret = pthread_mutex_init(&ctx.mutex, NULL);
	if (ret) {
		error("failed to initialize mutex: %d", ret);
		goto fail;
	}
	ctx.max_copy_inodes = (cctx.inodes_count - cctx.free_inodes_count);
	ctx.cur_copy_inodes = 0;

	if (progress) {
		ctx.info = task_init(print_copied_inodes, after_copied_inodes,
				     &ctx);
		task_start(ctx.info, NULL, NULL);
	}
	ret = copy_inodes(&cctx, root, convert_flags, &ctx);
	if (ret) {
		error("error during copy_inodes %d", ret);
		goto fail;
	}
	if (progress) {
		task_stop(ctx.info);
		task_deinit(ctx.info);
	}

	/* Link the image subvolume into the fs tree as "<fsname>_saved" */
	image_root = btrfs_mksubvol(root, subvol_name,
				    CONV_IMAGE_SUBVOL_OBJECTID, true);
	if (!image_root) {
		error("unable to link subvolume %s", subvol_name);
		goto fail;
	}

	memset(root->fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
	if (convert_flags & CONVERT_FLAG_COPY_LABEL) {
		__strncpy_null(root->fs_info->super_copy->label,
				cctx.label, BTRFS_LABEL_SIZE - 1);
		printf("Copy label '%s'\n", root->fs_info->super_copy->label);
	} else if (convert_flags & CONVERT_FLAG_SET_LABEL) {
		strcpy(root->fs_info->super_copy->label, fslabel);
		printf("Set label to '%s'\n", fslabel);
	}

	ret = close_ctree(root);
	if (ret) {
		error("close_ctree failed: %d", ret);
		goto fail;
	}
	convert_close_fs(&cctx);
	clean_convert_context(&cctx);

	/*
	 * If this step succeed, we get a mountable btrfs. Otherwise
	 * the source fs is left unchanged.
	 */
	ret = migrate_super_block(fd, mkfs_cfg.super_bytenr);
	if (ret) {
		error("unable to migrate super block: %d", ret);
		goto fail;
	}
	btrfs_sb_committed = true;

	/* Reopen with the final superblock and finalize on close */
	root = open_ctree_fd(fd, devname, 0,
			     OPEN_CTREE_WRITES | OPEN_CTREE_TEMPORARY_SUPER);
	if (!root) {
		error("unable to open ctree for finalization");
		goto fail;
	}
	root->fs_info->finalize_on_close = 1;
	close_ctree(root);
	close(fd);

	printf("Conversion complete\n");
	return 0;
fail:
	clean_convert_context(&cctx);
	if (fd != -1)
		close(fd);
	/* After sb migration the source fs can no longer be left untouched */
	if (btrfs_sb_committed)
		warning(
"error during conversion, filesystem is partially created but not finalized and not mountable");
	else
		warning(
"error during conversion, the original filesystem is not modified");
	return -1;
}
|
|
|
|
|
2017-02-23 07:17:16 +00:00
|
|
|
/*
|
|
|
|
* Read out data of convert image which is in btrfs reserved ranges so we can
|
|
|
|
* use them to overwrite the ranges during rollback.
|
|
|
|
*/
|
|
|
|
static int read_reserved_ranges(struct btrfs_root *root, u64 ino,
|
|
|
|
u64 total_bytes, char *reserved_ranges[])
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *range = &btrfs_reserved_ranges[i];
|
2017-02-23 07:17:16 +00:00
|
|
|
|
|
|
|
if (range->start + range->len >= total_bytes)
|
|
|
|
break;
|
|
|
|
ret = btrfs_read_file(root, ino, range->start, range->len,
|
|
|
|
reserved_ranges[i]);
|
|
|
|
if (ret < range->len) {
|
|
|
|
error(
|
|
|
|
"failed to read data of convert image, offset=%llu len=%llu ret=%d",
|
|
|
|
range->start, range->len, ret);
|
|
|
|
if (ret >= 0)
|
|
|
|
ret = -EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
return ret;
|
2017-02-23 07:46:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return true iff [start, start + len) lies entirely inside one of the
 * btrfs reserved ranges.
 */
static bool is_subset_of_reserved_ranges(u64 start, u64 len)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(btrfs_reserved_ranges); idx++) {
		const struct simple_range *range = &btrfs_reserved_ranges[idx];

		if (start >= range->start && start + len <= range_end(range))
			return true;
	}
	return false;
}
|
|
|
|
|
|
|
|
/*
 * Check whether the chunk covering @start is a SINGLE chunk whose logical
 * address equals its physical device offset (1:1 mapped).
 */
static bool is_chunk_direct_mapped(struct btrfs_fs_info *fs_info, u64 start)
{
	struct map_lookup *map;
	struct cache_extent *ce;

	ce = search_cache_extent(&fs_info->mapping_tree.cache_tree, start);
	/* No chunk found, or the found chunk doesn't cover @start */
	if (!ce || ce->start > start || ce->start + ce->size < start)
		return false;

	map = container_of(ce, struct map_lookup, ce);

	/* Not SINGLE chunk */
	if (map->num_stripes != 1)
		return false;

	/* Chunk's logical doesn't match with physical, not 1:1 mapped */
	return map->ce.start == map->stripes[0].physical;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Iterate all file extents of the convert image.
|
|
|
|
*
|
|
|
|
* All file extents except ones in btrfs_reserved_ranges must be mapped 1:1
|
2018-11-26 17:01:42 +00:00
|
|
|
* on disk. (Means their file_offset must match their on disk bytenr)
|
2017-02-23 07:46:46 +00:00
|
|
|
*
|
|
|
|
* File extents in reserved ranges can be relocated to other place, and in
|
|
|
|
* that case we will read them out for later use.
|
|
|
|
*/
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/*
 * Verify that the convert image file is safe to roll back.
 *
 * Walks every file extent of the image inode @ino and checks that, apart
 * from the btrfs reserved ranges, each extent is mapped 1:1 to its physical
 * position on disk (file offset == disk bytenr, and the containing chunk is
 * direct mapped).  On success, reads the data of the reserved ranges into
 * @reserved_ranges so the caller can write it back to restore the old fs.
 *
 * @image_root:		root of the tree holding the convert image inode
 * @ino:		inode number of the convert image file
 * @total_size:		expected size of the image (old fs size)
 * @reserved_ranges:	pre-allocated buffers, one per btrfs reserved range,
 *			filled with the old-fs data on success
 *
 * Return 0 on success, a negative errno value on failure.
 */
static int check_convert_image(struct btrfs_root *image_root, u64 ino,
			       u64 total_size, char *reserved_ranges[])
{
	struct btrfs_key key;
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info = image_root->fs_info;
	u64 checked_bytes = 0;	/* sum of ram_bytes of all extents seen */
	int ret;

	key.objectid = ino;
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;

	btrfs_init_path(&path);
	ret = btrfs_search_slot(NULL, image_root, &key, &path, 0, 0);
	/*
	 * It's possible that some fs doesn't store any (including sb)
	 * data into 0~1M range, and NO_HOLES is enabled.
	 *
	 * So we only need to check if ret < 0
	 */
	if (ret < 0) {
		errno = -ret;
		error("failed to iterate file extents at offset 0: %m");
		btrfs_release_path(&path);
		return ret;
	}

	/* Loop from the first file extents */
	while (1) {
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf = path.nodes[0];
		u64 disk_bytenr;
		u64 file_offset;
		u64 ram_bytes;
		int slot = path.slots[0];

		/* Leaf exhausted, advance to the next item/leaf */
		if (slot >= btrfs_header_nritems(leaf))
			goto next;
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * Iteration is done, exit normally, we have extra check out of
		 * the loop
		 */
		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
			ret = 0;
			break;
		}
		file_offset = key.offset;
		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		/* Only plain regular extents can be mapped 1:1 on disk */
		if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) {
			ret = -EINVAL;
			error(
		"ino %llu offset %llu doesn't have a regular file extent",
				ino, file_offset);
			break;
		}
		if (btrfs_file_extent_compression(leaf, fi) ||
		    btrfs_file_extent_encryption(leaf, fi) ||
		    btrfs_file_extent_other_encoding(leaf, fi)) {
			ret = -EINVAL;
			error(
		"ino %llu offset %llu doesn't have a plain file extent",
				ino, file_offset);
			break;
		}

		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

		checked_bytes += ram_bytes;
		/* Skip hole */
		if (disk_bytenr == 0)
			goto next;

		/*
		 * Most file extents must be 1:1 mapped, which means 2 things:
		 * 1) File extent file offset == disk_bytenr
		 * 2) That data chunk's logical == chunk's physical
		 *
		 * So file extent's file offset == physical position on disk.
		 *
		 * And after rolling back btrfs reserved range, other part
		 * remains what old fs used to be.
		 */
		if (file_offset != disk_bytenr ||
		    !is_chunk_direct_mapped(fs_info, disk_bytenr)) {
			/*
			 * Only file extent in btrfs reserved ranges are
			 * allowed to be non-1:1 mapped
			 */
			if (!is_subset_of_reserved_ranges(file_offset,
							  ram_bytes)) {
				ret = -EINVAL;
				error(
		"ino %llu offset %llu file extent should not be relocated",
					ino, file_offset);
				break;
			}
		}
next:
		ret = btrfs_next_item(image_root, &path);
		if (ret) {
			/* Tree exhausted (ret > 0) is a normal exit */
			if (ret > 0)
				ret = 0;
			break;
		}
	}
	btrfs_release_path(&path);
	if (ret)
		return ret;
	/*
	 * For HOLES mode (without NO_HOLES), we must ensure file extents
	 * cover the whole range of the image.
	 *
	 * ret is known to be 0 here (checked above), so no need to re-test it.
	 */
	if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
		if (checked_bytes != total_size) {
			ret = -EINVAL;
			error("inode %llu has some file extents not checked",
			      ino);
			return ret;
		}
	}

	/* So far so good, read old data located in btrfs reserved ranges */
	ret = read_reserved_ranges(image_root, ino, total_size,
				   reserved_ranges);
	return ret;
}
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/*
|
|
|
|
* btrfs rollback is just reverted convert:
|
|
|
|
* |<---------------Btrfs fs------------------------------>|
|
|
|
|
* |<- Old data chunk ->|< new chunk (D/M/S)>|<- ODC ->|
|
|
|
|
* |<-Old-FE->| |<-Old-FE->|<- Btrfs extents ->|<-Old-FE->|
|
|
|
|
* ||
|
|
|
|
* \/
|
|
|
|
* |<------------------Old fs----------------------------->|
|
|
|
|
* |<- used ->| |<- used ->| |<- used ->|
|
|
|
|
*
|
|
|
|
* However things are much easier than convert, we don't really need to
|
|
|
|
* do the complex space calculation, but only to handle btrfs reserved space
|
|
|
|
*
|
|
|
|
* |<---------------------------Btrfs fs----------------------------->|
|
|
|
|
* | RSV 1 | | Old | | RSV 2 | | Old | | RSV 3 |
|
|
|
|
* | 0~1M | | Fs | | SB2 + 64K | | Fs | | SB3 + 64K |
|
|
|
|
*
|
2017-07-27 08:17:00 +00:00
|
|
|
* On the other hand, the converted fs image in btrfs is a completely
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
* valid old fs.
|
|
|
|
*
|
|
|
|
* |<-----------------Converted fs image in btrfs-------------------->|
|
|
|
|
* | RSV 1 | | Old | | RSV 2 | | Old | | RSV 3 |
|
|
|
|
* | Relocated | | Fs | | Relocated | | Fs | | Relocated |
|
|
|
|
*
|
|
|
|
* Used space in fs image should be at the same physical position on disk.
|
|
|
|
* We only need to recover the data in reserved ranges, so the whole
|
|
|
|
* old fs is back.
|
|
|
|
*
|
|
|
|
* The idea to rollback is also straightforward, we just "read" out the data
|
|
|
|
* of reserved ranges, and write them back to where they should be.
|
|
|
|
* Then the old fs is back.
|
|
|
|
*/
|
2014-08-07 02:35:58 +00:00
|
|
|
static int do_rollback(const char *devname)
|
2008-04-22 18:06:56 +00:00
|
|
|
{
|
|
|
|
struct btrfs_root *root;
|
2013-08-14 17:44:21 +00:00
|
|
|
struct btrfs_root *image_root;
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
struct btrfs_fs_info *fs_info;
|
2008-04-22 18:06:56 +00:00
|
|
|
struct btrfs_key key;
|
|
|
|
struct btrfs_path path;
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
struct btrfs_dir_item *dir;
|
|
|
|
struct btrfs_inode_item *inode_item;
|
2021-08-19 20:48:22 +00:00
|
|
|
struct btrfs_root_ref *root_ref_item;
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
char *image_name = "image";
|
2021-08-19 20:48:22 +00:00
|
|
|
char dir_name[PATH_MAX];
|
|
|
|
int name_len;
|
|
|
|
char fsid_str[BTRFS_UUID_UNPARSED_SIZE];
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
char *reserved_ranges[ARRAY_SIZE(btrfs_reserved_ranges)] = { NULL };
|
2008-01-08 20:56:32 +00:00
|
|
|
u64 total_bytes;
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
u64 fsize;
|
|
|
|
u64 root_dir;
|
|
|
|
u64 ino;
|
|
|
|
int fd = -1;
|
|
|
|
int ret;
|
|
|
|
int i;
|
2008-04-22 18:06:56 +00:00
|
|
|
|
2021-08-19 20:48:22 +00:00
|
|
|
printf("Open filesystem for rollback:\n");
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++) {
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *range = &btrfs_reserved_ranges[i];
|
2008-01-08 20:56:32 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
reserved_ranges[i] = calloc(1, range->len);
|
|
|
|
if (!reserved_ranges[i]) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_mem;
|
|
|
|
}
|
|
|
|
}
|
2008-01-08 20:56:32 +00:00
|
|
|
fd = open(devname, O_RDWR);
|
|
|
|
if (fd < 0) {
|
2018-01-07 21:54:21 +00:00
|
|
|
error("unable to open %s: %m", devname);
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
ret = -EIO;
|
|
|
|
goto free_mem;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
fsize = lseek(fd, 0, SEEK_END);
|
2017-10-26 07:28:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For rollback, we don't really need to write anything so open it
|
|
|
|
* read-only. The write part will happen after we close the
|
|
|
|
* filesystem.
|
|
|
|
*/
|
|
|
|
root = open_ctree_fd(fd, devname, 0, 0);
|
2008-01-08 20:56:32 +00:00
|
|
|
if (!root) {
|
2016-08-23 17:58:07 +00:00
|
|
|
error("unable to open ctree");
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
ret = -EIO;
|
|
|
|
goto free_mem;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
fs_info = root->fs_info;
|
2008-01-08 20:56:32 +00:00
|
|
|
|
2021-08-19 20:48:22 +00:00
|
|
|
printf(" Label: %s\n", fs_info->super_copy->label);
|
|
|
|
uuid_unparse(fs_info->super_copy->fsid, fsid_str);
|
|
|
|
printf(" UUID: %s\n", fsid_str);
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/*
|
|
|
|
* Search root backref first, or after subvolume deletion (orphan),
|
|
|
|
* we can still rollback the image.
|
|
|
|
*/
|
2015-10-18 05:44:41 +00:00
|
|
|
key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
|
|
|
|
key.type = BTRFS_ROOT_BACKREF_KEY;
|
|
|
|
key.offset = BTRFS_FS_TREE_OBJECTID;
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
btrfs_init_path(&path);
|
|
|
|
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, &path, 0, 0);
|
2015-10-18 05:44:41 +00:00
|
|
|
if (ret > 0) {
|
2017-08-22 16:30:43 +00:00
|
|
|
error("unable to find source fs image subvolume, is it deleted?");
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
ret = -ENOENT;
|
|
|
|
goto close_fs;
|
2015-10-18 05:44:41 +00:00
|
|
|
} else if (ret < 0) {
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("failed to find source fs image subvolume: %m");
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
goto close_fs;
|
2015-10-18 05:44:41 +00:00
|
|
|
}
|
2021-08-19 20:48:22 +00:00
|
|
|
/* (256 ROOT_BACKREF 5) */
|
|
|
|
/* root backref key dirid 256 sequence 3 name ext2_saved */
|
|
|
|
root_ref_item = btrfs_item_ptr(path.nodes[0], path.slots[0], struct btrfs_root_ref);
|
|
|
|
name_len = btrfs_root_ref_name_len(path.nodes[0], root_ref_item);
|
|
|
|
if (name_len > sizeof(dir_name))
|
|
|
|
name_len = sizeof(dir_name) - 1;
|
|
|
|
read_extent_buffer(path.nodes[0], dir_name, (unsigned long)(root_ref_item + 1), name_len);
|
|
|
|
dir_name[sizeof(dir_name) - 1] = 0;
|
|
|
|
|
|
|
|
printf(" Restoring from: %s/%s\n", dir_name, image_name);
|
|
|
|
|
|
|
|
btrfs_release_path(&path);
|
2015-10-18 05:44:41 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/* Search convert subvolume */
|
2013-08-14 17:44:21 +00:00
|
|
|
key.objectid = CONV_IMAGE_SUBVOL_OBJECTID;
|
2008-12-17 21:10:07 +00:00
|
|
|
key.type = BTRFS_ROOT_ITEM_KEY;
|
|
|
|
key.offset = (u64)-1;
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
image_root = btrfs_read_fs_root(fs_info, &key);
|
|
|
|
if (IS_ERR(image_root)) {
|
|
|
|
ret = PTR_ERR(image_root);
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("failed to open convert image subvolume: %m");
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
goto close_fs;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/* Search the image file */
|
|
|
|
root_dir = btrfs_root_dirid(&image_root->root_item);
|
|
|
|
dir = btrfs_lookup_dir_item(NULL, image_root, &path, root_dir,
|
|
|
|
image_name, strlen(image_name), 0);
|
|
|
|
|
2008-01-08 20:56:32 +00:00
|
|
|
if (!dir || IS_ERR(dir)) {
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
btrfs_release_path(&path);
|
|
|
|
if (dir)
|
|
|
|
ret = PTR_ERR(dir);
|
|
|
|
else
|
|
|
|
ret = -ENOENT;
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("failed to locate file %s: %m", image_name);
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
goto close_fs;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
btrfs_dir_item_key_to_cpu(path.nodes[0], dir, &key);
|
2013-08-03 00:52:43 +00:00
|
|
|
btrfs_release_path(&path);
|
2008-01-08 20:56:32 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback in a way that is easier to understand.
The new convert behavior gives us a more flexible chunk layout, in which
only the data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
The old rollback behavior can't handle this,
as the old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle the new convert behavior, enhancing the check by
only checking all file extents of the convert image, to verify that these
file extents and their chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Furthermore, introduce a simple rollback mechanism:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of the converted image are mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanism, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/* Get total size of the original image */
|
|
|
|
ino = key.objectid;
|
2008-01-08 20:56:32 +00:00
|
|
|
|
2013-08-14 17:44:21 +00:00
|
|
|
ret = btrfs_lookup_inode(NULL, image_root, &path, &key, 0);
|
2008-01-08 20:56:32 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
if (ret < 0) {
|
2013-08-03 00:52:43 +00:00
|
|
|
btrfs_release_path(&path);
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("unable to find inode %llu: %m", ino);
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
goto close_fs;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
inode_item = btrfs_item_ptr(path.nodes[0], path.slots[0],
|
|
|
|
struct btrfs_inode_item);
|
|
|
|
total_bytes = btrfs_inode_size(path.nodes[0], inode_item);
|
2013-08-03 00:52:43 +00:00
|
|
|
btrfs_release_path(&path);
|
2008-01-08 20:56:32 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
/* Check if we can rollback the image */
|
|
|
|
ret = check_convert_image(image_root, ino, total_bytes, reserved_ranges);
|
|
|
|
if (ret < 0) {
|
|
|
|
error("old fs image can't be rolled back");
|
|
|
|
goto close_fs;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
close_fs:
|
|
|
|
btrfs_release_path(&path);
|
|
|
|
close_ctree_fs_info(fs_info);
|
|
|
|
if (ret)
|
|
|
|
goto free_mem;
|
2008-04-22 18:06:56 +00:00
|
|
|
|
|
|
|
/*
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
* Everything is OK, just write back old fs data into btrfs reserved
|
|
|
|
* ranges
|
|
|
|
*
|
|
|
|
* Here, we starts from the backup blocks first, so if something goes
|
|
|
|
* wrong, the fs is still mountable
|
2008-04-22 18:06:56 +00:00
|
|
|
*/
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
for (i = ARRAY_SIZE(btrfs_reserved_ranges) - 1; i >= 0; i--) {
|
|
|
|
u64 real_size;
|
2017-03-16 03:18:31 +00:00
|
|
|
const struct simple_range *range = &btrfs_reserved_ranges[i];
|
2008-04-22 18:06:56 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
if (range_end(range) >= fsize)
|
|
|
|
continue;
|
2008-04-22 18:06:56 +00:00
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
real_size = min(range_end(range), fsize) - range->start;
|
|
|
|
ret = pwrite(fd, reserved_ranges[i], real_size, range->start);
|
|
|
|
if (ret < real_size) {
|
|
|
|
if (ret < 0)
|
|
|
|
ret = -errno;
|
|
|
|
else
|
|
|
|
ret = -EIO;
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("failed to recover range [%llu, %llu): %m",
|
|
|
|
range->start, real_size);
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
goto free_mem;
|
2008-04-22 18:06:56 +00:00
|
|
|
}
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
ret = 0;
|
2008-04-22 18:06:56 +00:00
|
|
|
}
|
|
|
|
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
free_mem:
|
|
|
|
for (i = 0; i < ARRAY_SIZE(btrfs_reserved_ranges); i++)
|
|
|
|
free(reserved_ranges[i]);
|
|
|
|
if (ret)
|
|
|
|
error("rollback failed");
|
|
|
|
else
|
2021-08-19 20:48:22 +00:00
|
|
|
printf("Rollback succeeded\n");
|
btrfs-progs: convert: Rework rollback
Rework rollback to a more easy to understand way.
New convert behavior makes us to have a more flex chunk layout, which
only data chunk containing old fs data will be at the same physical
location, while new chunks (data/meta/sys) can be mapped anywhere else.
This behavior makes old rollback behavior can't handle it.
As old behavior assumes all data/meta is mapped in a large chunk, which is
mapped 1:1 on disk.
So rework rollback to handle new convert behavior, enhance the check by
only checking all file extents of convert image, only to check if these
file extents and therir chunks are mapped 1:1.
This new rollback check behavior can handle both new and old convert
behavior, as the new behavior is a superset of old behavior.
Further more, introduce a simple rollback mechanisim:
1) Read reserved data (offset = file offset) from convert image
2) Write reserved data into disk (offset = physical offset)
Since old fs image is a valid fs, and we only need to rollback
superblocks (btrfs reserved ranges), then we just read out data in
reserved range, and write it back.
Due to the fact that all other file extents of converted image is mapped
1:1 on disk, we put the missing piece back, then the fs is as good as
old one.
Then what we do in btrfs is just another dream.
With this new rollback mechanisim, we can open btrfs read-only, so we
won't cause any damage to current btrfs, until the final piece (0~1M,
containing 1st super block) is put back.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
[ port to v4.10 ]
Signed-off-by: David Sterba <dsterba@suse.com>
2017-02-23 08:21:14 +00:00
|
|
|
return ret;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
2008-01-22 16:32:03 +00:00
|
|
|
|
2008-01-08 20:56:32 +00:00
|
|
|
static void print_usage(void)
|
|
|
|
{
|
2015-03-09 11:06:22 +00:00
|
|
|
printf("usage: btrfs-convert [options] device\n");
|
2015-03-23 17:45:56 +00:00
|
|
|
printf("options:\n");
|
|
|
|
printf("\t-d|--no-datasum disable data checksum, sets NODATASUM\n");
|
|
|
|
printf("\t-i|--no-xattr ignore xattrs and ACLs\n");
|
|
|
|
printf("\t-n|--no-inline disable inlining of small files to metadata\n");
|
2019-11-05 18:30:09 +00:00
|
|
|
printf("\t--csum TYPE\n");
|
|
|
|
printf("\t--checksum TYPE checksum algorithm to use (default: crc32c)\n");
|
2015-03-23 17:45:56 +00:00
|
|
|
printf("\t-N|--nodesize SIZE set filesystem metadata nodesize\n");
|
2013-08-14 18:39:21 +00:00
|
|
|
printf("\t-r|--rollback roll back to the original filesystem\n");
|
2015-03-23 17:45:56 +00:00
|
|
|
printf("\t-l|--label LABEL set filesystem label\n");
|
|
|
|
printf("\t-L|--copy-label use label from converted filesystem\n");
|
2021-08-17 14:47:18 +00:00
|
|
|
printf("\t--uuid SPEC new, copy or user-defined conforming UUID\n");
|
2015-03-23 17:45:56 +00:00
|
|
|
printf("\t-p|--progress show converting progress (default)\n");
|
2015-03-23 18:31:31 +00:00
|
|
|
printf("\t-O|--features LIST comma separated list of filesystem features\n");
|
2015-03-23 17:45:56 +00:00
|
|
|
printf("\t--no-progress show only overview, not the detailed progress\n");
|
2016-08-23 16:05:56 +00:00
|
|
|
printf("\n");
|
2016-09-25 01:26:41 +00:00
|
|
|
printf("Supported filesystems:\n");
|
2016-08-23 16:05:56 +00:00
|
|
|
printf("\text2/3/4: %s\n", BTRFSCONVERT_EXT2 ? "yes" : "no");
|
2017-08-22 16:30:43 +00:00
|
|
|
printf("\treiserfs: %s\n", BTRFSCONVERT_REISERFS ? "yes" : "no");
|
2008-01-04 16:29:55 +00:00
|
|
|
}
|
2008-01-08 20:56:32 +00:00
|
|
|
|
2015-06-21 16:23:19 +00:00
|
|
|
int BOX_MAIN(convert)(int argc, char *argv[])
|
2008-01-08 20:56:32 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2008-01-14 18:35:00 +00:00
|
|
|
int packing = 1;
|
|
|
|
int noxattr = 0;
|
2008-01-08 20:56:32 +00:00
|
|
|
int datacsum = 1;
|
2015-03-20 01:11:11 +00:00
|
|
|
u32 nodesize = max_t(u32, sysconf(_SC_PAGESIZE),
|
|
|
|
BTRFS_MKFS_DEFAULT_NODE_SIZE);
|
2008-01-08 20:56:32 +00:00
|
|
|
int rollback = 0;
|
2012-01-31 13:40:22 +00:00
|
|
|
int copylabel = 0;
|
2014-08-07 02:35:57 +00:00
|
|
|
int usage_error = 0;
|
2015-02-27 15:30:05 +00:00
|
|
|
int progress = 1;
|
2008-01-08 20:56:32 +00:00
|
|
|
char *file;
|
2015-09-02 16:02:23 +00:00
|
|
|
char fslabel[BTRFS_LABEL_SIZE];
|
2015-03-23 18:31:31 +00:00
|
|
|
u64 features = BTRFS_MKFS_DEFAULT_FEATURES;
|
2019-11-05 18:30:09 +00:00
|
|
|
u16 csum_type = BTRFS_CSUM_TYPE_CRC32;
|
2021-08-17 14:47:18 +00:00
|
|
|
u32 copy_fsid = 0;
|
|
|
|
char fsid[BTRFS_UUID_UNPARSED_SIZE] = {0};
|
2012-01-31 13:40:22 +00:00
|
|
|
|
2019-05-27 04:46:27 +00:00
|
|
|
crc32c_optimization_init();
|
2021-08-19 20:48:22 +00:00
|
|
|
printf("btrfs-convert from %s\n\n", PACKAGE_STRING);
|
2019-05-27 04:46:27 +00:00
|
|
|
|
2008-01-08 20:56:32 +00:00
|
|
|
while(1) {
|
2021-08-17 14:47:18 +00:00
|
|
|
enum { GETOPT_VAL_NO_PROGRESS = 256, GETOPT_VAL_CHECKSUM,
|
|
|
|
GETOPT_VAL_UUID };
|
2015-03-09 10:56:04 +00:00
|
|
|
static const struct option long_options[] = {
|
2015-03-23 17:27:19 +00:00
|
|
|
{ "no-progress", no_argument, NULL,
|
|
|
|
GETOPT_VAL_NO_PROGRESS },
|
2015-03-23 17:45:56 +00:00
|
|
|
{ "no-datasum", no_argument, NULL, 'd' },
|
|
|
|
{ "no-inline", no_argument, NULL, 'n' },
|
|
|
|
{ "no-xattr", no_argument, NULL, 'i' },
|
2019-11-05 18:30:09 +00:00
|
|
|
{ "checksum", required_argument, NULL,
|
|
|
|
GETOPT_VAL_CHECKSUM },
|
|
|
|
{ "csum", required_argument, NULL,
|
|
|
|
GETOPT_VAL_CHECKSUM },
|
2015-03-23 17:45:56 +00:00
|
|
|
{ "rollback", no_argument, NULL, 'r' },
|
2015-03-23 18:31:31 +00:00
|
|
|
{ "features", required_argument, NULL, 'O' },
|
2015-03-23 17:45:56 +00:00
|
|
|
{ "progress", no_argument, NULL, 'p' },
|
|
|
|
{ "label", required_argument, NULL, 'l' },
|
|
|
|
{ "copy-label", no_argument, NULL, 'L' },
|
2021-08-17 14:47:18 +00:00
|
|
|
{ "uuid", required_argument, NULL, GETOPT_VAL_UUID },
|
2015-03-23 17:45:56 +00:00
|
|
|
{ "nodesize", required_argument, NULL, 'N' },
|
2015-06-10 22:04:19 +00:00
|
|
|
{ "help", no_argument, NULL, GETOPT_VAL_HELP},
|
2015-03-09 10:56:04 +00:00
|
|
|
{ NULL, 0, NULL, 0 }
|
|
|
|
};
|
2015-03-23 18:31:31 +00:00
|
|
|
int c = getopt_long(argc, argv, "dinN:rl:LpO:", long_options, NULL);
|
2015-03-09 10:56:04 +00:00
|
|
|
|
2008-01-08 20:56:32 +00:00
|
|
|
if (c < 0)
|
|
|
|
break;
|
|
|
|
switch(c) {
|
|
|
|
case 'd':
|
|
|
|
datacsum = 0;
|
|
|
|
break;
|
2008-01-14 18:35:00 +00:00
|
|
|
case 'i':
|
|
|
|
noxattr = 1;
|
|
|
|
break;
|
|
|
|
case 'n':
|
|
|
|
packing = 0;
|
|
|
|
break;
|
2015-03-20 01:11:11 +00:00
|
|
|
case 'N':
|
2021-01-21 15:25:51 +00:00
|
|
|
nodesize = parse_size_from_string(optarg);
|
2015-03-20 01:11:11 +00:00
|
|
|
break;
|
2008-01-08 20:56:32 +00:00
|
|
|
case 'r':
|
|
|
|
rollback = 1;
|
|
|
|
break;
|
2012-01-31 13:40:22 +00:00
|
|
|
case 'l':
|
2017-01-30 16:52:15 +00:00
|
|
|
copylabel = CONVERT_FLAG_SET_LABEL;
|
2015-09-02 16:02:23 +00:00
|
|
|
if (strlen(optarg) >= BTRFS_LABEL_SIZE) {
|
2016-10-28 16:34:03 +00:00
|
|
|
warning(
|
|
|
|
"label too long, trimmed to %d bytes",
|
2015-09-02 16:02:23 +00:00
|
|
|
BTRFS_LABEL_SIZE - 1);
|
2012-01-31 13:40:22 +00:00
|
|
|
}
|
2016-03-24 18:19:31 +00:00
|
|
|
__strncpy_null(fslabel, optarg, BTRFS_LABEL_SIZE - 1);
|
2012-01-31 13:40:22 +00:00
|
|
|
break;
|
|
|
|
case 'L':
|
2017-01-30 16:52:15 +00:00
|
|
|
copylabel = CONVERT_FLAG_COPY_LABEL;
|
2012-01-31 13:40:22 +00:00
|
|
|
break;
|
2014-11-09 22:16:56 +00:00
|
|
|
case 'p':
|
|
|
|
progress = 1;
|
|
|
|
break;
|
2015-03-23 18:31:31 +00:00
|
|
|
case 'O': {
|
|
|
|
char *orig = strdup(optarg);
|
|
|
|
char *tmp = orig;
|
|
|
|
|
|
|
|
tmp = btrfs_parse_fs_features(tmp, &features);
|
|
|
|
if (tmp) {
|
2016-10-28 16:34:03 +00:00
|
|
|
error("unrecognized filesystem feature: %s",
|
2015-03-23 18:31:31 +00:00
|
|
|
tmp);
|
|
|
|
free(orig);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
free(orig);
|
|
|
|
if (features & BTRFS_FEATURE_LIST_ALL) {
|
|
|
|
btrfs_list_all_fs_features(
|
|
|
|
~BTRFS_CONVERT_ALLOWED_FEATURES);
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
if (features & ~BTRFS_CONVERT_ALLOWED_FEATURES) {
|
|
|
|
char buf[64];
|
|
|
|
|
2018-05-08 06:31:52 +00:00
|
|
|
btrfs_parse_fs_features_to_string(buf,
|
2015-03-23 18:31:31 +00:00
|
|
|
features & ~BTRFS_CONVERT_ALLOWED_FEATURES);
|
2016-10-28 16:34:03 +00:00
|
|
|
error("features not allowed for convert: %s",
|
2015-03-23 18:31:31 +00:00
|
|
|
buf);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2015-03-09 10:56:04 +00:00
|
|
|
case GETOPT_VAL_NO_PROGRESS:
|
|
|
|
progress = 0;
|
|
|
|
break;
|
2019-11-05 18:30:09 +00:00
|
|
|
case GETOPT_VAL_CHECKSUM:
|
|
|
|
csum_type = parse_csum_type(optarg);
|
|
|
|
break;
|
2021-08-17 14:47:18 +00:00
|
|
|
case GETOPT_VAL_UUID:
|
|
|
|
copy_fsid = 0;
|
|
|
|
fsid[0] = 0;
|
|
|
|
if (strcmp(optarg, "copy") == 0) {
|
|
|
|
copy_fsid = CONVERT_FLAG_COPY_FSID;
|
|
|
|
} else if (strcmp(optarg, "new") == 0) {
|
|
|
|
/* Generated later */
|
|
|
|
} else {
|
|
|
|
uuid_t uuid;
|
|
|
|
|
|
|
|
if (uuid_parse(optarg, uuid) != 0) {
|
|
|
|
error("invalid UUID: %s\n", optarg);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
strncpy(fsid, optarg, sizeof(fsid));
|
|
|
|
}
|
|
|
|
break;
|
2015-06-10 22:04:19 +00:00
|
|
|
case GETOPT_VAL_HELP:
|
2008-01-08 20:56:32 +00:00
|
|
|
default:
|
|
|
|
print_usage();
|
2015-06-10 22:04:19 +00:00
|
|
|
return c != GETOPT_VAL_HELP;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
|
|
|
}
|
2014-07-16 03:59:46 +00:00
|
|
|
set_argv0(argv);
|
2016-03-01 15:29:16 +00:00
|
|
|
if (check_argc_exact(argc - optind, 1)) {
|
2008-01-08 20:56:32 +00:00
|
|
|
print_usage();
|
2008-04-22 18:06:56 +00:00
|
|
|
return 1;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
2008-04-22 18:06:56 +00:00
|
|
|
|
2014-08-07 02:35:57 +00:00
|
|
|
if (rollback && (!datacsum || noxattr || !packing)) {
|
|
|
|
fprintf(stderr,
|
|
|
|
"Usage error: -d, -i, -n options do not apply to rollback\n");
|
|
|
|
usage_error++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (usage_error) {
|
|
|
|
print_usage();
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2008-04-22 18:06:56 +00:00
|
|
|
file = argv[optind];
|
2014-02-20 02:49:03 +00:00
|
|
|
ret = check_mounted(file);
|
|
|
|
if (ret < 0) {
|
2018-10-25 12:10:54 +00:00
|
|
|
errno = -ret;
|
|
|
|
error("could not check mount status: %m");
|
2014-02-20 02:49:03 +00:00
|
|
|
return 1;
|
|
|
|
} else if (ret) {
|
2016-10-28 16:34:03 +00:00
|
|
|
error("%s is mounted", file);
|
2008-04-22 18:06:56 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2008-01-08 20:56:32 +00:00
|
|
|
if (rollback) {
|
2014-08-07 02:35:58 +00:00
|
|
|
ret = do_rollback(file);
|
2008-01-08 20:56:32 +00:00
|
|
|
} else {
|
2017-01-30 16:52:15 +00:00
|
|
|
u32 cf = 0;
|
|
|
|
|
|
|
|
cf |= datacsum ? CONVERT_FLAG_DATACSUM : 0;
|
|
|
|
cf |= packing ? CONVERT_FLAG_INLINE_DATA : 0;
|
|
|
|
cf |= noxattr ? 0 : CONVERT_FLAG_XATTR;
|
2021-08-17 14:47:18 +00:00
|
|
|
cf |= copy_fsid;
|
2017-01-30 16:52:15 +00:00
|
|
|
cf |= copylabel;
|
2019-11-05 18:30:09 +00:00
|
|
|
ret = do_convert(file, cf, nodesize, fslabel, progress, features,
|
2021-08-17 14:47:18 +00:00
|
|
|
csum_type, fsid);
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|
2008-01-22 16:32:03 +00:00
|
|
|
if (ret)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
2008-01-08 20:56:32 +00:00
|
|
|
}
|