Merge pull request #2054 from ceph/wip-fs-cmds-oops
Apply some accidentally-dropped commits from the wip-fs-cmds branch to be a little friendlier and pass tests.

Reviewed-by: Greg Farnum <greg@inktank.com>
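For orientation, the changes below contrast the new named-filesystem interface with the legacy one: `ceph fs new` takes a filesystem name plus metadata and data pool names, while the legacy `ceph mds newfs` takes raw pool IDs and implicitly names the filesystem MDS_FS_NAME_DEFAULT. A minimal sketch (the pool IDs here are illustrative, not taken from the diff):

    # new-style: explicit fs name, pools by name
    ceph osd pool create fs_metadata 10
    ceph osd pool create fs_data 10
    ceph fs new cephfs fs_metadata fs_data

    # legacy: pools by ID, fs name implied ("cephfs" after this change)
    ceph mds newfs 1 2 --yes-i-really-mean-it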
qa/workunits/cephtool/test.sh
@@ -296,7 +296,11 @@ function test_mon_mds()
 {
   ceph osd pool create fs_data 10
   ceph osd pool create fs_metadata 10
-  ceph fs new default fs_metadata fs_data
+  ceph fs new cephfs fs_metadata fs_data
+
+  # Check for default crash_replay_interval set automatically in 'fs new'
+  ceph osd dump | grep fs_data > $TMPFILE
+  check_response "crash_replay_interval 45 "
 
   ceph mds cluster_down
   ceph mds cluster_up
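Note: the test above relies on 'fs new' automatically applying a crash_replay_interval of 45 (the osd_default_data_pool_replay_window default) to the data pool. A quick manual spot-check, assuming the pool names used by the test:

    # after 'ceph fs new cephfs fs_metadata fs_data', the data pool's
    # entry in the OSD map should carry the replay window
    ceph osd dump | grep 'fs_data.*crash_replay_interval 45'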
@@ -356,13 +360,24 @@ function test_mon_mds()
   ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
   check_response 'erasure-code' $? 22
   set -e
-  poolnum=$(ceph osd dump | grep 'pool.*mds-ec-pool' | awk '{print $2;}')
+  ec_poolnum=$(ceph osd dump | grep 'pool.*mds-ec-pool' | awk '{print $2;}')
+  data_poolnum=$(ceph osd dump | grep 'pool.*fs_data' | awk '{print $2;}')
+  metadata_poolnum=$(ceph osd dump | grep 'pool.*fs_metadata' | awk '{print $2;}')
   set +e
-  ceph mds newfs 0 $poolnum --yes-i-really-mean-it 2>$TMPFILE
+
+  ceph fs rm cephfs --yes-i-really-mean-it
+
+  ceph mds newfs $metadata_poolnum $ec_poolnum --yes-i-really-mean-it 2>$TMPFILE
   check_response 'erasure-code' $? 22
-  ceph mds newfs $poolnum 1 --yes-i-really-mean-it 2>$TMPFILE
+  ceph mds newfs $ec_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
   check_response 'erasure-code' $? 22
-  ceph mds newfs $poolnum $poolnum --yes-i-really-mean-it 2>$TMPFILE
+  ceph mds newfs $ec_poolnum $ec_poolnum --yes-i-really-mean-it 2>$TMPFILE
   check_response 'erasure-code' $? 22
+  ceph fs new cephfs fs_metadata mds-ec-pool 2>$TMPFILE
+  check_response 'erasure-code' $? 22
+  ceph fs new cephfs mds-ec-pool fs_data 2>$TMPFILE
+  check_response 'erasure-code' $? 22
+  ceph fs new cephfs mds-ec-pool mds-ec-pool 2>$TMPFILE
+  check_response 'erasure-code' $? 22
   set -e
   ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
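Each rejected combination above should fail with EINVAL (22), since neither the data nor the metadata pool may be erasure-coded. A minimal sketch of reproducing one rejection by hand, assuming mds-ec-pool was created with an erasure profile as in the test setup:

    ceph osd pool create mds-ec-pool 10 10 erasure
    ceph fs new cephfs fs_metadata mds-ec-pool   # expect 'erasure-code' error, EINVAL (22)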
@@ -374,7 +389,6 @@ function test_mon_mds()
   # ceph mds set_state
   # ceph mds stop
 
-  ceph fs rm default --yes-i-really-mean-it
   ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
   ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
 }
qa/workunits/fs/misc/layout_vxattrs.sh
@@ -22,9 +22,13 @@ getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute'
 setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2
 setfattr -n ceph.file.layout.stripe_count -v 8 file2
 setfattr -n ceph.file.layout.object_size -v 10485760 file2
+
+# Assumption: that the data pool is called 'data' and has ID '2'
+# (see teuthology.task.ceph.cephfs_setup)
 setfattr -n ceph.file.layout.pool -v data file2
-setfattr -n ceph.file.layout.pool -v 0 file2
+setfattr -n ceph.file.layout.pool -v 2 file2
 getfattr -n ceph.file.layout.pool file2 | grep -q data
+
 getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
 getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
 getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760
@@ -52,8 +56,12 @@ getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true
 setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir
 setfattr -n ceph.dir.layout.stripe_count -v 8 dir
 setfattr -n ceph.dir.layout.object_size -v 10485760 dir
+
+# Assumption: that the data pool is called 'data' and has ID '2'
+# (see teuthology.task.ceph.cephfs_setup)
 setfattr -n ceph.dir.layout.pool -v data dir
-setfattr -n ceph.dir.layout.pool -v 0 dir
+setfattr -n ceph.dir.layout.pool -v 2 dir
+
 getfattr -n ceph.dir.layout dir
 getfattr -n ceph.dir.layout dir | grep -q object_size=10485760
 getfattr -n ceph.dir.layout dir | grep -q stripe_count=8
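The hard-coded pool name/ID pair in the comments above is an environment assumption, not something the script verifies. One way to confirm it before running, as a hedged example:

    # lists "<id> <name>," pairs, e.g. "0 rbd,1 metadata,2 data,"
    ceph osd lspools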
src/mds/MDSMap.h
@@ -71,6 +71,8 @@ extern CompatSet get_mdsmap_compat_set_base(); // pre v0.20
 #define MDS_FEATURE_INCOMPAT_INLINE CompatSet::Feature(7, "mds uses inline data")
 #define MDS_FEATURE_INCOMPAT_NOANCHOR CompatSet::Feature(8, "no anchor table")
 
+#define MDS_FS_NAME_DEFAULT "cephfs"
+
 class MDSMap {
 public:
   // mds states
@@ -193,7 +195,7 @@ public:
 
 public:
   MDSMap()
-    : epoch(0), enabled(false), fs_name("default"),
+    : epoch(0), enabled(false), fs_name(MDS_FS_NAME_DEFAULT),
       flags(0), last_failure(0),
       last_failure_osd_epoch(0),
       tableserver(0), root(0),
src/mon/MDSMonitor.cc
@@ -908,8 +908,8 @@ bool MDSMonitor::management_command(
 {
   if (prefix == "mds newfs") {
     /* Legacy `newfs` command, takes pool numbers instead of
-     * names, assumes fs name to be 'default', and can
-     * overwrite existing filesystem settings */
+     * names, assumes fs name to be MDS_FS_NAME_DEFAULT, and
+     * can overwrite existing filesystem settings */
     MDSMap newmap;
     int64_t metadata, data;
 
@@ -966,7 +966,7 @@ bool MDSMonitor::management_command(
     newmap.inc = pending_mdsmap.inc;
     pending_mdsmap = newmap;
     pending_mdsmap.epoch = mdsmap.epoch + 1;
-    create_new_fs(pending_mdsmap, "default", metadata, data);
+    create_new_fs(pending_mdsmap, MDS_FS_NAME_DEFAULT, metadata, data);
     ss << "new fs with metadata pool " << metadata << " and data pool " << data;
     r = 0;
     return true;
@@ -989,15 +989,7 @@ bool MDSMonitor::management_command(
       ss << "pool '" << data_name << "' does not exist";
       return true;
     }
 
-    // Warn if crash_replay_interval is not set on the data pool
-    // (on creation should have done pools[pool].crash_replay_interval =
-    //  cct->_conf->osd_default_data_pool_replay_window;)
-    pg_pool_t const *data_pool = mon->osdmon()->osdmap.get_pg_pool(data);
-    if (data_pool->get_crash_replay_interval() == 0) {
-      ss << "warning: crash_replay_interval not set on data pool '" << data << "', ";
-    }
-
     string fs_name;
     cmd_getval(g_ceph_context, cmdmap, "fs_name", fs_name);
     if (fs_name.empty()) {
@@ -1023,6 +1015,31 @@ bool MDSMonitor::management_command(
       r = -EINVAL;
     }
 
+    pg_pool_t const *data_pool = mon->osdmon()->osdmap.get_pg_pool(data);
+    assert(data_pool != NULL);  // Checked it existed above
+    pg_pool_t const *metadata_pool = mon->osdmon()->osdmap.get_pg_pool(metadata);
+    assert(metadata_pool != NULL);  // Checked it existed above
+
+    // Automatically set crash_replay_interval on data pool if it
+    // isn't already set.
+    if (data_pool->get_crash_replay_interval() == 0) {
+      r = mon->osdmon()->set_crash_replay_interval(data, g_conf->osd_default_data_pool_replay_window);
+      assert(r == 0);  // We just did get_pg_pool so it must exist and be settable
+      request_proposal(mon->osdmon());
+    }
+
+    if (data_pool->is_erasure()) {
+      ss << "data pool '" << data_name << " is an erasure-code pool";
+      r = -EINVAL;
+      return true;
+    }
+
+    if (metadata_pool->is_erasure()) {
+      ss << "metadata pool '" << metadata_name << " is an erasure-code pool";
+      r = -EINVAL;
+      return true;
+    }
+
     // All checks passed, go ahead and create.
     MDSMap newmap;
     newmap.inc = pending_mdsmap.inc;
src/mon/OSDMonitor.cc
@@ -3309,6 +3309,35 @@ int OSDMonitor::parse_osd_id(const char *s, stringstream *pss)
   return id;
 }
 
+
+/**
+ * Special setter for crash_replay_interval on a pool. Equivalent to
+ * using prepare_command_pool_set, but in a form convenient for use
+ * from MDSMonitor rather than from an administrative command.
+ */
+int OSDMonitor::set_crash_replay_interval(const int64_t pool_id, const uint32_t cri)
+{
+  pg_pool_t p;
+  if (pending_inc.new_pools.count(pool_id)) {
+    p = pending_inc.new_pools[pool_id];
+  } else {
+    const pg_pool_t *p_ptr = osdmap.get_pg_pool(pool_id);
+    if (p_ptr == NULL) {
+      return -ENOENT;
+    } else {
+      p = *p_ptr;
+    }
+  }
+
+  dout(10) << "Set pool " << pool_id << " crash_replay_interval=" << cri << dendl;
+  p.crash_replay_interval = cri;
+  p.last_change = pending_inc.epoch;
+  pending_inc.new_pools[pool_id] = p;
+
+  return 0;
+}
+
+
 int OSDMonitor::prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
                                          stringstream& ss)
 {
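Per the comment block, this setter is the MDSMonitor-friendly twin of the normal pool-set path; the equivalent administrative command (assuming a data pool named fs_data) would be:

    ceph osd pool set fs_data crash_replay_interval 45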
src/mon/OSDMonitor.h
@@ -363,6 +363,7 @@ private:
   bool prepare_command(MMonCommand *m);
   bool prepare_command_impl(MMonCommand *m, map<string,cmd_vartype> &cmdmap);
 
+  int set_crash_replay_interval(const int64_t pool_id, const uint32_t cri);
   int prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
                                stringstream& ss);
 
src/vstart.sh
@@ -536,7 +536,7 @@ EOF
         echo $cmd
         $cmd
 
-        cmd="$CEPH_ADM fs new default cephfs_metadata cephfs_data"
+        cmd="$CEPH_ADM fs new cephfs cephfs_metadata cephfs_data"
         echo $cmd
         $cmd
     fi
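With this change, a developer cluster brought up via vstart.sh gets a filesystem named 'cephfs' instead of 'default'. A hypothetical local run to observe it (the MDS/OSD/MON counts are illustrative):

    MDS=1 OSD=1 MON=1 ./vstart.sh -n
    ./ceph mds stat   # should report the active MDS for the new fs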