Merge pull request #51993 from dparmar18/wip-58072

mon/MDSMonitor: enable 'ceph fs new' use 'ceph fs set' options

Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Reviewed-by: Rishabh Dave <ridave@redhat.com>
Rishabh Dave 2024-04-04 17:50:16 +05:30 committed by GitHub
commit 64c99bf737
9 changed files with 382 additions and 86 deletions


@@ -52,13 +52,16 @@ Once the pools are created, you may enable the file system using the ``fs new``
.. code:: bash
$ ceph fs new <fs_name> <metadata> <data> [--force] [--allow-dangerous-metadata-overlay] [<fscid:int>] [--recover]
$ ceph fs new <fs_name> <metadata> <data> [--force] [--allow-dangerous-metadata-overlay] [<fscid:int>] [--recover] [--yes-i-really-really-mean-it] [<set>...]
This command creates a new file system with the specified metadata and data
pools. The specified data pool is the default data pool and cannot be changed
once set. Each file system has its own set of MDS daemons assigned to ranks,
so ensure that you have sufficient standby daemons available to accommodate
the new file system.
.. note::
``--yes-i-really-really-mean-it`` may be required by some ``fs set``
options passed via ``set`` (for example, the deprecated ``inline_data``).
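For instance, enabling the deprecated ``inline_data`` option at creation
time requires the flag (a hypothetical invocation; pool names are
placeholders):
.. code:: bash
$ ceph fs new cephfs cephfs_metadata cephfs_data --yes-i-really-really-mean-it set inline_data true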
The ``--force`` option is used to achieve any of the following:
- To set an erasure-coded pool for the default data pool. Use of an EC pool for the
@@ -82,11 +85,14 @@ failed. So when an MDS daemon eventually picks up rank 0, the daemon reads the
existing in-RADOS metadata and doesn't overwrite it. The flag also prevents the
standby MDS daemons from joining the file system.
The ``set`` option allows setting multiple options supported by ``fs set``
atomically, as part of creating the file system.
For example:
.. code:: bash
$ ceph fs new cephfs cephfs_metadata cephfs_data
$ ceph fs new cephfs cephfs_metadata cephfs_data set max_mds 2 allow_standby_replay true
$ ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
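To confirm that the options were applied, inspect the new file system's MDS
map, e.g. (output trimmed; its exact format may vary by release):
.. code:: bash
$ ceph fs get cephfs | grep max_mds
max_mds 2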


@@ -396,8 +396,12 @@ class MDSCluster(CephCluster):
def mds_is_running(self, mds_id):
return self.mds_daemons[mds_id].running()
def newfs(self, name='cephfs', create=True):
return Filesystem(self._ctx, name=name, create=create)
def newfs(self, name='cephfs', create=True, **kwargs):
"""
kwargs accepts recover: bool, allow_dangerous_metadata_overlay: bool,
yes_i_really_really_mean_it: bool and fs_ops: list[str]
"""
return Filesystem(self._ctx, name=name, create=create, **kwargs)
def status(self, epoch=None):
return FSStatus(self.mon_manager, epoch)
@@ -528,7 +532,12 @@ class Filesystem(MDSCluster):
This object is for driving a CephFS filesystem. The MDS daemons driven by
MDSCluster may be shared with other Filesystems.
"""
def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False):
def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
**kwargs):
"""
kwargs accepts recover: bool, allow_dangerous_metadata_overlay: bool,
yes_i_really_really_mean_it: bool and fs_ops: list[str]
"""
super(Filesystem, self).__init__(ctx)
self.name = name
@@ -547,7 +556,7 @@ class Filesystem(MDSCluster):
if fscid is not None:
raise RuntimeError("cannot specify fscid when creating fs")
if create and not self.legacy_configured():
self.create()
self.create(**kwargs)
else:
if fscid is not None:
self.id = fscid
@@ -669,7 +678,11 @@ class Filesystem(MDSCluster):
target_size_ratio = 0.9
target_size_ratio_ec = 0.9
def create(self, recover=False, metadata_overlay=False):
def create(self, **kwargs):
"""
kwargs accepts recover: bool, allow_dangerous_metadata_overlay: bool,
yes_i_really_really_mean_it: bool and fs_ops: list[str]
"""
if self.name is None:
self.name = "cephfs"
if self.metadata_pool_name is None:
@@ -679,6 +692,12 @@ class Filesystem(MDSCluster):
else:
data_pool_name = self.data_pool_name
recover = kwargs.pop("recover", False)
metadata_overlay = kwargs.pop("metadata_overlay", False)
yes_i_really_really_mean_it = kwargs.pop("yes_i_really_really_mean_it",
False)
fs_ops = kwargs.pop("fs_ops", None)
# use the ec pool to store the data; a small amount of metadata
# still goes to the primary data pool for all files.
if not metadata_overlay and self.ec_profile and 'disabled' not in self.ec_profile:
@@ -712,6 +731,12 @@ class Filesystem(MDSCluster):
args.append('--recover')
if metadata_overlay:
args.append('--allow-dangerous-metadata-overlay')
if yes_i_really_really_mean_it:
args.append('--yes-i-really-really-mean-it')
if fs_ops:
args.append('set')
for key_or_val in fs_ops:
args.append(key_or_val)
self.run_ceph_cmd(*args)
if not recover:
@@ -931,6 +956,18 @@ class Filesystem(MDSCluster):
def get_var(self, var, status=None):
return self.get_mds_map(status=status)[var]
def get_var_from_fs(self, fsname, var):
"""
Return the value of `var` from the mdsmap of the file system named
`fsname`; fall back to the mdsmap's `flags_state` section when `var`
is not a top-level mdsmap key.
"""
val = None
for fs in self.status().get_filesystems():
if fs["mdsmap"]["fs_name"] == fsname:
try:
val = fs["mdsmap"][var]
break
except KeyError:
val = fs["mdsmap"]["flags_state"][var]
break
return val
def set_dir_layout(self, mount, path, layout):
for name, value in layout.items():
mount.run_shell(args=["setfattr", "-n", "ceph.dir.layout."+name, "-v", str(value), path])
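
As a rough illustration: given fs_ops=["max_mds", "2"] and
yes_i_really_really_mean_it=True, the create() helper above assembles a
command of this shape (pool names are placeholders):
$ ceph fs new cephfs cephfs_metadata cephfs_data --yes-i-really-really-mean-it set max_mds 2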


@@ -656,3 +656,183 @@ class TestSkipReplayInoTable(CephFSTestCase):
ls_out = set(self.mount_a.ls("test_alloc_ino/"))
self.assertEqual(ls_out, set({"dir1", "dir2"}))
class TestNewFSCreation(CephFSTestCase):
MDSS_REQUIRED = 1
TEST_FS = "test_fs"
TEST_FS1 = "test_fs1"
def test_fs_creation_valid_ops(self):
"""
Test setting fs ops with CLI command `ceph fs new`.
"""
fs_ops = [["max_mds", "3"], ["refuse_client_session", "true"],
["allow_new_snaps", "true", "max_file_size", "65536"],
["session_timeout", "234", "session_autoclose",
"100", "max_xattr_size", "150"]]
for fs_ops_list in fs_ops:
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS,
create=True,
fs_ops=fs_ops_list)
for i in range(0, len(fs_ops_list), 2):
# edge case: for option `allow_new_snaps`, the flag name
# is `allow_snaps` in mdsmap
if fs_ops_list[i] == "allow_new_snaps":
fs_ops_list[i] = "allow_snaps"
fs_op_val = str(test_fs.get_var_from_fs(
self.TEST_FS, fs_ops_list[i])).lower()
self.assertEqual(fs_op_val, fs_ops_list[i+1])
finally:
if test_fs is not None:
test_fs.destroy()
def test_fs_creation_invalid_ops(self):
"""
Test setting invalid fs ops with CLI command `ceph fs new`.
"""
invalid_fs_ops = {("inline_data", "true"): errno.EPERM,
("session_timeout", "3"): errno.ERANGE,
("session_autoclose", "foo"): errno.EINVAL,
("max_mds", "-1"): errno.EINVAL,
("bal_rank_mask", ""): errno.EINVAL,
("foo", "2"): errno.EINVAL,
("", ""): errno.EINVAL,
("session_timeout", "180", "", "3"): errno.EINVAL,
("allow_new_snaps", "true", "max_mddds", "3"):
errno.EINVAL,
("allow_new_snapsss", "true", "max_mds", "3"):
errno.EINVAL,
("session_timeout", "20", "max_mddds", "3"):
errno.ERANGE}
for invalid_op_list, expected_errno in invalid_fs_ops.items():
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
fs_ops=invalid_op_list)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, expected_errno)
else:
self.fail(f"Expected {expected_errno}")
finally:
if test_fs is not None:
test_fs.destroy()
def test_fs_creation_incomplete_args(self):
"""
Test sending incomplete key-val pairs of fs ops.
"""
invalid_args_fs_ops = [["max_mds"], ["max_mds", "2", "3"], [""]]
for incomplete_args in invalid_args_fs_ops:
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
fs_ops=incomplete_args)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
self.fail("Expected EINVAL")
finally:
if test_fs is not None:
test_fs.destroy()
def test_endure_fs_fields_post_failure(self):
"""
Test that fields like epoch and default_fscid do not change after
a fs creation failure.
"""
initial_epoch = self.mds_cluster.status()["epoch"]
initial_default_fscid = self.mds_cluster.status()["default_fscid"]
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
fs_ops=["foo"])
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
self.assertEqual(initial_epoch,
self.mds_cluster.status()["epoch"])
self.assertEqual(initial_default_fscid,
self.mds_cluster.status()["default_fscid"])
else:
self.fail("Expected EINVAL")
finally:
if test_fs is not None:
test_fs.destroy()
def test_yes_i_really_really_mean_it(self):
"""
Test that fs creation succeeds when --yes-i-really-really-mean-it is
passed to the CLI command `ceph fs new`.
"""
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
yes_i_really_really_mean_it=True)
self.assertTrue(test_fs.exists())
finally:
if test_fs is not None:
test_fs.destroy()
def test_inline_data(self):
"""
inline_data needs --yes-i-really-really-mean-it to be enabled.
Test fs creation with and without providing it.
NOTE: inline_data is deprecated; this test case will be removed in
the future.
"""
test_fs = None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
fs_ops=["inline_data", "true"])
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EPERM)
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True,
fs_ops=["inline_data", "true"],
yes_i_really_really_mean_it=True)
self.assertIn("mds uses inline data", str(test_fs.status()))
else:
self.fail("Expected EPERM")
finally:
if test_fs is not None:
test_fs.destroy()
def test_no_fs_id_incr_on_fs_creation_fail(self):
"""
Without a rollback, every fs creation that fails due to an error in
setting fs ops would increment `next_filesystem_id`; test that its
value is preserved and rolled back when fs creation fails.
"""
test_fs, test_fs1 = None, None
try:
test_fs = self.mds_cluster.newfs(name=self.TEST_FS, create=True)
for _ in range(5):
try:
self.mds_cluster.newfs(name=self.TEST_FS1, create=True,
fs_ops=["max_mdss", "2"])
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
test_fs1 = self.mds_cluster.newfs(name=self.TEST_FS1, create=True,
fs_ops=["max_mds", "2"])
test_fs_id, test_fs1_id = None, None
for fs in self.mds_cluster.status().get_filesystems():
if fs["mdsmap"]["fs_name"] == self.TEST_FS:
test_fs_id = fs["id"]
if fs["mdsmap"]["fs_name"] == self.TEST_FS1:
test_fs1_id = fs["id"]
self.assertEqual(test_fs_id, test_fs1_id - 1)
finally:
if test_fs is not None:
test_fs.destroy()
if test_fs1 is not None:
test_fs1.destroy()
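
For reference, the invalid-ops cases above surface on the CLI roughly as
follows (hypothetical pool names; the error strings come from the handler
code further below):
$ ceph fs new test_fs meta data set max_mds
Error EINVAL: incomplete list of key-val pairs provided 1
$ ceph fs new test_fs meta data set max_mddds 3
Error EINVAL: unknown variable max_mddds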


@@ -917,8 +917,8 @@ class LocalMDSCluster(LocalCephCluster, MDSCluster):
# FIXME: unimplemented
pass
def newfs(self, name='cephfs', create=True):
return LocalFilesystem(self._ctx, name=name, create=create)
def newfs(self, name='cephfs', create=True, **kwargs):
return LocalFilesystem(self._ctx, name=name, create=create, **kwargs)
def delete_all_filesystems(self):
"""
@@ -937,7 +937,8 @@ class LocalMgrCluster(LocalCephCluster, MgrCluster):
class LocalFilesystem(LocalMDSCluster, Filesystem):
def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False):
def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
**kwargs):
# Deliberately skip calling Filesystem constructor
LocalMDSCluster.__init__(self, ctx)
@@ -960,7 +961,12 @@ class LocalFilesystem(LocalMDSCluster, Filesystem):
if fscid is not None:
raise RuntimeError("cannot specify fscid when creating fs")
if create and not self.legacy_configured():
self.create()
# keep kwarg names consistent with Filesystem.create()
self.create(recover=kwargs.pop("recover", False),
metadata_overlay=kwargs.pop("metadata_overlay",
False),
fs_ops=kwargs.pop("fs_ops", None),
yes_i_really_really_mean_it=kwargs.pop(
"yes_i_really_really_mean_it", False))
else:
if fscid is not None:
self.id = fscid


@@ -450,9 +450,9 @@ mds_gid_t Filesystem::get_standby_replay(mds_gid_t who) const
return MDS_GID_NONE;
}
const Filesystem& FSMap::create_filesystem(std::string_view name,
Filesystem FSMap::create_filesystem(std::string_view name,
int64_t metadata_pool, int64_t data_pool, uint64_t features,
fs_cluster_id_t fscid, bool recover)
bool recover)
{
auto fs = Filesystem();
fs.mds_map.epoch = epoch;
@@ -474,6 +474,11 @@ const Filesystem& FSMap::create_filesystem(std::string_view name,
fs.mds_map.set_flag(CEPH_MDSMAP_NOT_JOINABLE);
}
return fs;
}
const Filesystem& FSMap::commit_filesystem(fs_cluster_id_t fscid, Filesystem fs)
{
if (fscid == FS_CLUSTER_ID_NONE) {
fs.fscid = next_filesystem_id++;
} else {


@@ -455,9 +455,15 @@ public:
* Caller must already have validated all arguments vs. the existing
* FSMap and OSDMap contents.
*/
const Filesystem& create_filesystem(
Filesystem create_filesystem(
std::string_view name, int64_t metadata_pool, int64_t data_pool,
uint64_t features, fs_cluster_id_t fscid, bool recover);
uint64_t features, bool recover);
/**
* Commit the created filesystem to the FSMap, allocating a new fscid
* from next_filesystem_id when the given fscid is FS_CLUSTER_ID_NONE.
*/
const Filesystem& commit_filesystem(fs_cluster_id_t fscid, Filesystem fs);
/**
* Remove the filesystem (it must exist). Caller should already
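
Together with create_filesystem above, this split lets the monitor roll back
a creation that fails while applying fs ops, so failed attempts no longer
consume fscids. A sketch of the observable behaviour (names are placeholders):
$ ceph fs new fs_a meta_a data_a                 # becomes fscid N
$ ceph fs new fs_b meta_b data_b set max_mdss 2  # fails, N+1 not consumed
Error EINVAL: unknown variable max_mdss
$ ceph fs new fs_b meta_b data_b set max_mds 2   # becomes fscid N+1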


@@ -237,6 +237,25 @@ class FsNewHandler : public FileSystemCommandHandler
}
}
vector<string> fsops_vec;
cmd_getval(cmdmap, "set", fsops_vec);
if (!fsops_vec.empty()) {
if (fsops_vec[0] != "set") {
ss << "invalid command";
return -EINVAL;
}
if (fsops_vec.size() % 2 == 0 || fsops_vec.size() < 2) {
/* since "set" is part of the fs options vector, a vector size divisible
by 2 indicates that the fsops key-value pairs are incomplete, e.g.
["set", "max_mds", "2"] # valid
["set", "max_mds"] # invalid
*/
ss << "incomplete list of key-val pairs provided "
<< fsops_vec.size() - 1;
return -EINVAL;
}
}
pg_pool_t const *data_pool = mon->osdmon()->osdmap.get_pg_pool(data);
ceph_assert(data_pool != NULL); // Checked it existed above
pg_pool_t const *metadata_pool = mon->osdmon()->osdmap.get_pg_pool(metadata);
@@ -257,41 +276,66 @@ class FsNewHandler : public FileSystemCommandHandler
mon->osdmon()->wait_for_writeable(op, new PaxosService::C_RetryMessage(mon->mdsmon(), op));
return -EAGAIN;
}
mon->osdmon()->do_application_enable(data, APP_NAME_CEPHFS, "data",
fs_name, true);
mon->osdmon()->do_application_enable(metadata, APP_NAME_CEPHFS,
"metadata", fs_name, true);
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::RECOVERY_PRIORITY,
static_cast<int64_t>(5));
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::PG_NUM_MIN,
static_cast<int64_t>(16));
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::PG_AUTOSCALE_BIAS,
static_cast<double>(4.0));
mon->osdmon()->propose_pending();
bool recover = false;
cmd_getval(cmdmap, "recover", recover);
// All checks passed, go ahead and create.
auto&& fs = fsmap.create_filesystem(fs_name, metadata, data,
mon->get_quorum_con_features(), fscid, recover);
auto fs = fsmap.create_filesystem(fs_name, metadata, data, mon->get_quorum_con_features(), recover);
ss << "new fs with metadata pool " << metadata << " and data pool " << data;
if (recover) {
return 0;
// set fs options
string set_fsops_info;
for (size_t i = 1 ; i < fsops_vec.size() ; i+=2) {
std::ostringstream oss;
int ret = set_val(mon, fsmap, op, cmdmap, oss, &fs, fsops_vec[i], fsops_vec[i+1]);
if (ret < 0) {
ss << oss.str();
return ret;
}
if ((i + 2) <= fsops_vec.size()) {
set_fsops_info.append("; ");
}
set_fsops_info.append(oss.str());
}
// assign a standby to rank 0 to avoid health warnings
auto info = fsmap.find_replacement_for({fs.get_fscid(), 0});
{
auto& cfs = fsmap.commit_filesystem(fscid, std::move(fs));
if (info) {
mon->clog->info() << info->human_name() << " assigned to filesystem "
<< fs_name << " as rank 0";
fsmap.promote(info->global_id, fs.get_fscid(), 0);
ss << "new fs with metadata pool " << metadata << " and data pool " << data;
ss << set_fsops_info;
mon->osdmon()->do_application_enable(data,
pg_pool_t::APPLICATION_NAME_CEPHFS,
"data", fs_name, true);
mon->osdmon()->do_application_enable(metadata,
pg_pool_t::APPLICATION_NAME_CEPHFS,
"metadata", fs_name, true);
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::RECOVERY_PRIORITY,
static_cast<int64_t>(5));
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::PG_NUM_MIN,
static_cast<int64_t>(16));
mon->osdmon()->do_set_pool_opt(metadata,
pool_opts_t::PG_AUTOSCALE_BIAS,
static_cast<double>(4.0));
mon->osdmon()->propose_pending();
if (recover) {
return 0;
}
// assign a standby to all the ranks to avoid health warnings
for (int i = 0 ; i < cfs.get_mds_map().get_max_mds() ; ++i) {
auto info = fsmap.find_replacement_for({cfs.get_fscid(), i});
if (info) {
mon->clog->info() << info->human_name() << " assigned to filesystem "
<< cfs.get_mds_map().get_fs_name() << " as rank " << i;
fsmap.promote(info->global_id, cfs.get_fscid(), i);
} else {
break;
}
}
}
return 0;
@@ -328,13 +372,38 @@ public:
return -EINVAL;
}
string val;
string interr;
int64_t n = 0;
if (!cmd_getval(cmdmap, "val", val)) {
return -EINVAL;
}
return set_val(mon, fsmap, op, cmdmap, ss, fsp->get_fscid(), var, val);
}
};
static void modify_filesystem(FSMap& fsmap, auto&& fsv, auto&& fn)
{
if (std::holds_alternative<Filesystem*>(fsv)) {
fn(*std::get<Filesystem*>(fsv));
} else if (std::holds_alternative<fs_cluster_id_t>(fsv)) {
fsmap.modify_filesystem(std::get<fs_cluster_id_t>(fsv), std::move(fn));
} else ceph_assert(0);
}
int FileSystemCommandHandler::set_val(Monitor *mon, FSMap& fsmap, MonOpRequestRef op,
const cmdmap_t& cmdmap, std::ostream &ss, fs_or_fscid fsv,
std::string var, std::string val)
{
const Filesystem* fsp;
if (std::holds_alternative<Filesystem*>(fsv)) {
fsp = std::get<Filesystem*>(fsv);
} else if (std::holds_alternative<fs_cluster_id_t>(fsv)) {
fsp = &fsmap.get_filesystem(std::get<fs_cluster_id_t>(fsv));
} else ceph_assert(0);
{
std::string interr;
// we got a string. see if it contains an int.
n = strict_strtoll(val.c_str(), 10, &interr);
int64_t n = strict_strtoll(val.c_str(), 10, &interr);
if (var == "max_mds") {
// NOTE: see also "mds set_max_mds", which can modify the same field.
if (interr.length()) {
@@ -359,8 +428,7 @@ public:
return -EINVAL;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().clear_flag(CEPH_MDSMAP_NOT_JOINABLE);
@@ -383,16 +451,14 @@ public:
}
ss << "inline data enabled";
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().set_inline_data_enabled(true);
});
} else {
ss << "inline data disabled";
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().set_inline_data_enabled(false);
@@ -404,8 +470,7 @@ public:
} else {
ss << "setting the metadata load balancer to " << val;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[val](auto&& fs)
{
fs.get_mds_map().set_balancer(val);
@@ -426,8 +491,7 @@ public:
}
ss << "setting the metadata balancer rank mask to " << val;
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[val](auto&& fs)
{
fs.get_mds_map().set_bal_rank_mask(val);
@@ -442,8 +506,7 @@ public:
ss << var << " must be at least " << CEPH_MIN_STRIPE_UNIT;
return -ERANGE;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().set_max_filesize(n);
@@ -453,8 +516,7 @@ public:
ss << var << " requires an integer value";
return -EINVAL;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().set_max_xattr_size(n);
@@ -467,16 +529,14 @@ public:
}
if (!enable_snaps) {
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().clear_snaps_allowed();
});
ss << "disabled new snapshots";
} else {
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().set_snaps_allowed();
@@ -504,16 +564,14 @@ public:
if (enable) {
ss << "enabled multimds with snapshot";
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().set_multimds_snaps_allowed();
});
} else {
ss << "disabled multimds with snapshot";
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().clear_multimds_snaps_allowed();
@@ -531,8 +589,7 @@ public:
ss << fsp->get_mds_map().get_fs_name();
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[is_down](auto&& fs)
{
if (is_down) {
@@ -563,8 +620,7 @@ public:
ss << fsp->get_mds_map().get_fs_name();
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[joinable](auto&& fs)
{
if (joinable) {
@@ -593,8 +649,7 @@ public:
ss << var << " must be non-negative";
return -ERANGE;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().set_standby_count_wanted(n);
@@ -608,8 +663,7 @@ public:
ss << var << " must be at least 30s";
return -ERANGE;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().set_session_timeout((uint32_t)n);
@@ -623,8 +677,7 @@ public:
ss << var << " must be at least 30s";
return -ERANGE;
}
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[n](auto&& fs)
{
fs.get_mds_map().set_session_autoclose((uint32_t)n);
@@ -693,7 +746,7 @@ public:
auto f = [vno](auto&& fs) {
fs.get_mds_map().set_min_compat_client(vno);
};
fsmap.modify_filesystem(fsp->get_fscid(), std::move(f));
modify_filesystem(fsmap, fsv, std::move(f));
} else if (var == "refuse_client_session") {
bool refuse_session = false;
int r = parse_bool(val, &refuse_session, ss);
@ -703,8 +756,7 @@ public:
if (refuse_session) {
if (!(fsp->get_mds_map().test_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION))) {
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().set_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION);
@ -715,8 +767,7 @@ public:
}
} else {
if (fsp->get_mds_map().test_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION)) {
fsmap.modify_filesystem(
fsp->get_fscid(),
modify_filesystem(fsmap, fsv,
[](auto&& fs)
{
fs.get_mds_map().clear_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION);
@@ -762,10 +813,9 @@ public:
ss << "unknown variable " << var;
return -EINVAL;
}
return 0;
}
};
return 0;
}
class CompatSetHandler : public FileSystemCommandHandler
{


@@ -30,11 +30,13 @@ class FileSystemCommandHandler : protected CommandHandler
protected:
std::string prefix;
using fs_or_fscid = std::variant<Filesystem*, fs_cluster_id_t>;
enum {
POOL_METADATA,
POOL_DATA_DEFAULT,
POOL_DATA_EXTRA,
};
/**
* Return 0 if the pool is suitable for use with CephFS, or
* in case of errors return a negative error code, and populate
@@ -52,6 +54,8 @@ protected:
virtual std::string const &get_prefix() const {return prefix;}
int set_val(Monitor *mon, FSMap& fsmap, MonOpRequestRef op, const cmdmap_t& cmdmap, std::ostream &ss, fs_or_fscid fs, std::string var, std::string val);
public:
FileSystemCommandHandler(const std::string &prefix_)
: prefix(prefix_)


@@ -349,7 +349,9 @@ COMMAND("fs new "
"name=force,type=CephBool,req=false "
"name=allow_dangerous_metadata_overlay,type=CephBool,req=false "
"name=fscid,type=CephInt,range=0,req=false "
"name=recover,type=CephBool,req=false",
"name=recover,type=CephBool,req=false "
"name=yes_i_really_really_mean_it,type=CephBool,req=false "
"name=set,type=CephString,n=N,req=false",
"make new filesystem using named pools <metadata> and <data>",
"fs", "rw")
COMMAND("fs fail "