Mirror of https://github.com/ceph/ceph
Merge PR #37629 into master

commit c569036c5d

* refs/pull/37629/head:
	qa/cephfs: add session_timeout option support
	qa/cephfs: move the cephfs's operations setting to create()
	qa/cephfs: add 'cephfs:' section support

Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
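In short: per-filesystem settings move out of the ceph task's own config and into a 'cephfs:' section that the task forwards wholesale to Filesystem, which applies any non-default values when it creates the filesystem. A minimal standalone sketch of that apply-on-create logic follows; apply_fs_config and the apply callback are illustrative stand-ins, not the actual teuthology API (the real code is in the diffs below):

    # Sketch of the logic this PR adds to Filesystem.create(). The `apply`
    # callback is a hypothetical stand-in for set_var's monitor-command plumbing.
    def apply_fs_config(fs_config, apply):
        if fs_config is None:  # job has no 'cephfs:' section
            return
        max_mds = fs_config.get('max_mds', 1)
        if max_mds > 1:  # 1 is the filesystem default
            apply('max_mds', max_mds)
        session_timeout = fs_config.get('session_timeout', 60)
        if session_timeout != 60:  # 60 seconds is the mdsmap default
            apply('session_timeout', session_timeout)

    if __name__ == '__main__':
        # An example 'cephfs:' section as it would appear in a parsed job YAML:
        apply_fs_config({'max_mds': 2, 'session_timeout': 300},
                        lambda var, val: print('ceph fs set cephfs', var, val))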
@@ -4,7 +4,8 @@ roles:
 - [client.0, client.1]
 overrides:
   ceph:
-    max_mds: 3
+    cephfs:
+      max_mds: 3
 openstack:
 - volumes: # attached to each instance
     count: 4
@@ -4,7 +4,8 @@ roles:
 - [client.0, client.1]
 overrides:
   ceph:
-    max_mds: 9
+    cephfs:
+      max_mds: 9
 openstack:
 - volumes: # attached to each instance
     count: 4
qa/cephfs/overrides/session_timeout.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+overrides:
+  ceph:
+    cephfs:
+      session_timeout: 300
qa/suites/fs/basic_workload/overrides/session_timeout.yaml (new symbolic link)
@@ -0,0 +1 @@
+.qa/cephfs/overrides/session_timeout.yaml
qa/suites/fs/thrash/overrides/session_timeout.yaml (new symbolic link)
@@ -0,0 +1 @@
+.qa/cephfs/overrides/session_timeout.yaml
@@ -1,3 +1,4 @@
 overrides:
   ceph:
-    max_mds: 1
+    cephfs:
+      max_mds: 1
@@ -1,3 +1,4 @@
 overrides:
   ceph:
-    max_mds: 2
+    cephfs:
+      max_mds: 2
@@ -1,3 +1,4 @@
 overrides:
   ceph:
-    max_mds: 1
+    cephfs:
+      max_mds: 1
@@ -1,3 +1,4 @@
 overrides:
   ceph:
-    max_mds: 2
+    cephfs:
+      max_mds: 2
qa/suites/fs/verify/overrides/session_timeout.yaml (new symbolic link)
@@ -0,0 +1 @@
+.qa/cephfs/overrides/session_timeout.yaml
@@ -428,12 +428,8 @@ def cephfs_setup(ctx, config):
     if mdss.remotes:
         log.info('Setting up CephFS filesystem...')

-        fs = Filesystem(ctx, name='cephfs', create=True,
-                        ec_profile=config.get('cephfs_ec_profile', None))
-
-        max_mds = config.get('max_mds', 1)
-        if max_mds > 1:
-            fs.set_max_mds(max_mds)
+        Filesystem(ctx, fs_config=config.get('cephfs', None), name='cephfs',
+                   create=True, ec_profile=config.get('cephfs_ec_profile', None))

     yield
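Note that the max_mds handling removed here is not dropped: it reappears below in Filesystem.create(), which now receives the whole 'cephfs:' section via the new fs_config argument (this is the "move the cephfs's operations setting to create()" commit).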
@@ -1710,6 +1706,20 @@ def task(ctx, config):
             mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
             mount_options: [nobarrier, inode64]

+    To change the cephfs's default max_mds (1), use::
+
+        tasks:
+        - ceph:
+            cephfs:
+              max_mds: 2
+
+    To change the mdsmap's default session_timeout (60 seconds), use::
+
+        tasks:
+        - ceph:
+            cephfs:
+              session_timeout: 300
+
     Note, this will cause the task to check the /scratch_devs file on each node
     for available devices. If no such file is found, /dev/sdb will be used.
@@ -452,7 +452,7 @@ class Filesystem(MDSCluster):
     This object is for driving a CephFS filesystem. The MDS daemons driven by
     MDSCluster may be shared with other Filesystems.
     """
-    def __init__(self, ctx, fscid=None, name=None, create=False,
+    def __init__(self, ctx, fs_config=None, fscid=None, name=None, create=False,
                  ec_profile=None):
         super(Filesystem, self).__init__(ctx)
@@ -463,6 +463,7 @@ class Filesystem(MDSCluster):
         self.metadata_overlay = False
         self.data_pool_name = None
         self.data_pools = None
+        self.fs_config = fs_config

         client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
         self.client_id = client_list[0]
@@ -570,6 +571,9 @@ class Filesystem(MDSCluster):
     def set_max_mds(self, max_mds):
         self.set_var("max_mds", "%d" % max_mds)

+    def set_session_timeout(self, timeout):
+        self.set_var("session_timeout", "%d" % timeout)
+
     def set_allow_standby_replay(self, yes):
         self.set_var("allow_standby_replay", yes)
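For context, set_var issues a monitor command, matching the existing set_max_mds above it; assuming it wraps `ceph fs set <fsname> <var> <value>` (an inference from the surrounding code, not shown in this diff), the new helper is roughly equivalent to:

    # Standalone equivalent of fs.set_session_timeout(300), assuming set_var
    # wraps the `ceph fs set` monitor command as set_max_mds does.
    import subprocess
    subprocess.check_call(
        ['ceph', 'fs', 'set', 'cephfs', 'session_timeout', '300'])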
@@ -632,6 +636,16 @@ class Filesystem(MDSCluster):
         else:
             raise

+        if self.fs_config is not None:
+            max_mds = self.fs_config.get('max_mds', 1)
+            if max_mds > 1:
+                self.set_max_mds(max_mds)
+
+            # If absent will use the default value (60 seconds)
+            session_timeout = self.fs_config.get('session_timeout', 60)
+            if session_timeout != 60:
+                self.set_session_timeout(session_timeout)
+
         self.getinfo(refresh = True)

     def destroy(self, reset_obj_attrs=True):
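Both settings above are guarded: they are pushed only when they differ from the defaults (max_mds 1, session_timeout 60 seconds), so a job with no 'cephfs:' section, or one that spells out the defaults explicitly, triggers no extra fs set commands during create().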
@@ -1166,6 +1166,7 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
         self.metadata_overlay = False
         self.data_pool_name = None
         self.data_pools = None
+        self.fs_config = None

         # Hack: cheeky inspection of ceph.conf to see what MDSs exist
         self.mds_ids = set()