Merge PR #52686 into main

* refs/pull/52686/head:
	PendingReleaseNotes: note about mandatory fs argument
	doc/cephfs: add note about mandatory --fs argument to snap-schedule
	qa: add test for mandatory fs argument to snap-schedule commands
	mgr/snap-schedule: tweaks to keep mypy happy
	mgr/snap_schedule: validate fs before execution

Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
commit d7dfac8111
Milind Changire 2023-08-29 20:13:50 +05:30
4 changed files with 100 additions and 43 deletions

PendingReleaseNotes

@@ -12,6 +12,8 @@
   a large buildup of session metadata resulting in the MDS going read-only due to
   the RADOS operation exceeding the size threshold. `mds_session_metadata_threshold`
   config controls the maximum size that a (encoded) session metadata can grow.
+* mgr/snap-schedule: For clusters with multiple CephFS file systems, all the
+  snap-schedule commands now expect the '--fs' argument.
 
 >=18.0.0
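
To make the release note concrete: on a cluster hosting more than one file system, omitting '--fs' now fails instead of silently picking a default. A minimal sketch of the behaviour; the command strings follow the snap-schedule documentation, and driving them through subprocess here is purely illustrative:

    import subprocess

    # With two file systems in the cluster, omitting --fs now fails with EINVAL:
    # "filesystem argument is required when there is more than one file system"
    subprocess.run(['ceph', 'fs', 'snap-schedule', 'add', '/', '1h'], check=False)

    # Naming the target file system explicitly is accepted:
    subprocess.run(['ceph', 'fs', 'snap-schedule', 'add', '/', '1h',
                    '--fs', 'cephfs'], check=True)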

doc/cephfs: snap-schedule documentation

@@ -167,6 +167,8 @@ Examples::
 To ensure a new snapshot can be created, one snapshot less than this will be
 retained. So by default, a maximum of 99 snapshots will be retained.
 
+.. note:: The --fs argument is now required if there is more than one file system.
+
 Active and inactive schedules
 -----------------------------
 Snapshot schedules can be added for a path that doesn't exist yet in the
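
The retention cap described in the context lines above reduces to a one-line calculation. A toy restatement, assuming the limit being referenced is the mds_max_snaps_per_dir config with its default of 100 (this is not the module's actual pruning code):

    def max_retained_snapshots(mds_max_snaps_per_dir: int = 100) -> int:
        # one slot is always left free so the next scheduled snapshot can be created
        return mds_max_snaps_per_dir - 1

    assert max_retained_snapshots() == 99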

qa: snap-schedule tests

@@ -39,8 +39,9 @@ class TestSnapSchedulesHelper(CephFSTestCase):
         return self.get_ceph_cmd_stdout("fs", *args)
 
     def fs_snap_schedule_cmd(self, *args, **kwargs):
-        fs = kwargs.pop('fs', self.volname)
-        args += ('--fs', fs)
+        if 'fs' in kwargs:
+            fs = kwargs.pop('fs')
+            args += ('--fs', fs)
         if 'format' in kwargs:
             fmt = kwargs.pop('format')
             args += ('--format', fmt)
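
The point of this helper change is that '--fs' is no longer appended implicitly from self.volname; callers must opt in. A standalone sketch of the new kwargs-to-flags mapping (build_args is a hypothetical stand-in for the method on TestSnapSchedulesHelper):

    from typing import Tuple

    def build_args(*args: str, **kwargs: str) -> Tuple[str, ...]:
        # '--fs' is appended only when the caller passes fs= explicitly
        if 'fs' in kwargs:
            args += ('--fs', kwargs.pop('fs'))
        if 'format' in kwargs:
            args += ('--format', kwargs.pop('format'))
        return args

    assert build_args('list', '/', fs='backup_fs', format='json') == \
        ('list', '/', '--fs', 'backup_fs', '--format', 'json')
    assert build_args('list', '/') == ('list', '/')  # no implicit default fs any more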
@@ -562,3 +563,45 @@ class TestSnapSchedulesSnapdir(TestSnapSchedulesHelper):
         self.remove_snapshots(TestSnapSchedulesSnapdir.TEST_DIRECTORY, sdn)
 
         self.mount_a.run_shell(['rmdir', TestSnapSchedulesSnapdir.TEST_DIRECTORY])
+
+
+"""
+Note that the class TestSnapSchedulesMandatoryFSArgument tests snap-schedule
+commands only for the multi-fs scenario. Commands against a single default fs
+are covered by the tests defined above or elsewhere.
+"""
+class TestSnapSchedulesMandatoryFSArgument(TestSnapSchedulesHelper):
+    REQUIRE_BACKUP_FILESYSTEM = True
+    TEST_DIRECTORY = 'mandatory_fs_argument_test_dir'
+
+    def test_snap_schedule_without_fs_argument(self):
+        """Test that the command fails without --fs in the presence of multiple fs"""
+        test_path = TestSnapSchedulesMandatoryFSArgument.TEST_DIRECTORY
+        self.mount_a.run_shell(['mkdir', '-p', test_path])
+
+        # try setting a schedule on the dir; this should fail now that we are
+        # working with multiple fs; the --fs argument is required when more
+        # than one fs is hosted by the same cluster
+        with self.assertRaises(CommandFailedError):
+            self.fs_snap_schedule_cmd('add', test_path, snap_schedule='1M')
+
+        self.mount_a.run_shell(['rmdir', test_path])
+
+    def test_snap_schedule_for_non_default_fs(self):
+        """Test that the command succeeds with --fs for a non-default fs"""
+        test_path = TestSnapSchedulesMandatoryFSArgument.TEST_DIRECTORY
+        self.mount_a.run_shell(['mkdir', '-p', test_path])
+
+        # use the backup fs as the second fs; all these commands must pass
+        self.fs_snap_schedule_cmd('add', test_path, snap_schedule='1M', fs='backup_fs')
+        self.fs_snap_schedule_cmd('activate', test_path, snap_schedule='1M', fs='backup_fs')
+        self.fs_snap_schedule_cmd('retention', 'add', test_path, retention_spec_or_period='1M', fs='backup_fs')
+        self.fs_snap_schedule_cmd('list', test_path, fs='backup_fs', format='json')
+        self.fs_snap_schedule_cmd('status', test_path, fs='backup_fs', format='json')
+        self.fs_snap_schedule_cmd('retention', 'remove', test_path, retention_spec_or_period='1M', fs='backup_fs')
+        self.fs_snap_schedule_cmd('deactivate', test_path, snap_schedule='1M', fs='backup_fs')
+        self.fs_snap_schedule_cmd('remove', test_path, snap_schedule='1M', fs='backup_fs')
+
+        self.mount_a.run_shell(['rmdir', test_path])

mgr/snap_schedule: module

@@ -38,14 +38,24 @@ class Module(MgrModule):
         self.client = SnapSchedClient(self)
 
     @property
-    def default_fs(self) -> str:
+    def _default_fs(self) -> Tuple[int, str, str]:
         fs_map = self.get('fs_map')
-        if fs_map['filesystems']:
-            return fs_map['filesystems'][0]['mdsmap']['fs_name']
+        if len(fs_map['filesystems']) > 1:
+            return -errno.EINVAL, '', "filesystem argument is required when there is more than one file system"
+        elif len(fs_map['filesystems']) == 1:
+            return 0, fs_map['filesystems'][0]['mdsmap']['fs_name'], "Success"
         else:
             self.log.error('No filesystem instance could be found.')
-            raise CephfsConnectionException(
-                -errno.ENOENT, "no filesystem found")
+            return -errno.ENOENT, "", "no filesystem found"
+
+    def _validate_fs(self, fs: Optional[str]) -> Tuple[int, str, str]:
+        if not fs:
+            rc, fs, err = self._default_fs
+            if rc < 0:
+                return rc, fs, err
+        if not self.has_fs(fs):
+            return -errno.EINVAL, '', f"no such file system: {fs}"
+        return 0, fs, 'Success'
 
     def has_fs(self, fs_name: str) -> bool:
         return fs_name in self.client.get_all_filesystems()
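
The contract of the two helpers above is easiest to see in isolation. A self-contained sketch with a stubbed file-system list (FILESYSTEMS and the free functions are stand-ins for Module state and methods, not the module's actual code):

    import errno
    from typing import Optional, Tuple

    FILESYSTEMS = ['cephfs', 'backup_fs']  # stand-in for fs_map contents

    def default_fs() -> Tuple[int, str, str]:
        if len(FILESYSTEMS) > 1:
            return -errno.EINVAL, '', 'filesystem argument is required when there is more than one file system'
        elif len(FILESYSTEMS) == 1:
            return 0, FILESYSTEMS[0], 'Success'
        return -errno.ENOENT, '', 'no filesystem found'

    def validate_fs(fs: Optional[str]) -> Tuple[int, str, str]:
        if not fs:
            rc, fs, err = default_fs()
            if rc < 0:
                return rc, fs, err
        if fs not in FILESYSTEMS:
            return -errno.EINVAL, '', f'no such file system: {fs}'
        return 0, fs, 'Success'

    assert validate_fs(None)[0] == -errno.EINVAL          # two file systems: --fs is mandatory
    assert validate_fs('backup_fs')[0] == 0               # explicit, existing fs passes
    assert validate_fs('no_such_fs')[0] == -errno.EINVAL  # unknown fs is rejected

Every command handler below starts with this same gate and returns early on a non-zero rc, so the client is never touched with an unresolved file system name.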
@@ -65,11 +75,11 @@ class Module(MgrModule):
         '''
         List current snapshot schedules
         '''
-        use_fs = fs if fs else self.default_fs
-        if not self.has_fs(use_fs):
-            return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            ret_scheds = self.client.get_snap_schedules(use_fs, path)
+            ret_scheds = self.client.get_snap_schedules(fs, path)
         except CephfsConnectionException as e:
             return e.to_tuple()
         except Exception as e:
@@ -87,11 +97,11 @@
         '''
         Get current snapshot schedule for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
-            scheds = self.client.list_snap_schedules(use_fs, path, recursive)
+            scheds = self.client.list_snap_schedules(fs, path, recursive)
             self.log.debug(f'recursive is {recursive}')
         except CephfsConnectionException as e:
             return e.to_tuple()
@@ -119,19 +129,19 @@
         '''
         Set a snapshot schedule for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
             subvol = None
-            self.client.store_snap_schedule(use_fs,
+            self.client.store_snap_schedule(fs,
                                             abs_path,
                                             (abs_path, snap_schedule,
-                                             use_fs, path, start, subvol))
+                                             fs, path, start, subvol))
             suc_msg = f'Schedule set for path {path}'
         except sqlite3.IntegrityError:
-            existing_scheds = self.client.get_snap_schedules(use_fs, path)
+            existing_scheds = self.client.get_snap_schedules(fs, path)
             report = [s.report() for s in existing_scheds]
             error_msg = f'Found existing schedule {report}'
             self.log.error(error_msg)
@@ -153,12 +163,12 @@
         '''
         Remove a snapshot schedule for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
-            self.client.rm_snap_schedule(use_fs, abs_path, repeat, start)
+            self.client.rm_snap_schedule(fs, abs_path, repeat, start)
         except ValueError as e:
             return -errno.ENOENT, '', str(e)
         except CephfsConnectionException as e:
@@ -176,14 +186,14 @@
         '''
         Set a retention specification for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
-            self.client.add_retention_spec(use_fs, abs_path,
-                                           retention_spec_or_period,
-                                           retention_count)
+            self.client.add_retention_spec(fs, abs_path,
+                                           retention_spec_or_period,
+                                           retention_count)
         except ValueError as e:
             return -errno.ENOENT, '', str(e)
         except CephfsConnectionException as e:
@@ -201,12 +211,12 @@
         '''
         Remove a retention specification for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
-            self.client.rm_retention_spec(use_fs, abs_path,
+            self.client.rm_retention_spec(fs, abs_path,
                                           retention_spec_or_period,
                                           retention_count)
         except ValueError as e:
@@ -226,12 +236,12 @@
         '''
         Activate a snapshot schedule for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
-            self.client.activate_snap_schedule(use_fs, abs_path, repeat, start)
+            self.client.activate_snap_schedule(fs, abs_path, repeat, start)
         except ValueError as e:
             return -errno.ENOENT, '', str(e)
         except CephfsConnectionException as e:
@@ -249,12 +259,12 @@
         '''
         Deactivate a snapshot schedule for <path>
         '''
+        rc, fs, err = self._validate_fs(fs)
+        if rc < 0:
+            return rc, fs, err
         try:
-            use_fs = fs if fs else self.default_fs
-            if not self.has_fs(use_fs):
-                return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
             abs_path = path
-            self.client.deactivate_snap_schedule(use_fs, abs_path, repeat, start)
+            self.client.deactivate_snap_schedule(fs, abs_path, repeat, start)
         except ValueError as e:
             return -errno.ENOENT, '', str(e)
         except CephfsConnectionException as e: