mgr/volumes: Validate mon_allow_pool_delete before volume deletion
Volume deletion did not validate the mon_allow_pool_delete config option
before destroying the volume's metadata. When mon_allow_pool_delete was set
to false, the metadata was therefore deleted while the pool deletion failed,
leaving the volume in an inconsistent state. This patch validates the option
before proceeding with the deletion.

Fixes: https://tracker.ceph.com/issues/45662
Signed-off-by: Kotresh HR <khiremat@redhat.com>
commit e770bb9075
parent 8415e75682
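The bug was one of ordering: the destructive step (removing the volume's
metadata) ran before the precondition for the irreversible step (removing
the pools) was known to hold. Below is a minimal sketch of the pattern the
fix applies, using hypothetical stand-in helpers rather than the actual mgr
code, which follows in the diff:

    import errno

    allow_pool_delete = False  # stand-in for the mon_allow_pool_delete option

    def delete_metadata(volname):
        # Irreversible: once this runs, the volume is no longer intact.
        print("metadata for {0} destroyed".format(volname))

    def delete_pools(volname):
        # Fails when pool deletion is disabled cluster-wide.
        if not allow_pool_delete:
            raise OSError(errno.EPERM, "pool deletion is disabled")
        print("pools for {0} deleted".format(volname))

    def delete_volume(volname):
        # The fix: validate the precondition before anything is destroyed,
        # so a refusal leaves the volume fully intact.
        if not allow_pool_delete:
            return -errno.EPERM
        delete_metadata(volname)
        delete_pools(volname)
        return 0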
qa/tasks/cephfs/test_volumes.py

@@ -227,6 +227,7 @@ class TestVolumes(CephFSTestCase):
         self.vol_created = False
         self._enable_multi_fs()
         self._create_or_reuse_test_volume()
+        self.config_set('mon', 'mon_allow_pool_delete', True)

     def tearDown(self):
         if self.vol_created:
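setUp now enables mon_allow_pool_delete up front, presumably so that tests
which remove volumes (and tearDown's cleanup) can actually delete the backing
pools; the new test below is the one place that deliberately turns the option
off and back on.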
@@ -333,6 +334,33 @@ class TestVolumes(CephFSTestCase):
         for pool in vol_status["pools"]:
             self.assertNotIn(pool["name"], pools)

+    def test_volume_rm_when_mon_delete_pool_false(self):
+        """
+        That the volume can only be removed when mon_allow_pool_delete is set
+        to true and verify that the pools are removed after volume deletion.
+        """
+        self.config_set('mon', 'mon_allow_pool_delete', False)
+        try:
+            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+        except CommandFailedError as ce:
+            self.assertEqual(ce.exitstatus, errno.EPERM,
+                             "expected the 'fs volume rm' command to fail with EPERM, "
+                             "but it failed with {0}".format(ce.exitstatus))
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self.config_set('mon', 'mon_allow_pool_delete', True)
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames,
+                         "volume {0} exists after removal".format(self.volname))
+        # check if pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools,
+                             "pool {0} exists after volume removal".format(pool["name"]))
+
     ### basic subvolume operations

     def test_subvolume_create_and_rm(self):
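The test exercises both sides of the new check: while mon_allow_pool_delete
is false, fs volume rm must fail with EPERM and the volume must remain
queryable via fs status; after re-enabling the option, the same command must
succeed, and both the filesystem and the pools recorded earlier in vol_status
must be gone.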
src/pybind/mgr/volumes/fs/volume.py

@@ -98,6 +98,18 @@ class VolumeClient(CephfsClient):
                   "that is what you want, re-issue the command followed by " \
                   "--yes-i-really-mean-it.".format(volname)

+        ret, out, err = self.mgr.check_mon_command({
+            'prefix': 'config get',
+            'key': 'mon_allow_pool_delete',
+            'who': 'mon',
+            'format': 'json',
+        })
+        mon_allow_pool_delete = json.loads(out)
+        if not mon_allow_pool_delete:
+            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
+                "set the mon_allow_pool_delete config option to true before volumes " \
+                "can be deleted"
+
         metadata_pool, data_pools = get_pool_names(self.mgr, volname)
         if not metadata_pool:
             return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
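Note the 'format': 'json' in the mon command: with it, config get returns a
JSON boolean literal, and json.loads turns that into the Python bool the
guard tests. A quick, cluster-free illustration of just that parsing step
(assuming, as the mgr code implies, that the mon replies with true or false):

    import json

    # The mgr code runs json.loads(out) on the mon's reply; with
    # format=json the option value arrives as a JSON boolean literal.
    for raw in ('true', 'false'):
        value = json.loads(raw)
        print('{0} -> {1!r}'.format(raw, value))
    # prints: true -> True, false -> False

Because the check runs before get_pool_names and any metadata removal, a
cluster with pool deletion disabled now refuses the whole operation up front
instead of leaving a half-removed volume behind.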