mgr/volumes: Validate mon_allow_pool_delete before volume deletion

Volume deletion did not validate the mon_allow_pool_delete config
before destroying the volume's metadata. Hence, when
mon_allow_pool_delete was set to false, the metadata was deleted but
the pool deletion failed, leaving the volume in an inconsistent state.
This patch validates the config before proceeding with the deletion.

Fixes: https://tracker.ceph.com/issues/45662
Signed-off-by: Kotresh HR <khiremat@redhat.com>
commit e770bb9075 (parent 8415e75682)
Kotresh HR, 2020-06-23 23:49:22 +05:30
2 changed files with 40 additions and 0 deletions
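
In operator terms, the intended behavior after this change looks
roughly like the following transcript (vol_a is a hypothetical volume
name; the EPERM message comes from the new check in the second hunk
below):

$ ceph fs volume rm vol_a --yes-i-really-mean-it
Error EPERM: pool deletion is disabled; you must first set the
mon_allow_pool_delete config option to true before volumes can be deleted
$ ceph config set mon mon_allow_pool_delete true
$ ceph fs volume rm vol_a --yes-i-really-mean-it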

qa/tasks/cephfs/test_volumes.py

@@ -227,6 +227,7 @@ class TestVolumes(CephFSTestCase):
         self.vol_created = False
         self._enable_multi_fs()
         self._create_or_reuse_test_volume()
+        self.config_set('mon', 'mon_allow_pool_delete', True)

     def tearDown(self):
         if self.vol_created:
@@ -333,6 +334,33 @@ class TestVolumes(CephFSTestCase):
         for pool in vol_status["pools"]:
             self.assertNotIn(pool["name"], pools)

+    def test_volume_rm_when_mon_delete_pool_false(self):
+        """
+        That the volume can only be removed when mon_allow_pool_delete is set
+        to true and verify that the pools are removed after volume deletion.
+        """
+        self.config_set('mon', 'mon_allow_pool_delete', False)
+        try:
+            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+        except CommandFailedError as ce:
+            self.assertEqual(ce.exitstatus, errno.EPERM,
+                             "expected the 'fs volume rm' command to fail with EPERM, "
+                             "but it failed with {0}".format(ce.exitstatus))
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self.config_set('mon', 'mon_allow_pool_delete', True)
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if the volume is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames,
+                         "volume {0} exists after removal".format(self.volname))
+
+        # check if the pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools,
+                             "pool {0} exists after volume removal".format(pool["name"]))
+
     ### basic subvolume operations
     def test_subvolume_create_and_rm(self):
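
The new test can be exercised locally in the usual way for CephFS qa
tests; assuming a vstart cluster is already running, an invocation
along these lines should work (vstart_runner usage per the Ceph
developer guide, from the build directory):

python3 ../qa/tasks/vstart_runner.py tasks.cephfs.test_volumes.TestVolumes.test_volume_rm_when_mon_delete_pool_false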

src/pybind/mgr/volumes/fs/volume.py

@@ -98,6 +98,18 @@ class VolumeClient(CephfsClient):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)

+        ret, out, err = self.mgr.check_mon_command({
+            'prefix': 'config get',
+            'key': 'mon_allow_pool_delete',
+            'who': 'mon',
+            'format': 'json',
+        })
+        mon_allow_pool_delete = json.loads(out)
+        if not mon_allow_pool_delete:
+            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
+                "set the mon_allow_pool_delete config option to true before volumes " \
+                "can be deleted"
+
         metadata_pool, data_pools = get_pool_names(self.mgr, volname)
         if not metadata_pool:
             return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
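
For readers who don't work in the mgr codebase, here is a minimal,
self-contained sketch of the guard above. FakeMgr and
can_delete_volume are illustrative stand-ins invented for this example,
not part of the commit; check_mon_command mirrors the real MgrModule
call, which yields a (retval, stdout, stderr) tuple:

import errno
import json

class FakeMgr(object):
    """Illustrative stand-in for the mgr module handle (not the real API)."""
    def __init__(self, allow_pool_delete):
        self.allow_pool_delete = allow_pool_delete

    def check_mon_command(self, cmd):
        # Emulates 'config get mon mon_allow_pool_delete --format=json';
        # the real call returns (retval, stdout, stderr).
        assert cmd['prefix'] == 'config get'
        return 0, json.dumps(self.allow_pool_delete), ''

def can_delete_volume(mgr):
    # Same guard as the hunk above: ask the mon for the current value
    # and refuse with EPERM when pool deletion is disabled.
    ret, out, err = mgr.check_mon_command({
        'prefix': 'config get',
        'key': 'mon_allow_pool_delete',
        'who': 'mon',
        'format': 'json',
    })
    if not json.loads(out):
        return -errno.EPERM, "", "pool deletion is disabled"
    return 0, "", ""

print(can_delete_volume(FakeMgr(False)))  # (-1, '', 'pool deletion is disabled')
print(can_delete_volume(FakeMgr(True)))   # (0, '', '')

Querying the option through the mon on every call, rather than caching
it, means the check reflects the cluster's configuration at the moment
the rm is issued.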