From e770bb9075d931913847a572d121e02a2e349ca8 Mon Sep 17 00:00:00 2001
From: Kotresh HR
Date: Tue, 23 Jun 2020 23:49:22 +0530
Subject: [PATCH] mgr/volumes: Validate mon_allow_pool_delete before volume
 deletion

Volume deletion wasn't validating the mon_allow_pool_delete config
before destroying volume metadata. Hence, when mon_allow_pool_delete
was set to false, the metadata was deleted but the pool deletion
failed, leaving the volume in an inconsistent state. This patch
validates the config before going ahead with the deletion.

Fixes: https://tracker.ceph.com/issues/45662
Signed-off-by: Kotresh HR
---
 qa/tasks/cephfs/test_volumes.py     | 28 ++++++++++++++++++++++++++++
 src/pybind/mgr/volumes/fs/volume.py | 12 ++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 4a4c8a57472..0f5dad54879 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -227,6 +227,7 @@ class TestVolumes(CephFSTestCase):
         self.vol_created = False
         self._enable_multi_fs()
         self._create_or_reuse_test_volume()
+        self.config_set('mon', 'mon_allow_pool_delete', True)
 
     def tearDown(self):
         if self.vol_created:
@@ -333,6 +334,33 @@ class TestVolumes(CephFSTestCase):
         for pool in vol_status["pools"]:
             self.assertNotIn(pool["name"], pools)
 
+    def test_volume_rm_when_mon_delete_pool_false(self):
+        """
+        That the volume can only be removed when mon_allow_pool_delete is set
+        to true and verify that the pools are removed after volume deletion.
+        """
+        self.config_set('mon', 'mon_allow_pool_delete', False)
+        try:
+            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+        except CommandFailedError as ce:
+            self.assertEqual(ce.exitstatus, errno.EPERM,
+                             "expected the 'fs volume rm' command to fail with EPERM, "
+                             "but it failed with {0}".format(ce.exitstatus))
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self.config_set('mon', 'mon_allow_pool_delete', True)
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        #check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames,
+                         "volume {0} exists after removal".format(self.volname))
+        #check if pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools,
+                             "pool {0} exists after volume removal".format(pool["name"]))
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index bd437929556..f0a39cd41c3 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -98,6 +98,18 @@ class VolumeClient(CephfsClient):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        ret, out, err = self.mgr.check_mon_command({
+            'prefix': 'config get',
+            'key': 'mon_allow_pool_delete',
+            'who': 'mon',
+            'format': 'json',
+        })
+        mon_allow_pool_delete = json.loads(out)
+        if not mon_allow_pool_delete:
+            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
+                "set the mon_allow_pool_delete config option to true before volumes " \
+                "can be deleted"
+
         metadata_pool, data_pools = get_pool_names(self.mgr, volname)
         if not metadata_pool:
             return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
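
For reference, below is a minimal, self-contained sketch of the guard this patch adds
to VolumeClient.delete_volume(), runnable outside the mgr. FakeMgr and
volume_delete_guard are hypothetical names introduced only for illustration; in the
module itself the query goes through self.mgr.check_mon_command() exactly as shown in
the volume.py hunk above, and the JSON-decoded config value is used as a plain boolean.

# Illustrative sketch only -- FakeMgr is a hypothetical stub standing in for the
# real mgr handle so the check can be exercised without a running cluster.
import errno
import json


class FakeMgr:
    """Hypothetical stub returning a canned value for 'config get'."""
    def __init__(self, allow_pool_delete):
        self.allow_pool_delete = allow_pool_delete

    def check_mon_command(self, cmd):
        # Mimic the (ret, out, err) tuple shape; 'out' carries the JSON-encoded value.
        assert cmd['prefix'] == 'config get'
        return 0, json.dumps(self.allow_pool_delete), ''


def volume_delete_guard(mgr):
    """Return an (errno, stdout, stderr) error tuple when pool deletion is disabled,
    or None when deletion may proceed, mirroring the check added by this patch."""
    ret, out, err = mgr.check_mon_command({
        'prefix': 'config get',
        'key': 'mon_allow_pool_delete',
        'who': 'mon',
        'format': 'json',
    })
    if not json.loads(out):
        return -errno.EPERM, "", "pool deletion is disabled; set " \
            "mon_allow_pool_delete to true before deleting volumes"
    return None


if __name__ == '__main__':
    assert volume_delete_guard(FakeMgr(True)) is None
    assert volume_delete_guard(FakeMgr(False))[0] == -errno.EPERM

Because mon_allow_pool_delete is a boolean option and the command is issued with
'format': 'json', the decoded output is True or False, which is why the patch's simple
truthiness check on json.loads(out) is sufficient.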