diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 5708f7671bc..f322fb4f8cc 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -121,6 +121,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That a new file system warns/fails with an EC default data pool.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
@@ -139,6 +140,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That a new file system succeeds with an EC default data pool with --force.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
@@ -149,6 +151,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That a new file system fails with an EC default data pool without overwrite.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
@@ -176,6 +179,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']