qa: unmount all clients before deleting the file system

Otherwise, deleting the file system while clients are still mounted incurs unnecessary timeout waits.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
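
For context, a minimal sketch of the teardown ordering this commit enforces, using the umount_wait and delete_all_filesystems helpers that appear in the diff below; the wrapper name _reset_filesystems is hypothetical and only illustrates the pattern:

    # Hypothetical helper showing the ordering: unmount clients first,
    # then delete the file systems, so teardown never has to wait out
    # session timeouts for clients that still reference the file system.
    def _reset_filesystems(self):
        # require_clean=True asserts the unmount completes cleanly
        # rather than being forced after a timeout
        self.mount_a.umount_wait(require_clean=True)
        # with no clients attached, deletion proceeds without
        # blocking on client eviction timeouts
        self.mds_cluster.delete_all_filesystems()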
Patrick Donnelly, 2020-10-05 10:31:23 -07:00
commit bc25bd70f2 (parent a8a23747aa)


@@ -121,6 +121,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system warns/fails with an EC default data pool.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
@@ -139,6 +140,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system succeeds with an EC default data pool with --force.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
@@ -149,6 +151,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system fails with an EC default data pool without overwrite.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
@@ -176,6 +179,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']