mirror of https://github.com/ceph/ceph (synced 2025-01-03 09:32:43 +00:00)
tasks: lots of s/mds_restart/mds_fail_restart/
Wherever we are subsequently waiting for daemons to be healthy, we should be doing a fail during the restart. Also catch some places that were doing this longhand and use the handy fail_restart version instead.

Signed-off-by: John Spray <john.spray@redhat.com>
This commit is contained in:
parent 79906e3d07
commit 3d3b095bb1
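For readers unfamiliar with the helper, a minimal sketch of the idea follows. It assumes mds_fail_restart() is roughly the longhand stop/fail/restart sequence that the hunks below delete; the FilesystemSketch class and its print stubs are illustrative assumptions, not the real qa-suite Filesystem code.

# Hypothetical sketch only (not the actual ceph-qa-suite Filesystem class):
# it models the longhand stop/fail/restart sequence that this commit replaces
# with a single mds_fail_restart() call, so the ordering is easy to see.

class FilesystemSketch(object):
    def mds_stop(self, mds_id=None):
        print("stop mds daemon %s" % (mds_id or "<default>"))

    def mds_fail(self, mds_id=None):
        # In the real harness this marks the MDS rank failed so the mons
        # will hand the rank out again once the daemon comes back.
        print("mark mds %s failed" % (mds_id or "<default>"))

    def mds_restart(self, mds_id=None):
        print("restart mds daemon %s" % (mds_id or "<default>"))

    def mds_fail_restart(self, mds_id=None):
        # Assumed composition, inferred from the hunks below: the three-step
        # longhand becomes one call, done before the test waits for health
        # (wait_for_daemons / wait_for_state).
        self.mds_stop(mds_id)
        self.mds_fail(mds_id)
        self.mds_restart(mds_id)


if __name__ == "__main__":
    FilesystemSketch().mds_fail_restart("a")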
@@ -30,7 +30,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         # trim log segment as fast as possible
         self.set_conf('mds', 'mds cache size', 100)
         self.set_conf('mds', 'mds verify backtrace', 1)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()

         create_script = "mkdir {0}; for i in `seq 0 500`; do touch {0}/file$i; done"
@@ -98,7 +98,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         self.assertTrue(writer.finished)

         # restart mds to make it writable
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()

@@ -63,7 +63,7 @@ class TestClientLimits(CephFSTestCase):
         open_files = 250

         self.set_conf('mds', 'mds cache size', cache_size)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()

         mount_a_client_id = self.mount_a.get_global_id()
@@ -57,9 +57,7 @@ class TestClientRecovery(CephFSTestCase):
         # Check that after an MDS restart both clients reconnect and continue
         # to handle I/O
         # =====================================================
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

         self.mount_a.create_destroy()
@@ -254,9 +252,7 @@ class TestClientRecovery(CephFSTestCase):
         self.assertGreaterEqual(num_caps, count)

         # Restart MDS. client should trim its cache when reconnecting to the MDS
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

         num_caps = self._session_num_caps(client_id)
@@ -320,9 +316,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_b.wait_for_visible("background_file-2")
         self.mount_b.check_filelock()

-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)

         self.mount_b.check_filelock()
@@ -85,7 +85,7 @@ class TestJournalRepair(CephFSTestCase):
         # Now check the MDS can read what we wrote: truncate the journal
         # and start the mds.
         self.fs.journal_tool(['journal', 'reset'])
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()

         # List files
@@ -280,7 +280,7 @@ class TestJournalRepair(CephFSTestCase):

         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
-        self.fs.mds_restart(active_mds_names[0])
+        self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
         self.mount_a.mount()