diff --git a/tasks/mds_auto_repair.py b/tasks/mds_auto_repair.py
index e7580613b2a..a95b3683186 100644
--- a/tasks/mds_auto_repair.py
+++ b/tasks/mds_auto_repair.py
@@ -30,7 +30,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         # trim log segment as fast as possible
         self.set_conf('mds', 'mds cache size', 100)
         self.set_conf('mds', 'mds verify backtrace', 1)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         create_script = "mkdir {0}; for i in `seq 0 500`; do touch {0}/file$i; done"
@@ -98,7 +98,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         self.assertTrue(writer.finished)
 
         # restart mds to make it writable
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
diff --git a/tasks/mds_client_limits.py b/tasks/mds_client_limits.py
index ae722886753..ff91d98236a 100644
--- a/tasks/mds_client_limits.py
+++ b/tasks/mds_client_limits.py
@@ -63,7 +63,7 @@ class TestClientLimits(CephFSTestCase):
         open_files = 250
 
         self.set_conf('mds', 'mds cache size', cache_size)
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         mount_a_client_id = self.mount_a.get_global_id()
diff --git a/tasks/mds_client_recovery.py b/tasks/mds_client_recovery.py
index 0b986f4d4da..4873b7f77c3 100644
--- a/tasks/mds_client_recovery.py
+++ b/tasks/mds_client_recovery.py
@@ -57,9 +57,7 @@ class TestClientRecovery(CephFSTestCase):
         # Check that after an MDS restart both clients reconnect and continue
         # to handle I/O
         # =====================================================
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         self.mount_a.create_destroy()
@@ -254,9 +252,7 @@ class TestClientRecovery(CephFSTestCase):
         self.assertGreaterEqual(num_caps, count)
 
         # Restart MDS. client should trim its cache when reconnecting to the MDS
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         num_caps = self._session_num_caps(client_id)
@@ -320,9 +316,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_b.wait_for_visible("background_file-2")
         self.mount_b.check_filelock()
 
-        self.fs.mds_stop()
-        self.fs.mds_fail()
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE)
 
         self.mount_b.check_filelock()
diff --git a/tasks/mds_journal_repair.py b/tasks/mds_journal_repair.py
index 62ed0534d89..f08f464e186 100644
--- a/tasks/mds_journal_repair.py
+++ b/tasks/mds_journal_repair.py
@@ -85,7 +85,7 @@ class TestJournalRepair(CephFSTestCase):
         # Now check the MDS can read what we wrote: truncate the journal
         # and start the mds.
         self.fs.journal_tool(['journal', 'reset'])
-        self.fs.mds_restart()
+        self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
         # List files
@@ -280,7 +280,7 @@ class TestJournalRepair(CephFSTestCase):
 
         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
-        self.fs.mds_restart(active_mds_names[0])
+        self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
         self.mount_a.mount()
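
Note on the change: every call site that previously issued an explicit mds_stop() / mds_fail() / mds_restart() sequence, or a bare mds_restart(), now calls mds_fail_restart() instead, presumably so the rank is marked failed explicitly rather than waiting for the monitor to notice the daemon went away. Purely as an illustrative sketch (not the actual ceph-qa-suite Filesystem implementation), the consolidated call is assumed to behave like the three-step sequence removed in tasks/mds_client_recovery.py; the optional mds_id parameter is inferred from the mds_fail_restart(active_mds_names[0]) call site in tasks/mds_journal_repair.py:

    # Hypothetical helper, for illustration only: it composes the three
    # existing Filesystem calls that appear in the removed lines above.
    def fail_restart(fs, mds_id=None):
        fs.mds_stop(mds_id)      # stop the running MDS daemon
        fs.mds_fail(mds_id)      # mark the rank failed so takeover/replay starts promptly
        fs.mds_restart(mds_id)   # start the daemon again
        # Callers still block afterwards via fs.wait_for_daemons() or
        # fs.wait_for_state('up:active', ...), exactly as the tests do here.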