qa/tasks/cephadm_cases: increase timeouts in test_cli.py

These tests seem to fail intermittently, but in my testing
the expected events sometimes occur only a few seconds after
we hit the timeout. Increasing the timeouts should make the
tests more consistent. There is no need to mark a test as
failed if something is reported in 34 seconds instead of 25,
especially since cephadm works on a cyclic daemon refresh.

Signed-off-by: Adam King <adking@redhat.com>
This commit is contained in:
Adam King 2022-02-09 20:42:42 -05:00
parent f5b79d7e6b
commit 46f939f057

View File

@ -44,14 +44,14 @@ class TestCephadmCLI(MgrTestCase):
def test_pause(self):
self._orch_cmd('pause')
self.wait_for_health('CEPHADM_PAUSED', 30)
self.wait_for_health('CEPHADM_PAUSED', 60)
self._orch_cmd('resume')
self.wait_for_health_clear(30)
self.wait_for_health_clear(60)
def test_daemon_restart(self):
self._orch_cmd('daemon', 'stop', 'osd.0')
self.wait_for_health('OSD_DOWN', 30)
with safe_while(sleep=1, tries=30) as proceed:
self.wait_for_health('OSD_DOWN', 60)
with safe_while(sleep=2, tries=30) as proceed:
while proceed():
j = json.loads(self._orch_cmd('ps', '--format', 'json'))
d = {d['daemon_name']: d for d in j}
@ -59,7 +59,7 @@ class TestCephadmCLI(MgrTestCase):
break
time.sleep(5)
self._orch_cmd('daemon', 'start', 'osd.0')
self.wait_for_health_clear(90)
self.wait_for_health_clear(120)
self._orch_cmd('daemon', 'restart', 'osd.0')
def test_device_ls_wide(self):
@ -67,7 +67,7 @@ class TestCephadmCLI(MgrTestCase):
def test_cephfs_mirror(self):
self._orch_cmd('apply', 'cephfs-mirror')
self.wait_until_true(lambda: 'cephfs-mirror' in self._orch_cmd('ps'), 30)
self.wait_for_health_clear(30)
self.wait_until_true(lambda: 'cephfs-mirror' in self._orch_cmd('ps'), 60)
self.wait_for_health_clear(60)
self._orch_cmd('rm', 'cephfs-mirror')
self.wait_until_true(lambda: 'cephfs-mirror' not in self._orch_cmd('ps'), 30)
self.wait_until_true(lambda: 'cephfs-mirror' not in self._orch_cmd('ps'), 60)