ceph-volume/tests: use pytest-rerunfailures

We already install the dependency via the ceph-ansible requirements.txt, so to
avoid false positives (like after rebooting a node) we can retry failing tests.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
Dimitri Savineau 2021-07-21 17:07:24 -04:00
parent b758fdd829
commit 95056a24e4
3 changed files with 8 additions and 8 deletions
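
For context, and not part of this commit: the --reruns and --reruns-delay flags
come from the pytest-rerunfailures plugin, which must be importable in the test
environment (here it already is, via the ceph-ansible requirements.txt). A
minimal, hypothetical tox.ini sketch showing the flags in isolation:

    [testenv]
    deps =
        pytest
        pytest-rerunfailures
    commands =
        # rerun each failing test up to 5 times, sleeping 10 seconds
        # between attempts, before reporting it as a real failure
        py.test --reruns 5 --reruns-delay 10 {posargs}

In the commit below only the py.test invocations change; the plugin itself is
not added to deps because it is already pulled in elsewhere.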

File 1 of 3

@@ -51,7 +51,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 # test cluster state using testinfra
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # reboot all vms - attempt
 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,13 +60,13 @@ commands=
 sleep 30
 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # destroy an OSD, zap it's device and recreate it using it's ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 # retest to ensure cluster came back up correctly
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # test zap OSDs by ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml

File 2 of 3

@@ -51,7 +51,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 # test cluster state using testinfra
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # reboot all vms - attempt
 bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,12 +60,12 @@ commands=
 sleep 30
 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # destroy an OSD, zap it's device and recreate it using it's ID
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 # retest to ensure cluster came back up correctly
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}

File 3 of 3

@@ -41,7 +41,7 @@ commands=
 ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 # test cluster state testinfra
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
 ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -53,6 +53,6 @@ commands=
 sleep 120
 # retest to ensure cluster came back up correctly after rebooting
-py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}