ceph-volume/tests: use pytest rerunfailures
We already install the dependency from ceph-ansible's requirements.txt, and to avoid false positives (such as after rebooting a node) we can retry failing tests.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
parent b758fdd829
commit 95056a24e4
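For context (not part of this change): the --reruns and --reruns-delay options added in the diff below come from the pytest-rerunfailures plugin. A minimal, self-contained sketch of the behaviour, with a made-up test file name and a simulated transient failure:

# test_rerun_example.py -- hypothetical illustration, not a test from this repository.
# Run with:  py.test --reruns 5 --reruns-delay 10 test_rerun_example.py
# The test fails on its first attempt and passes on a rerun, so the run is
# ultimately reported as passed instead of failed.

ATTEMPTS = {"count": 0}

def test_simulated_transient_failure():
    ATTEMPTS["count"] += 1
    # First attempt: pretend the node is still booting and fail.
    # Reruns keep module state, so the second attempt succeeds.
    assert ATTEMPTS["count"] > 1, "simulated transient failure on first attempt"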
@@ -51,7 +51,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,13 +60,13 @@ commands=
   sleep 30

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # test zap OSDs by ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
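The remaining hunks below apply the same substitution to the other tox.ini files touched by this commit. As an aside, pytest-rerunfailures can also be scoped to individual tests with its flaky marker instead of the global flags; a hypothetical sketch (not how the ceph-volume suites are configured, which rely on the command-line options above):

import pytest

# Hypothetical per-test opt-in: only this test is retried, up to 5 times,
# waiting 10 seconds between attempts.
@pytest.mark.flaky(reruns=5, reruns_delay=10)
def test_cluster_health_placeholder():
    # Placeholder assertion standing in for a real post-reboot health check.
    assert True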
@@ -51,7 +51,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,12 +60,12 @@ commands=
   sleep 30

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
@@ -41,7 +41,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

   # test cluster state testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -53,6 +53,6 @@ commands=
   sleep 120

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
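The py.test invocations above run testinfra checks over the Ansible connection against VMs that have just been rebooted, which is where transient failures (SSH not yet accepting connections, OSD services still starting) produce the false positives this commit works around. A sketch of the kind of check involved; the service unit name is an assumption for illustration, not a test taken from this repository:

# Hypothetical testinfra-style check. With --reruns 5 --reruns-delay 10, a
# failure caused by a service that is still starting after a reboot is
# retried instead of failing the whole run.
def test_ceph_osd_service_running(host):
    svc = host.service("ceph-osd@0")  # assumed unit name, for illustration only
    assert svc.is_running
    assert svc.is_enabled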