diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index a2c08e75307..f5d44e68556 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -39,7 +39,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
-  vagrant up --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # create logical volumes to test with on the vms
@@ -58,7 +58,7 @@ commands=
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
   # reboot all vms
-  vagrant reload --no-provision
+  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
 
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
@@ -69,4 +69,4 @@ commands=
   # retest to ensure cluster came back up correctly
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy --force
+  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
index e8e62dc1279..21d9c3a709f 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -36,7 +36,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
-  vagrant up --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # use ceph-ansible to deploy a ceph cluster on the vms
@@ -52,7 +52,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # reboot all vms
-  vagrant reload --no-provision
+  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
 
   # wait 2 minutes for services to be ready
   sleep 120
@@ -60,4 +60,4 @@ commands=
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy --force
+  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force