Mirror of https://github.com/ceph/ceph
Update users of the teuthology.orchestra.run APIs
Signed-off-by: Zack Cerza <zack.cerza@inktank.com>
commit eac2c2abbb (parent b386f5e5df)
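
The pattern applied throughout the diff below: callers stop reaching into proc.exitstatus (previously a gevent AsyncResult) and use subprocess-style methods instead. A minimal sketch of the before/after mapping, assuming a proc returned by remote.run(..., wait=False) as in the hunks below:

    # Old API: exitstatus was a gevent AsyncResult.
    status = proc.exitstatus.get()    # block until the command exits
    done = proc.exitstatus.ready()    # non-blocking completion check

    # New API, as used by this commit.
    status = proc.wait()              # block until the command exits
    done = proc.poll()                # non-blocking completion check
    code = proc.returncode            # exit code of a finished command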
@@ -854,7 +854,7 @@ def wait_until_fuse_mounted(remote, fuse, mountpoint):
                 fstype=fstype))

         # it shouldn't have exited yet; exposes some trivial problems
-        assert not fuse.exitstatus.ready()
+        assert not fuse.poll()

         time.sleep(5)
     log.info('ceph-fuse is mounted on %s', mountpoint)
@@ -58,7 +58,7 @@ def shutdown_daemons(ctx):

     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to finish shutdowns...', name)
-        proc.exitstatus.get()
+        proc.wait()


 def find_kernel_mounts(ctx):
@@ -77,7 +77,7 @@ def find_kernel_mounts(ctx):
     kernel_mounts = list()
     for remote, proc in nodes.iteritems():
         try:
-            proc.exitstatus.get()
+            proc.wait()
             log.debug('kernel mount exists on %s', remote.name)
             kernel_mounts.append(remote)
         except run.CommandFailedError:  # no mounts!
@@ -108,7 +108,7 @@ def remove_kernel_mounts(ctx, kernel_mounts):
         nodes[remote] = proc

     for remote, proc in nodes:
-        proc.exitstatus.get()
+        proc.wait()


 def remove_osd_mounts(ctx):
@@ -165,7 +165,7 @@ def reboot(ctx, remotes):
     # we just ignore these procs because reboot -f doesn't actually
     # send anything back to the ssh client!
     # for remote, proc in nodes.iteritems():
-    #     proc.exitstatus.get()
+    #     proc.wait()
     if remotes:
         log.info('waiting for nodes to reboot')
         time.sleep(8)  # if we try and reconnect too quickly, it succeeds!
@@ -193,7 +193,7 @@ def reset_syslog_dir(ctx):

     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to restart syslog...', name)
-        proc.exitstatus.get()
+        proc.wait()


 def dpkg_configure(ctx):
@@ -215,7 +215,7 @@ def dpkg_configure(ctx):
         log.info(
             'Waiting for %s to dpkg --configure -a and apt-get -f install...',
             name)
-        proc.exitstatus.get()
+        proc.wait()


 def remove_installed_packages(ctx):
@@ -251,7 +251,7 @@ def remove_testing_tree(ctx):

     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to clear filesystem...', name)
-        proc.exitstatus.get()
+        proc.wait()


 def synch_clocks(remotes):
@@ -274,7 +274,7 @@ def synch_clocks(remotes):
         nodes[remote.name] = proc
     for name, proc in nodes.iteritems():
         log.info('Waiting for clock to synchronize on %s...', name)
-        proc.exitstatus.get()
+        proc.wait()


 def main(ctx):
@@ -348,7 +348,7 @@ def valgrind_post(ctx, config):

     valgrind_exception = None
     for (proc, remote) in lookup_procs:
-        proc.exitstatus.get()
+        proc.wait()
         out = proc.stdout.getvalue()
         for line in out.split('\n'):
             if line == '':
@@ -65,8 +65,8 @@ def task(ctx, config):
                 ],
             wait=False,
             check_status=False)
-        result = proc.exitstatus.get();
+        result = proc.wait()

         if result != 0:
             remote.run(
                 args=[

@@ -78,4 +78,4 @@ def task(ctx, config):
         remote.run(args=[
             'rm', '-rf', '--', dir
             ])

@@ -5,7 +5,6 @@ the calls are made from other modules, most notably teuthology/run.py
 """
 from cStringIO import StringIO
 import contextlib
-import gevent
 import logging
 import os
 import time

@@ -258,9 +257,8 @@ def check_ceph_data(ctx, config):
         )
     failed = False
     for proc in processes:
-        assert isinstance(proc.exitstatus, gevent.event.AsyncResult)
         try:
-            proc.exitstatus.get()
+            proc.wait()
         except run.CommandFailedError:
             log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
             failed = True

@@ -281,9 +279,8 @@ def check_conflict(ctx, config):
         )
     failed = False
     for proc in processes:
-        assert isinstance(proc.exitstatus, gevent.event.AsyncResult)
         try:
-            proc.exitstatus.get()
+            proc.wait()
         except run.CommandFailedError:
             log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
             failed = True

@@ -574,7 +571,7 @@ def vm_setup(ctx, config):
             r = remote.run(args=['test', '-e', '/ceph-qa-ready',],
                            stdout=StringIO(),
                            check_status=False,)
-            if r.exitstatus != 0:
+            if r.returncode != 0:
                 p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
                 p2 = subprocess.Popen(['ssh', '-t', '-t', str(remote), 'sudo', 'sh'], stdin=p1.stdout, stdout=subprocess.PIPE)
                 _, err = p2.communicate()
@@ -85,7 +85,7 @@ def _find_arch_and_dist(ctx):

     Currently this only returns armv7l on the quantal distro or x86_64
     on the precise distro

     :param ctx: Context
     :returns: arch,distro
     """

@@ -293,7 +293,7 @@ def download_deb(ctx, config):

     for name, proc in procs.iteritems():
         log.debug('Waiting for download/copy to %s to complete...', name)
-        proc.exitstatus.get()
+        proc.wait()


 def _no_grub_link(in_file, remote, kernel_ver):

@@ -302,7 +302,7 @@ def _no_grub_link(in_file, remote, kernel_ver):
     (as is the case in Arm kernels)

     :param infile: kernel file or image file to be copied.
     :param remote: remote machine
     :param kernel_ver: kernel version
     """
     boot1 = '/boot/%s' % in_file

@@ -469,7 +469,7 @@ def install_and_reboot(ctx, config):

     for name, proc in procs.iteritems():
         log.debug('Waiting for install on %s to complete...', name)
-        proc.exitstatus.get()
+        proc.wait()

 def enable_disable_kdb(ctx, config):
     """
@@ -37,7 +37,7 @@ def task(ctx, config):
         {client: client.1, lockfile: testfile, holdtime: 5},
         {client: client.2, lockfile: testfile, holdtime: 5, maxwait: 1, expectfail: True}]



     In the past this test would have failed; there was a bug where waitlocks weren't
     cleaned up if the process failed. More involved scenarios are also possible.

@@ -48,7 +48,7 @@ def task(ctx, config):
     try:
         assert isinstance(config, list), \
             "task lockfile got invalid config"

         log.info("building executable on each host")
         buildprocs = list()
         # build the locker executable on each client

@@ -72,7 +72,7 @@ def task(ctx, config):
                 badconfig = True
             if badconfig:
                 raise KeyError("bad config {op_}".format(op_=op))

         testdir = teuthology.get_testdir(ctx)
         clients = set(clients)
         files = set(files)

@@ -82,7 +82,7 @@ def task(ctx, config):
             log.info("got a client remote")
             (_, _, client_id) = client.partition('.')
             filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])

             proc = client_remote.run(
                 args=[
                     'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir),

@@ -100,14 +100,14 @@ def task(ctx, config):
                 ],
                 logger=log.getChild('lockfile_client.{id}'.format(id=client_id)),
                 wait=False
                 )
             log.info('building sclockandhold on client{id}'.format(id=client_id))
             buildprocs.append(proc)

     # wait for builds to finish
     run.wait(buildprocs)
     log.info('finished building sclockandhold on all clients')

     # create the files to run these locks on
     client = clients.pop()
     clients.add(client)

@@ -152,7 +152,7 @@ def task(ctx, config):
         lock_procs.append((greenlet, op))
         time.sleep(0.1)  # to provide proper ordering
     #for op in config

     for (greenlet, op) in lock_procs:
         log.debug('checking lock for op {op_}'.format(op_=op))
         result = greenlet.get()

@@ -217,7 +217,7 @@ def lock_one(op, ctx):
             stdin=run.PIPE,
             check_status=False
             )
-        result = proc.exitstatus.get()
+        result = proc.wait()
     except gevent.Timeout as tout:
         if tout is not timeout:
             raise
@@ -63,7 +63,7 @@ def task(ctx, config):
         )

     log.info('built locktest on each client')

     host.run(args=['sudo', 'touch',
                    '{mnt}/locktestfile'.format(mnt=hostmnt),
                    run.Raw('&&'),

@@ -96,9 +96,9 @@ def task(ctx, config):
         logger=log.getChild('locktest.client'),
         wait=False
         )

-    hostresult = hostproc.exitstatus.get()
-    clientresult = clientproc.exitstatus.get()
+    hostresult = hostproc.wait()
+    clientresult = clientproc.wait()
     if (hostresult != 0) or (clientresult != 0):
         raise Exception("Did not pass locking test!")
     log.info('finished locktest executable with results {r} and {s}'. \
@@ -39,10 +39,10 @@ def task(ctx, config):
         'thrashosds task only accepts a dict for configuration'
     first_mon = teuthology.get_first_mon(ctx, config)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     log.info('num_osds is %s' % num_osds)
     assert num_osds == 3

     manager = ceph_manager.CephManager(
         mon,

@@ -60,7 +60,7 @@ def task(ctx, config):
     # write some data
     p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
                                '--no-cleanup'])
-    err = p.exitstatus.get();
+    err = p.wait()
     log.info('err is %d' % err)

     # mark osd.0 out to trigger a rebalance/backfill

@@ -88,7 +88,7 @@ def task(ctx, config):
     manager.revive_osd(1)

     # wait for our writes to complete + succeed
-    err = p.exitstatus.get()
+    err = p.wait()
     log.info('err is %d' % err)

     # cluster must recover
@@ -76,7 +76,7 @@ def task(ctx, config):

     time.sleep(sleep_time)
     proc.stdin.close()  # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()

     lines = proc.stdout.getvalue().split('\n')


@@ -103,7 +103,7 @@ def task(ctx, config):

     time.sleep(sleep_time)
     proc.stdin.close()  # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()

     lines = proc.stdout.getvalue().split('\n')


@@ -142,7 +142,7 @@ def task(ctx, config):

     time.sleep(sleep_time)
     proc.stdin.close()  # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()

     lines = proc.stdout.getvalue().split('\n')


@@ -172,7 +172,7 @@ def task(ctx, config):

     time.sleep(sleep_time)
     proc.stdin.close()  # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()

     lines = proc.stdout.getvalue().split('\n')


@@ -200,7 +200,7 @@ def task(ctx, config):

     time.sleep(sleep_time)
     proc.stdin.close()  # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()

     lines = proc.stdout.getvalue().split('\n')

@@ -39,10 +39,10 @@ def task(ctx, config):
     testdir = teuthology.get_testdir(ctx)
     first_mon = teuthology.get_first_mon(ctx, config)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     log.info('num_osds is %s' % num_osds)
     assert num_osds == 3

     manager = ceph_manager.CephManager(
         mon,

@@ -82,7 +82,7 @@ def task(ctx, config):
     manager.revive_osd(1)

     # wait for our writes to complete + succeed
-    err = p.exitstatus.get()
+    err = p.wait()
     log.info('err is %d' % err)

     # cluster must repeer

@@ -92,7 +92,7 @@ def task(ctx, config):

     # write some more (make sure osd.2 really is divergent)
     p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
-    p.exitstatus.get();
+    p.wait()

     # revive divergent osd
     manager.revive_osd(2)

@@ -159,13 +159,13 @@ def test_incomplete_pgs(ctx, config):
     p = rados_start(testdir, mon,
                     ['-p', 'rbd', 'bench', '60', 'write', '-b', '1',
                      '--no-cleanup'])
-    p.exitstatus.get()
+    p.wait()

     # few objects in metadata pool (with pg log, normal recovery)
     for f in range(1, 20):
         p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
                                        'foo.%d' % f, '/etc/passwd'])
-        p.exitstatus.get()
+        p.wait()

     # move it back
     manager.raw_cluster_cmd('osd', 'in', '0', '1')
@@ -33,7 +33,7 @@ def parallel_test(ctx, config):
         nodes[remote.name] = proc
     for name, proc in nodes.iteritems():
         """Wait for each process to finish before yielding and allowing other contextmanagers to run."""
-        proc.exitstatus.get()
+        proc.wait()
     yield

 @contextlib.contextmanager

@@ -45,7 +45,7 @@ def task(ctx, config):
         assert(False), "task parallel_example only supports a list or dictionary for configuration"
     if config is None:
         config = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
     if isinstance(config, list):
         config = dict.fromkeys(config)
     clients = config.keys()
@@ -199,7 +199,7 @@ def task(ctx, config):
         for client, proc in procs:
             log.info("shutting down sync agent on %s", client)
             proc.stdin.close()
-            proc.exitstatus.get()
+            proc.wait()
     finally:
         for client, proc in procs:
             ctx.cluster.only(client).run(
@@ -150,7 +150,7 @@ def task(ctx, config):
             proc.stdin.writelines(['restarted\n'])
             proc.stdin.flush()
             try:
-                proc.exitstatus.get()
+                proc.wait()
             except tor.CommandFailedError:
                 raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
             finally:
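
For commands that are allowed to fail, the updated idiom (seen in the vm_setup hunk above) is to pass check_status=False and inspect returncode afterwards, instead of comparing the old r.exitstatus integer. A minimal sketch; remote and the test path follow the diff, while the follow-up helper is hypothetical:

    r = remote.run(
        args=['test', '-e', '/ceph-qa-ready'],
        check_status=False,    # don't raise CommandFailedError on nonzero exit
        )
    if r.returncode != 0:      # returncode replaces the old r.exitstatus value
        prepare_vm(remote)     # hypothetical follow-up step, not from the diff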