"""
Workunit task -- Run ceph on sets of specific clients
"""
import logging
import pipes
import os

from teuthology import misc
from teuthology.config import config as teuth_config
from teuthology.orchestra.run import CommandFailedError
from teuthology.parallel import parallel
from teuthology.orchestra import run

log = logging.getLogger(__name__)

CLIENT_PREFIX = 'client.'


def task(ctx, config):
    """
    Run all workunits found under the specified path on the given clients.

    For example::

        tasks:
        - ceph:
        - ceph-fuse: [client.0]
        - workunit:
            clients:
                client.0: [direct_io, xattrs.sh]
                client.1: [snaps]
            branch: foo

    You can also run a list of workunits on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            tag: v0.47
            clients:
                all: [direct_io, xattrs.sh, snaps]

    If you have an "all" section it will run all the workunits
    on each client simultaneously, AFTER running any workunits specified
    for individual clients. (This keeps the per-client and "all" runs from
    overlapping.)

    To customize tests, you can specify environment variables as a dict. You
    can also specify a time limit for each work unit (defaults to 3h)::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
            clients:
                all: [snaps]
            env:
                FOO: bar
                BAZ: quux
            timeout: 3h
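
    A scratch subdirectory name can also be supplied with "subdir"; each
    client then works in that directory under its own mount instead of the
    default client.<id> directory. An illustrative sketch (the "shared" name
    here is just an example)::

        tasks:
        - ceph:
        - ceph-fuse:
        - workunit:
            clients:
                all: [snaps]
            subdir: shared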

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict)
    assert isinstance(config.get('clients'), dict), \
        'configuration must contain a dictionary of clients'

    overrides = ctx.config.get('overrides', {})
    misc.deep_merge(config, overrides.get('workunit', {}))
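
    # Decide which ref to pull workunits from: an explicit 'branch' wins,
    # then 'tag', then 'sha1'; if none are given we fall back to HEAD.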
    refspec = config.get('branch')
    if refspec is None:
        refspec = config.get('tag')
    if refspec is None:
        refspec = config.get('sha1')
    if refspec is None:
        refspec = 'HEAD'

    timeout = config.get('timeout', '3h')

    log.info('Pulling workunits from ref %s', refspec)

    created_mountpoint = {}

    if config.get('env') is not None:
        assert isinstance(config['env'], dict), 'env must be a dictionary'
    clients = config['clients']

    # Create scratch dirs for any non-all workunits
    log.info('Making a separate scratch dir for every client...')
    for role in clients.iterkeys():
        assert isinstance(role, basestring)
        if role == "all":
            continue

        assert role.startswith(CLIENT_PREFIX)
        created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
        created_mountpoint[role] = created_mnt_dir

    # Execute any non-all workunits
    with parallel() as p:
        for role, tests in clients.iteritems():
            if role != "all":
                p.spawn(_run_tests, ctx, refspec, role, tests,
                        config.get('env'), timeout=timeout)

    # Clean up dirs from any non-all workunits
    for role, created in created_mountpoint.items():
        _delete_dir(ctx, role, created)

    # Execute any 'all' workunits
    if 'all' in clients:
        all_tasks = clients["all"]
        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
                              config.get('subdir'), timeout=timeout)


def _delete_dir(ctx, role, created_mountpoint):
    """
    Delete the scratch directory that this role's workunits ran in, and
    remove the mount point itself if it was created artificially.

    :param ctx: Context
    :param role: "client.<id>" role string
    :param created_mountpoint: True if the mount point was created by
                               _make_scratch_dir and should be removed too
    """
    testdir = misc.get_testdir(ctx)
    id_ = role[len(CLIENT_PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
    # Is there any reason why this is not: join(mnt, role) ?
    client = os.path.join(mnt, 'client.{id}'.format(id=id_))

    # Remove the directory inside the mount where the workunit ran
    remote.run(
        args=[
            'sudo',
            'rm',
            '-rf',
            '--',
            client,
        ],
    )
    log.info("Deleted dir {dir}".format(dir=client))

    # If the mount was an artificially created dir, delete that too
    if created_mountpoint:
        remote.run(
            args=[
                'rmdir',
                '--',
                mnt,
            ],
        )
        log.info("Deleted artificial mount point {dir}".format(dir=mnt))


def _make_scratch_dir(ctx, role, subdir):
    """
    Make scratch directories for this role. This also makes the mount
    point if that directory does not exist.

    :param ctx: Context
    :param role: "client.<id>" role string
    :param subdir: create and use this subdir under the mount (None or False
                   to default to client.<id>)
    :returns: True if the mount point itself had to be created here
    """
    created_mountpoint = False
    id_ = role[len(CLIENT_PREFIX):]
    log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    dir_owner = remote.user
    mnt = os.path.join(misc.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
    # if neither kclient nor ceph-fuse are required for a workunit,
    # mnt may not exist. Stat and create the directory if it doesn't.
    try:
        remote.run(
            args=[
                'stat',
                '--',
                mnt,
            ],
        )
        log.info('Did not need to create dir {dir}'.format(dir=mnt))
    except CommandFailedError:
        remote.run(
            args=[
                'mkdir',
                '--',
                mnt,
            ],
        )
        log.info('Created dir {dir}'.format(dir=mnt))
        created_mountpoint = True

    if not subdir:
        subdir = 'client.{id}'.format(id=id_)
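
    # If we just created the mount point ourselves, the test user owns it and
    # a plain mkdir is enough; otherwise it was made by kclient/ceph-fuse and
    # may not be writable by us, so sudo install -d sets up the subdir with
    # the right owner.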
    if created_mountpoint:
        remote.run(
            args=[
                'cd',
                '--',
                mnt,
                run.Raw('&&'),
                'mkdir',
                '--',
                subdir,
            ],
        )
    else:
        remote.run(
            args=[
                # cd first so this will fail if the mount point does
                # not exist; pure install -d will silently do the
                # wrong thing
                'cd',
                '--',
                mnt,
                run.Raw('&&'),
                'sudo',
                'install',
                '-d',
                '-m', '0755',
                '--owner={user}'.format(user=dir_owner),
                '--',
                subdir,
            ],
        )

    return created_mountpoint


def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
    """
    Make a scratch directory for each client in the cluster, and then, for
    each test, spawn _run_tests() for each role.

    See _run_tests() for parameter documentation.
    """
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    client_remotes = list()

    created_mountpoint = {}
    for client in client_generator:
        (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
        client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
        created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
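
    # Run the units in lock step: every client runs the same unit in parallel
    # before moving on to the next one.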
    for unit in tests:
        with parallel() as p:
            for remote, role in client_remotes:
                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
                        timeout=timeout)

    # cleanup the generated client directories
    client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
    for client in client_generator:
        _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])


def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
    """
    Run the individual test. Create a scratch directory and then extract the
    workunits from git. Make the executables, and then run the tests.
    Clean up (remove files created) after the tests are finished.

    :param ctx: Context
    :param refspec: branch, sha1, or version tag used to identify this build
    :param role: "client.<id>" role to run the tests on
    :param tests: workunit specs to run; each entry may be a single script or
                  a directory of scripts
    :param env: environment set in yaml file. Could be None.
    :param subdir: subdirectory set in yaml file. Could be None.
    :param timeout: If present, use the 'timeout' command on the remote host
                    to limit execution time. Must be specified by a number
                    followed by 's' for seconds, 'm' for minutes, 'h' for
                    hours, or 'd' for days. If '0' or anything that evaluates
                    to False is passed, the 'timeout' command is not used.
    """
    testdir = misc.get_testdir(ctx)
    assert isinstance(role, basestring)
    assert role.startswith(CLIENT_PREFIX)
    id_ = role[len(CLIENT_PREFIX):]
    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
    # subdir so we can remove and recreate this a lot without sudo
    if subdir is None:
        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
    else:
        scratch_tmp = os.path.join(mnt, subdir)
    srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)
    clonedir = '{tdir}/clone'.format(tdir=testdir)

    git_url = teuth_config.get_ceph_git_url()
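    # With the upstream ceph repo, fetch just qa/workunits via git archive
    # from the git.ceph.com mirror; for any other git URL, clone it and take
    # qa/workunits from the checkout.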
    if 'github.com/ceph/ceph' in git_url:
        remote.run(
            logger=log.getChild(role),
            args=[
                'mkdir', '--', srcdir,
                run.Raw('&&'),
                'git',
                'archive',
                '--remote=git://git.ceph.com/ceph.git',
                '%s:qa/workunits' % refspec,
                run.Raw('|'),
                'tar',
                '-C', srcdir,
                '-x',
                '-f-',
            ],
        )
    else:
        remote.run(
            logger=log.getChild(role),
            args=[
                'git',
                'clone',
                git_url,
                clonedir,
                run.Raw(';'),
                'cd', '--', clonedir,
                run.Raw('&&'),
                'git', 'checkout', refspec,
                run.Raw('&&'),
                'mv', 'qa/workunits', srcdir,
            ],
        )
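
    # Build the workunits if a Makefile is present, then record every
    # executable file (as a NUL-separated list of relative paths) for this
    # role.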
    remote.run(
        logger=log.getChild(role),
        args=[
            'cd', '--', srcdir,
            run.Raw('&&'),
            'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
            run.Raw('&&'),
            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
            run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
        ],
    )

    workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)
    workunits = sorted(misc.get_file(remote, workunits_file).split('\0'))
    assert workunits
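
    # Each entry in 'tests' may name a single workunit script or a whole
    # directory of them; expand each spec against the executables found above.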
    try:
        assert isinstance(tests, list)
        for spec in tests:
            log.info('Running workunits matching %s on %s...', spec, role)
            prefix = '{spec}/'.format(spec=spec)
            to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
            if not to_run:
                raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
            for workunit in to_run:
                log.info('Running workunit %s...', workunit)
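                # Build the command: cd into the scratch dir, set the CEPH_*
                # environment (plus any user-supplied env vars), wrap the
                # script in adjust-ulimits and ceph-coverage, and optionally
                # bound its runtime with 'timeout'.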
                args = [
                    'mkdir', '-p', '--', scratch_tmp,
                    run.Raw('&&'),
                    'cd', '--', scratch_tmp,
                    run.Raw('&&'),
                    run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
                    run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
                    run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
                    run.Raw('CEPH_ID="{id}"'.format(id=id_)),
                    run.Raw('PATH=$PATH:/usr/sbin'),
                ]
                if env is not None:
                    for var, val in env.iteritems():
                        quoted_val = pipes.quote(val)
                        env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                        args.append(run.Raw(env_arg))
                args.extend([
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir)])
                if timeout and timeout != '0':
                    args.extend(['timeout', timeout])
                args.extend([
                    '{srcdir}/{workunit}'.format(
                        srcdir=srcdir,
                        workunit=workunit,
                    ),
                ])
                remote.run(
                    logger=log.getChild(role),
                    args=args,
                    label="workunit test {workunit}".format(workunit=workunit)
                )
                remote.run(
                    logger=log.getChild(role),
                    args=['sudo', 'rm', '-rf', '--', scratch_tmp],
                )
    finally:
        log.info('Stopping %s on %s...', tests, role)
        remote.run(
            logger=log.getChild(role),
            args=[
                'rm', '-rf', '--', workunits_file, srcdir, clonedir,
            ],
        )