mirror of
https://github.com/ceph/ceph
synced 2024-12-26 13:33:57 +00:00
68888862a1
Often we want to build a test collection that substitutes different sequences of tasks into a parallel/sequential construction. However, the yaml combination that happens when generating jobs is not smart enough to substitute some fragment into a deeply-nested piece of yaml. Instead, make these sequences top-level entries in the config dict, and reference them. For example:

    tasks:
    - install:
    - ceph:
    - parallel:
      - workload
      - upgrade-sequence
    workload:
      workunit:
      - something
    upgrade-sequence:
      install.restart: [osd.0, osd.1]

Signed-off-by: Sage Weil <sage@inktank.com>
55 lines
1.3 KiB
Python
55 lines
1.3 KiB
Python
import sys
|
|
import logging
|
|
import contextlib
|
|
|
|
from teuthology import run_tasks
|
|
from teuthology import parallel
|
|
from ..orchestra import run
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
def task(ctx, config):
    """
    Run a group of tasks in parallel.

    example:

    - parallel:
       - tasktest:
       - tasktest:

    You can also reference the job from elsewhere:

    foo:
      tasktest:
    tasks:
    - parallel:
      - foo
      - tasktest:

    That is, if the entry is not a dict, we will look it up in the top-level
    config.

    Sequential task and Parallel tasks can be nested.
    """
    log.info('starting parallel...')
    with parallel.parallel() as p:
        for entry in config:
            if not isinstance(entry, dict):
                # Bare string entry: resolve it as a reference into the
                # top-level job config. A missing reference yields {} and
                # fails the single-pair unpacking below with a ValueError.
                entry = ctx.config.get(entry, {})
            # Each entry must be a one-item mapping: {taskname: task_config}.
            # Use .items() rather than the Python-2-only .iteritems() so this
            # also runs on Python 3; behavior is identical here.
            ((taskname, confg),) = entry.items()
            p.spawn(_run_spawned, ctx, confg, taskname)
|
|
|
|
def _run_spawned(ctx,config,taskname):
    """
    Run a single spawned task: execute it via run_tasks, and if the task
    returned a context manager, enter it and then exit it so its cleanup
    runs — mirroring what a `with` statement would do.
    """
    # Start as a plain dict so the hasattr() checks below are safe even if
    # run_one_task() raises before mgr is assigned.
    mgr = {}
    try:
        log.info('In parallel, running task %s...' % taskname)
        mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=config)
        if hasattr(mgr, '__enter__'):
            mgr.__enter__()
    finally:
        # Capture any in-flight exception so __exit__ sees it, like a real
        # `with` block would. Note the return value of __exit__ is ignored,
        # so unlike a `with` statement it cannot suppress the exception.
        exc_info = sys.exc_info()
        if hasattr(mgr, '__exit__'):
            mgr.__exit__(*exc_info)
        # Drop the traceback reference promptly to avoid a reference cycle
        # (the classic sys.exc_info() idiom).
        del exc_info
|