mirror of
https://github.com/ceph/ceph
68888862a1
Often we want to build a test collection that substitutes different
sequences of tasks into a parallel/sequential construction. However, the
yaml combination that happens when generating jobs is not smart enough to
substitute some fragment into a deeply-nested piece of yaml. Instead, make
these sequences top-level entries in the config dict, and reference them.
For example:

tasks:
- install:
- ceph:
- parallel:
  - workload
  - upgrade-sequence
workload:
  workunit:
  - something
upgrade-sequence:
  install.restart: [osd.0, osd.1]

Signed-off-by: Sage Weil <sage@inktank.com>
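A rough illustration (not part of the commit message itself): because the
parallel/sequential tasks look non-dict entries up in the top-level config
and run them as tasks, the example above behaves roughly as if the
referenced fragments had been inlined into the parallel block:

tasks:
- install:
- ceph:
- parallel:
  - workunit:
    - something
  - install.restart: [osd.0, osd.1]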
53 lines
1.2 KiB
Python
import sys
import logging
import contextlib

from teuthology import run_tasks
from ..orchestra import run

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Sequentialize a group of tasks into one executable block.

    example:

    - sequential:
      - tasktest:
      - tasktest:

    You can also reference the job from elsewhere:

    foo:
      tasktest:
    tasks:
    - sequential:
      - tasktest:
      - foo
      - tasktest:

    That is, if the entry is not a dict, we will look it up in the
    top-level config.

    Sequential and parallel tasks can be nested.
    """
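    # Enter each sub-task in order, keeping the entered context managers on a
    # stack so they can be unwound in reverse order in the finally block below.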
    stack = []
    try:
        for entry in config:
            if not isinstance(entry, dict):
                entry = ctx.config.get(entry, {})
            ((taskname, confg),) = entry.iteritems()
            log.info('In sequential, running task %s...' % taskname)
            mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
            if hasattr(mgr, '__enter__'):
                mgr.__enter__()
                stack.append(mgr)
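    # Whether the loop finished or raised, exit every entered manager in
    # reverse order so later sub-tasks are torn down before earlier ones.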
    finally:
        try:
            exc_info = sys.exc_info()
            while stack:
                mgr = stack.pop()
                endr = mgr.__exit__(*exc_info)
        finally:
            del exc_info
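
A minimal standalone sketch (hypothetical names, not part of the file above)
of the enter-then-unwind pattern the task relies on: each sub-task's context
manager is entered in order, and all entered managers are exited in reverse
order even if a later one fails, which is what the explicit stack buys when
the number of sub-tasks is only known at runtime.

import sys
from contextlib import contextmanager

@contextmanager
def subtask(name):
    # Stand-in for a teuthology task: do setup, yield, then clean up.
    print('enter %s' % name)
    try:
        yield
    finally:
        print('exit %s' % name)

def run_sequential(names):
    stack = []
    try:
        for name in names:
            mgr = subtask(name)
            mgr.__enter__()
            stack.append(mgr)
    finally:
        exc_info = sys.exc_info()
        try:
            while stack:
                # Unwind in reverse: the most recently entered sub-task
                # is cleaned up first.
                stack.pop().__exit__(*exc_info)
        finally:
            del exc_info

run_sequential(['a', 'b', 'c'])
# prints: enter a, enter b, enter c, exit c, exit b, exit a

The reverse unwind mirrors what nested with-blocks would do if the sub-tasks
were written out by hand.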