commit e2bc637ace
parent 55d302ac8d
Author: Sage Weil <sage@redhat.com>
Date:   2019-11-12 22:15:32 +00:00

    qa/tasks/ceph2: set up managers

    Signed-off-by: Sage Weil <sage@redhat.com>

 2 changed files with 69 additions and 33 deletions

--- a/qa/tasks/ceph2.py
+++ b/qa/tasks/ceph2.py

@@ -642,6 +642,7 @@ def task(ctx, config):
     ctx.daemons = DaemonGroup(use_ceph_daemon=True)
     if not hasattr(ctx, 'ceph'):
         ctx.ceph = {}
+        ctx.managers = {}
     if 'cluster' not in config:
         config['cluster'] = 'ceph'
     cluster_name = config['cluster']
@@ -690,6 +691,14 @@ def task(ctx, config):
             lambda: ceph_mdss(ctx=ctx, config=config),
             lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
     ):
+        ctx.managers[cluster_name] = CephManager(
+            ctx.ceph[cluster_name].bootstrap_remote,
+            ctx=ctx,
+            logger=log.getChild('ceph_manager.' + cluster_name),
+            cluster=cluster_name,
+            ceph_daemon=True,
+        )
+
         try:
             log.info('Setup complete, yielding')
             yield
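
(Aside: with ctx.managers populated, later teuthology tasks can pick up the per-cluster CephManager from ctx. A minimal sketch, assuming a hypothetical downstream task; none of the names below are part of this commit:)

    def downstream_task(ctx, config):
        # 'ceph' is the default cluster name set by the task above.
        cluster_name = config.get('cluster', 'ceph')
        manager = ctx.managers[cluster_name]
        # Because this manager was built with ceph_daemon=True, the call
        # below is routed through 'ceph-daemon shell' on the bootstrap host.
        status = manager.raw_cluster_cmd('status', '--format=json')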

--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py

@@ -33,6 +33,22 @@ DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'
 
 log = logging.getLogger(__name__)
 
+# this is for ceph-daemon clusters
+def shell(ctx, cluster_name, remote, args, **kwargs):
+    testdir = teuthology.get_testdir(ctx)
+    return remote.run(
+        args=[
+            'sudo',
+            '{}/ceph-daemon'.format(testdir),
+            '--image', ctx.image,
+            'shell',
+            '-c', '{}/{}.conf'.format(testdir, cluster_name),
+            '-k', '{}/{}.keyring'.format(testdir, cluster_name),
+            '--fsid', ctx.ceph[cluster_name].fsid,
+            '--',
+        ] + args,
+        **kwargs
+    )
 
 def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
     conf_fp = StringIO()
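
(Aside: shell() is a thin wrapper around remote.run(). A usage sketch with an illustrative cluster name; ctx.image and ctx.ceph[...].fsid are assumed to have been set up by the ceph2 task:)

    # Run 'ceph -s' inside the ceph-daemon shell container and capture
    # its output; StringIO is the same class used elsewhere in this file.
    proc = shell(ctx, 'ceph', ctx.ceph['ceph'].bootstrap_remote,
                 args=['ceph', '-s'],
                 stdout=StringIO())
    log.info(proc.stdout.getvalue())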
@@ -1200,13 +1216,14 @@ class CephManager:
     """
     def __init__(self, controller, ctx=None, config=None, logger=None,
-                 cluster='ceph'):
+                 cluster='ceph', ceph_daemon=False):
         self.lock = threading.RLock()
         self.ctx = ctx
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
         self.cluster = cluster
+        self.ceph_daemon = ceph_daemon
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
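
(Aside: the new keyword defaults to False, so existing CephManager callers keep their behavior; only ceph2-based runs opt in. A construction sketch, with placeholder remotes:)

    # Legacy path: ceph_daemon stays False, commands run bare on the host.
    legacy = CephManager(mon_remote, ctx=ctx, logger=log, cluster='ceph')
    # ceph2 path: commands are funneled through ceph-daemon's shell.
    containerized = CephManager(bootstrap_remote, ctx=ctx, logger=log,
                                cluster='ceph', ceph_daemon=True)
    assert not legacy.ceph_daemon and containerized.ceph_daemon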
@@ -1231,22 +1248,27 @@ class CephManager:
         """
         Start ceph on a raw cluster. Return count
         """
-        testdir = teuthology.get_testdir(self.ctx)
-        ceph_args = [
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            '{tdir}/archive/coverage'.format(tdir=testdir),
-            'timeout',
-            '120',
-            'ceph',
-            '--cluster',
-            self.cluster,
-        ]
-        ceph_args.extend(args)
-        proc = self.controller.run(
-            args=ceph_args,
-            stdout=StringIO(),
-        )
+        if self.ceph_daemon:
+            proc = shell(self.ctx, self.cluster, self.controller,
+                         args=['ceph'] + list(args),
+                         stdout=StringIO())
+        else:
+            testdir = teuthology.get_testdir(self.ctx)
+            ceph_args = [
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'timeout',
+                '120',
+                'ceph',
+                '--cluster',
+                self.cluster,
+            ]
+            ceph_args.extend(args)
+            proc = self.controller.run(
+                args=ceph_args,
+                stdout=StringIO(),
+            )
         return proc.stdout.getvalue()
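
(Aside: both branches capture stdout into a StringIO and return it as a string; only the transport differs. Roughly, for raw_cluster_cmd('osd', 'dump') on cluster 'ceph', with illustrative paths:)

    # ceph_daemon=True branch, via shell():
    #   sudo {testdir}/ceph-daemon --image <image> shell \
    #       -c {testdir}/ceph.conf -k {testdir}/ceph.keyring \
    #       --fsid <fsid> -- ceph osd dump
    # ceph_daemon=False, the pre-existing path:
    #   sudo adjust-ulimits ceph-coverage {testdir}/archive/coverage \
    #       timeout 120 ceph --cluster ceph osd dump
    out = manager.raw_cluster_cmd('osd', 'dump')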
@@ -1254,22 +1276,27 @@ class CephManager:
         """
         Start ceph on a cluster. Return success or failure information.
         """
-        testdir = teuthology.get_testdir(self.ctx)
-        ceph_args = [
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            '{tdir}/archive/coverage'.format(tdir=testdir),
-            'timeout',
-            '120',
-            'ceph',
-            '--cluster',
-            self.cluster,
-        ]
-        ceph_args.extend(args)
-        kwargs['args'] = ceph_args
-        kwargs['check_status'] = False
-        proc = self.controller.run(**kwargs)
+        if self.ceph_daemon:
+            proc = shell(self.ctx, self.cluster, self.controller,
+                         args=['ceph'] + list(args),
+                         check_status=False)
+        else:
+            testdir = teuthology.get_testdir(self.ctx)
+            ceph_args = [
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'timeout',
+                '120',
+                'ceph',
+                '--cluster',
+                self.cluster,
+            ]
+            ceph_args.extend(args)
+            kwargs['args'] = ceph_args
+            kwargs['check_status'] = False
+            proc = self.controller.run(**kwargs)
         return proc.exitstatus
 
     def run_ceph_w(self, watch_channel=None):
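
(Aside: unlike raw_cluster_cmd above, this method passes check_status=False and returns proc.exitstatus, so callers can probe commands that may legitimately fail; in this file that method is raw_cluster_cmd_result. A usage sketch with a hypothetical pool name:)

    # Probe for a pool without raising on failure; create it if absent.
    rc = manager.raw_cluster_cmd_result('osd', 'pool', 'get', 'mypool', 'size')
    if rc != 0:
        manager.raw_cluster_cmd('osd', 'pool', 'create', 'mypool', '8')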