tasks: update to run ceph-mgr daemons

Signed-off-by: John Spray <john.spray@redhat.com>
John Spray 2016-09-27 12:22:45 +01:00
parent 7b3d152bc1
commit 326a33b4fa
3 changed files with 52 additions and 2 deletions


@@ -23,7 +23,7 @@ from teuthology.orchestra import run
import ceph_client as cclient
from teuthology.orchestra.daemon import DaemonGroup
CEPH_ROLE_TYPES = ['mon', 'osd', 'mds', 'rgw']
CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw']
log = logging.getLogger(__name__)
@@ -39,6 +39,9 @@ def generate_caps(type_):
            mon='allow *',
            osd='allow *',
        ),
        mgr=dict(
            mon='allow *',
        ),
        mds=dict(
            mon='allow *',
            osd='allow *',
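The mgr entry above gives ceph-mgr keys a single capability, 'allow *' on the monitors. For reference, a caps dict of this shape maps directly onto ceph-authtool's --cap flags when the key is registered; the helper below is a minimal illustrative sketch, and caps_to_authtool_args is not a function in ceph.py:

# Illustrative sketch only: caps_to_authtool_args is not part of ceph.py.
# It flattens a caps dict, such as the mgr entry above, into the
# `--cap <subsystem> <capability>` arguments understood by ceph-authtool.
def caps_to_authtool_args(caps):
    args = []
    for subsystem, capability in sorted(caps.items()):
        args += ['--cap', subsystem, capability]
    return args

print(caps_to_authtool_args({'mon': 'allow *'}))
# ['--cap', 'mon', 'allow *']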
@@ -598,6 +601,35 @@ def cluster(ctx, config):
        ),
    )

    log.info('Setting up mgr nodes...')
    mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
    for remote, roles_for_host in mgrs.remotes.iteritems():
        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
                                                     cluster_name):
            _, _, id_ = teuthology.split_role(role)
            mgr_dir = '/var/lib/ceph/mgr/{cluster}-{id}'.format(
                cluster=cluster_name,
                id=id_,
            )
            remote.run(
                args=[
                    'sudo',
                    'mkdir',
                    '-p',
                    mgr_dir,
                    run.Raw('&&'),
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    '--name=mgr.{id}'.format(id=id_),
                    mgr_dir + '/keyring',
                ],
            )

    log.info('Setting up mds nodes...')
    mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
    for remote, roles_for_host in mdss.remotes.iteritems():
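For reference, the remote.run() call in the mgr block above boils down to one shell line per mgr role: create the daemon's data directory, then have ceph-authtool generate a keyring there holding a fresh key for the mgr.<id> entity. A minimal sketch that renders that command; the cluster name 'ceph', id 'x' and coverage path are placeholders, not values from this commit:

# Illustrative only: placeholder values stand in for cluster_name, id_ and
# coverage_dir as used in cluster() above.
cluster_name = 'ceph'
id_ = 'x'
coverage_dir = '/path/to/coverage'
mgr_dir = '/var/lib/ceph/mgr/{cluster}-{id}'.format(cluster=cluster_name, id=id_)

command = ' '.join([
    'sudo', 'mkdir', '-p', mgr_dir, '&&',
    'sudo', 'adjust-ulimits', 'ceph-coverage', coverage_dir,
    'ceph-authtool', '--create-keyring', '--gen-key',
    '--name=mgr.{id}'.format(id=id_), mgr_dir + '/keyring',
])
print(command)
# sudo mkdir -p /var/lib/ceph/mgr/ceph-x && sudo adjust-ulimits ceph-coverage
# /path/to/coverage ceph-authtool --create-keyring --gen-key --name=mgr.x
# /var/lib/ceph/mgr/ceph-x/keyring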
@@ -753,7 +785,7 @@ def cluster(ctx, config):
    keys_fp = StringIO()
    keys = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for type_ in ['mds', 'osd']:
        for type_ in ['mgr', 'mds', 'osd']:
            for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
                _, _, id_ = teuthology.split_role(role)
                data = teuthology.get_file(
@@ -1516,6 +1548,7 @@ def task(ctx, config):
            cluster=config['cluster'],
        )),
        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
        lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
        lambda: crush_setup(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
        lambda: cephfs_setup(ctx=ctx, config=config),
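With run_daemon() now invoked for 'mgr' between the mon and osd phases, a job only needs mgr roles in its layout to get ceph-mgr daemons started. A sketch of such a layout, written as the Python structure teuthology parses from a job's roles list; the host groupings and ids are illustrative, not taken from this commit:

# Illustrative layout only.
roles = [
    ['mon.a', 'mgr.x', 'osd.0', 'osd.1'],
    ['mon.b', 'mgr.y', 'osd.2', 'osd.3', 'client.0'],
]

# With 'mgr' in CEPH_ROLE_TYPES, cluster() creates /var/lib/ceph/mgr/<cluster>-x
# and a mgr.x keyring on the first host, and run_daemon(type_='mgr') then starts
# a ceph-mgr daemon there.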


@@ -888,6 +888,9 @@ class CephManager:
    """
    Ceph manager object.
    Contains several local functions that form a bulk of this module.

    Note: this class has nothing to do with the Ceph daemon (ceph-mgr) of
    the same name.
    """

    REPLICATED_POOL = 1


@@ -66,8 +66,10 @@ def task(ctx, config):
        name = remote.shortname
        mon_name = 'ceph-mon@' + name + '.service'
        mds_name = 'ceph-mds@' + name + '.service'
        mgr_name = 'ceph-mgr@' + name + '.service'
        mon_role_name = 'mon.' + name
        mds_role_name = 'mds.' + name
        mgr_role_name = 'mgr.' + name
        m_osd = re.search('--id (\d+) --setuser ceph', r.stdout.getvalue())
        if m_osd:
            osd_service = 'ceph-osd@{m}.service'.format(m=m_osd.group(1))
@@ -98,6 +100,18 @@ def task(ctx, config):
                log.info("Failed to stop ceph mon service")
                remote.run(args=['sudo', 'systemctl', 'start', mon_name])
                time.sleep(4)
        if mgr_role_name in roles:
            remote.run(args=['sudo', 'systemctl', 'status', mgr_name])
            remote.run(args=['sudo', 'systemctl', 'stop', mgr_name])
            time.sleep(4)  # an immediate check would still show a deactivating state
            r = remote.run(args=['sudo', 'systemctl', 'status', mgr_name],
                           stdout=StringIO(), check_status=False)
            if r.stdout.getvalue().find('Active: inactive') != -1:
                log.info("Successfully stopped single ceph mgr service")
            else:
                log.info("Failed to stop ceph mgr service")
                remote.run(args=['sudo', 'systemctl', 'start', mgr_name])
                time.sleep(4)
        if mds_role_name in roles:
            remote.run(args=['sudo', 'systemctl', 'status', mds_name])
            remote.run(args=['sudo', 'systemctl', 'stop', mds_name])
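The mon, mgr and mds branches of this task all repeat the same stop, settle, verify, restart-on-failure sequence. Below is a sketch of that sequence as a standalone helper; stop_and_verify is hypothetical (it is not part of this task), and the remote argument stands in for the teuthology remote object used above:

# Sketch only: a generic form of the per-daemon stop/verify/restart pattern.
import logging
import time
from StringIO import StringIO  # Python 2, matching the task above

log = logging.getLogger(__name__)


def stop_and_verify(remote, unit, settle=4):
    """Stop a systemd unit, confirm it went inactive, restart it on failure."""
    remote.run(args=['sudo', 'systemctl', 'stop', unit])
    time.sleep(settle)  # an immediate status check may still show 'deactivating'
    r = remote.run(args=['sudo', 'systemctl', 'status', unit],
                   stdout=StringIO(), check_status=False)
    if 'Active: inactive' in r.stdout.getvalue():
        log.info("Successfully stopped %s", unit)
    else:
        log.info("Failed to stop %s; restarting it", unit)
        remote.run(args=['sudo', 'systemctl', 'start', unit])
        time.sleep(settle)

Something like stop_and_verify(remote, mgr_name) would then cover the mgr branch above, minus its initial status call.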