"""
Execute ceph-deploy as a task
"""
from cStringIO import StringIO

import contextlib
import os
import time
import logging
import traceback

from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.task import install as install_fn
from teuthology.orchestra import run
from tasks.cephfs.filesystem import Filesystem

log = logging.getLogger(__name__)


@contextlib.contextmanager
def download_ceph_deploy(ctx, config):
    """
    Downloads ceph-deploy from the ceph.com git mirror and (by default)
    switches to the master branch. If `ceph-deploy-branch` is specified, that
    branch is used instead. The `bootstrap` script is then run with the
    argument obtained from `python_version`, if specified.
    """
    ceph_admin = ctx.cluster.only(teuthology.get_first_mon(ctx, config))

    try:
        py_ver = str(config['python_version'])
    except KeyError:
        pass
    else:
        supported_versions = ['2', '3']
        if py_ver not in supported_versions:
            raise ValueError("python_version must be: {}, not {}".format(
                ' or '.join(supported_versions), py_ver
            ))

        log.info("Installing Python")
        for admin in ceph_admin.remotes:
            system_type = teuthology.get_system_type(admin)

            if system_type == 'rpm':
                package = 'python34' if py_ver == '3' else 'python'
                ctx.cluster.run(args=[
                    'sudo', 'yum', '-y', 'install',
                    package, 'python-virtualenv'
                ])
            else:
                package = 'python3' if py_ver == '3' else 'python'
                ctx.cluster.run(args=[
                    'sudo', 'apt-get', '-y', '--force-yes', 'install',
                    package, 'python-virtualenv'
                ])

    log.info('Downloading ceph-deploy...')
    testdir = teuthology.get_testdir(ctx)
    ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')

    ceph_admin.run(
        args=[
            'git', 'clone', '-b', ceph_deploy_branch,
            teuth_config.ceph_git_base_url + 'ceph-deploy.git',
            '{tdir}/ceph-deploy'.format(tdir=testdir),
        ],
    )
    args = [
        'cd',
        '{tdir}/ceph-deploy'.format(tdir=testdir),
        run.Raw('&&'),
        './bootstrap',
    ]
    try:
        args.append(str(config['python_version']))
    except KeyError:
        pass
    ceph_admin.run(args=args)

    try:
        yield
    finally:
        log.info('Removing ceph-deploy ...')
        ceph_admin.run(
            args=[
                'rm',
                '-rf',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
            ],
        )


def is_healthy(ctx, config):
    """Wait until a Ceph cluster is healthy."""
    testdir = teuthology.get_testdir(ctx)
    ceph_admin = teuthology.get_first_mon(ctx, config)
    (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
    max_tries = 90  # 90 tries * 10 secs --> 15 minutes
    tries = 0
    while True:
        tries += 1
        if tries >= max_tries:
            msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
            remote.run(
                args=[
                    'cd',
                    '{tdir}'.format(tdir=testdir),
                    run.Raw('&&'),
                    'sudo', 'ceph',
                    'report',
                ],
            )
            raise RuntimeError(msg)

        r = remote.run(
            args=[
                'cd',
                '{tdir}'.format(tdir=testdir),
                run.Raw('&&'),
                'sudo', 'ceph',
                'health',
            ],
            stdout=StringIO(),
            logger=log.getChild('health'),
        )
        out = r.stdout.getvalue()
        log.info('Ceph health: %s', out.rstrip('\n'))
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(10)


def get_nodes_using_role(ctx, target_role):
    """
    Extract the names of the nodes that host a given role, and rewrite the
    cluster's service IDs to match the node-based naming scheme that
    ceph-deploy uses: if "mon.a" is on host "foo23", it is renamed to
    "mon.foo23".
    """

    # Nodes containing a service of the specified role
    nodes_of_interest = []

    # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
    modified_remotes = {}

    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
        modified_remotes[_remote] = []
        for svc_id in roles_for_host:
            if svc_id.startswith("{0}.".format(target_role)):
                fqdn = str(_remote).split('@')[-1]
                nodename = str(str(_remote).split('.')[0]).split('@')[1]
                if target_role == 'mon':
                    nodes_of_interest.append(fqdn)
                else:
                    nodes_of_interest.append(nodename)

                modified_remotes[_remote].append(
                    "{0}.{1}".format(target_role, nodename))
            else:
                modified_remotes[_remote].append(svc_id)

    ctx.cluster.remotes = modified_remotes

    return nodes_of_interest


def get_dev_for_osd(ctx, config):
    """Get a list of all osd device names."""
    osd_devs = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        host = remote.name.split('@')[-1]
        shortname = host.split('.')[0]
        devs = teuthology.get_scratch_devices(remote)
        num_osd_per_host = list(
            teuthology.roles_of_type(
                roles_for_host, 'osd'))
        num_osds = len(num_osd_per_host)
        if config.get('separate_journal_disk') is not None:
            num_devs_reqd = 2 * num_osds
            assert num_devs_reqd <= len(
                devs), 'fewer data and journal disks than required ' + shortname
            for dindex in range(0, num_devs_reqd, 2):
                jd_index = dindex + 1
                dev_short = devs[dindex].split('/')[-1]
                jdev_short = devs[jd_index].split('/')[-1]
                osd_devs.append((shortname, dev_short, jdev_short))
        else:
            assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
            for dev in devs[:num_osds]:
                dev_short = dev.split('/')[-1]
                osd_devs.append((shortname, dev_short))
    return osd_devs


def get_all_nodes(ctx, config):
    """Return a string of node names separated by blanks"""
    nodelist = []
    for t, k in ctx.config['targets'].iteritems():
        host = t.split('@')[-1]
        simple_host = host.split('.')[0]
        nodelist.append(simple_host)
    nodelist = " ".join(nodelist)
    return nodelist


@contextlib.contextmanager
def build_ceph_cluster(ctx, config):
    """Build a ceph cluster"""

    # Expect to find ceph_admin on the first mon by ID, same place that the
    # download task puts it. Remember this here, because subsequently IDs will
    # change from those in the test config to those that ceph-deploy invents.
    (ceph_admin,) = ctx.cluster.only(
        teuthology.get_first_mon(ctx, config)).remotes.iterkeys()

    def execute_ceph_deploy(cmd):
        """Remotely execute a ceph_deploy command"""
        return ceph_admin.run(
            args=[
                'cd',
                '{tdir}/ceph-deploy'.format(tdir=testdir),
                run.Raw('&&'),
                run.Raw(cmd),
            ],
            check_status=False,
        ).exitstatus
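    # `cmd` is a full ceph-deploy invocation assembled below, for example
    # './ceph-deploy new plana01' or './ceph-deploy gatherkeys plana01'
    # (hostnames are illustrative); it is run from the checkout created by
    # download_ceph_deploy and only the exit status is returned.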

    try:
        log.info('Building ceph cluster using ceph-deploy...')
        testdir = teuthology.get_testdir(ctx)
        ceph_branch = None
        if config.get('branch') is not None:
            cbranch = config.get('branch')
            for var, val in cbranch.iteritems():
                ceph_branch = '--{var}={val}'.format(var=var, val=val)
        all_nodes = get_all_nodes(ctx, config)
        mds_nodes = get_nodes_using_role(ctx, 'mds')
        mds_nodes = " ".join(mds_nodes)
        mon_node = get_nodes_using_role(ctx, 'mon')
        mon_nodes = " ".join(mon_node)
        new_mon = './ceph-deploy new' + " " + mon_nodes
        mon_hostname = mon_nodes.split(' ')[0]
        mon_hostname = str(mon_hostname)
        gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname
        deploy_mds = './ceph-deploy mds create' + " " + mds_nodes
        no_of_osds = 0

        if mon_nodes is None:
            raise RuntimeError("no monitor nodes in the config file")

        estatus_new = execute_ceph_deploy(new_mon)
        if estatus_new != 0:
            raise RuntimeError("ceph-deploy: new command failed")

        log.info('adding config inputs...')
        testdir = teuthology.get_testdir(ctx)
        conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)

        if config.get('conf') is not None:
            confp = config.get('conf')
            for section, keys in confp.iteritems():
                lines = '[{section}]\n'.format(section=section)
                teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
                                                sudo=True)
                for key, value in keys.iteritems():
                    log.info("[%s] %s = %s" % (section, key, value))
                    lines = '{key} = {value}\n'.format(key=key, value=value)
                    teuthology.append_lines_to_file(
                        ceph_admin, conf_path, lines, sudo=True)

        # install ceph
        dev_branch = ctx.config['branch']
        branch = '--dev={branch}'.format(branch=dev_branch)
        if ceph_branch:
            option = ceph_branch
        else:
            option = branch
        install_nodes = './ceph-deploy install ' + option + " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph")
        # install ceph-test package too
        install_nodes2 = './ceph-deploy install --tests ' + option + \
            " " + all_nodes
        estatus_install = execute_ceph_deploy(install_nodes2)
        if estatus_install != 0:
            raise RuntimeError("ceph-deploy: Failed to install ceph-test")

        mon_create_nodes = './ceph-deploy mon create-initial'
        # If the following fails, it is OK, it might just be that the monitors
        # are taking way more than a minute/monitor to form quorum, so let's
        # try the next block which will wait up to 15 minutes to gatherkeys.
        execute_ceph_deploy(mon_create_nodes)

        # create-keys is explicit now
        # http://tracker.ceph.com/issues/16036
        mons = ctx.cluster.only(teuthology.is_type('mon'))
        for remote in mons.remotes.iterkeys():
            remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph',
                             '--id', remote.shortname])

        estatus_gather = execute_ceph_deploy(gather_keys)
        if mds_nodes:
            estatus_mds = execute_ceph_deploy(deploy_mds)
            if estatus_mds != 0:
                raise RuntimeError("ceph-deploy: Failed to deploy mds")

        if config.get('test_mon_destroy') is not None:
            for d in range(1, len(mon_node)):
                mon_destroy_nodes = './ceph-deploy mon destroy' + \
                    " " + mon_node[d]
                estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes)
                if estatus_mon_d != 0:
                    raise RuntimeError("ceph-deploy: Failed to delete monitor")

        node_dev_list = get_dev_for_osd(ctx, config)
        for d in node_dev_list:
            node = d[0]
            for disk in d[1:]:
                zap = './ceph-deploy disk zap ' + node + ':' + disk
                estatus = execute_ceph_deploy(zap)
                if estatus != 0:
                    raise RuntimeError("ceph-deploy: Failed to zap osds")
            osd_create_cmd = './ceph-deploy osd create '
            if config.get('dmcrypt') is not None:
                osd_create_cmd += '--dmcrypt '
            osd_create_cmd += ":".join(d)
            estatus_osd = execute_ceph_deploy(osd_create_cmd)
            if estatus_osd == 0:
                log.info('successfully created osd')
                no_of_osds += 1
            else:
                raise RuntimeError("ceph-deploy: Failed to create osds")
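
        # For reference, the commands assembled above look like
        # './ceph-deploy disk zap plana01:sdb' followed by
        # './ceph-deploy osd create plana01:sdb:sdc' (or with '--dmcrypt'
        # added); host and device names are illustrative only.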

        if config.get('wait-for-healthy', True) and no_of_osds >= 2:
            is_healthy(ctx=ctx, config=None)

            log.info('Setting up client nodes...')
            conf_path = '/etc/ceph/ceph.conf'
            admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
            first_mon = teuthology.get_first_mon(ctx, config)
            (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
            conf_data = teuthology.get_file(
                remote=mon0_remote,
                path=conf_path,
                sudo=True,
            )
            admin_keyring = teuthology.get_file(
                remote=mon0_remote,
                path=admin_keyring_path,
                sudo=True,
            )

            clients = ctx.cluster.only(teuthology.is_type('client'))
            for remot, roles_for_host in clients.remotes.iteritems():
                for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
                    client_keyring = \
                        '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
                    mon0_remote.run(
                        args=[
                            'cd',
                            '{tdir}'.format(tdir=testdir),
                            run.Raw('&&'),
                            'sudo', 'bash', '-c',
                            run.Raw('"'), 'ceph',
                            'auth',
                            'get-or-create',
                            'client.{id}'.format(id=id_),
                            'mds', 'allow',
                            'mon', 'allow *',
                            'osd', 'allow *',
                            run.Raw('>'),
                            client_keyring,
                            run.Raw('"'),
                        ],
                    )
                    key_data = teuthology.get_file(
                        remote=mon0_remote,
                        path=client_keyring,
                        sudo=True,
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=client_keyring,
                        data=key_data,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=admin_keyring_path,
                        data=admin_keyring,
                        perms='0644'
                    )
                    teuthology.sudo_write_file(
                        remote=remot,
                        path=conf_path,
                        data=conf_data,
                        perms='0644'
                    )
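
            # For each client role (e.g. client.0, an illustrative id) this
            # creates /etc/ceph/ceph.client.0.keyring on the first mon and
            # copies it, together with ceph.conf and the admin keyring, to
            # the client node with mode 0644.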

            if mds_nodes:
                log.info('Configuring CephFS...')
                ceph_fs = Filesystem(ctx, create=True)
        elif not config.get('only_mon'):
            raise RuntimeError(
                "The cluster is NOT operational due to insufficient OSDs")
        yield

    except Exception:
        log.info(
            "Error encountered, logging exception before tearing down ceph-deploy")
        log.info(traceback.format_exc())
        raise
    finally:
        if config.get('keep_running'):
            return
        log.info('Stopping ceph...')
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'])

        # Are you really not running anymore?
        # try first with the init tooling
        # ignoring the status so this becomes informational only
        ctx.cluster.run(
            args=[
                'sudo', 'status', 'ceph-all', run.Raw('||'),
                'sudo', 'service', 'ceph', 'status', run.Raw('||'),
                'sudo', 'systemctl', 'status', 'ceph.target'],
            check_status=False)

        # and now just check for the processes themselves, as if upstart/sysvinit
        # is lying to us. Ignore errors if the grep fails
        ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
                              'grep', '-v', 'grep', run.Raw('|'),
                              'grep', 'ceph'], check_status=False)

        if ctx.archive is not None:
            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            os.makedirs(path)
            mons = ctx.cluster.only(teuthology.is_type('mon'))
            for remote, roles in mons.remotes.iteritems():
                for role in roles:
                    if role.startswith('mon.'):
                        teuthology.pull_directory_tarball(
                            remote,
                            '/var/lib/ceph/mon',
                            path + '/' + role + '.tgz')

            log.info('Compressing logs...')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'sudo',
                        'find',
                        '/var/log/ceph',
                        '-name',
                        '*.log',
                        '-print0',
                        run.Raw('|'),
                        'sudo',
                        'xargs',
                        '-0',
                        '--no-run-if-empty',
                        '--',
                        'gzip',
                        '--',
                    ],
                    wait=False,
                ),
            )

            log.info('Archiving logs...')
            path = os.path.join(ctx.archive, 'remote')
            os.makedirs(path)
            for remote in ctx.cluster.remotes.iterkeys():
                sub = os.path.join(path, remote.shortname)
                os.makedirs(sub)
                teuthology.pull_directory(remote, '/var/log/ceph',
                                          os.path.join(sub, 'log'))

        # Prevent these from being undefined if the try block fails
        all_nodes = get_all_nodes(ctx, config)
        purge_nodes = './ceph-deploy purge' + " " + all_nodes
        purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes

        log.info('Purging package...')
        execute_ceph_deploy(purge_nodes)
        log.info('Purging data...')
        execute_ceph_deploy(purgedata_nodes)


@contextlib.contextmanager
def cli_test(ctx, config):
    """
    Exercise the most commonly used ceph-deploy CLI commands, ensure they
    all work, and start the cluster up via the init system.
    """
    log.info('Ceph-deploy Test')
    if config is None:
        config = {}
    test_branch = ''
    conf_dir = teuthology.get_testdir(ctx) + "/cdtest"

    def execute_cdeploy(admin, cmd, path):
        """Execute a ceph-deploy command, using either the git checkout under
        `path` or the installed ceph-deploy package."""
        args = ['cd', conf_dir, run.Raw(';')]
        if path:
            args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
        else:
            args.append('ceph-deploy')
        args.append(run.Raw(cmd))
        ec = admin.run(args=args, check_status=False).exitstatus
        if ec != 0:
            raise RuntimeError(
                "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))
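
    # Illustrative call: execute_cdeploy(admin, 'new plana01', path) runs
    # "<testdir>/ceph-deploy/ceph-deploy new plana01" from conf_dir (or the
    # packaged ceph-deploy when path is None); the node name is an example.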

    if config.get('rhbuild'):
        path = None
    else:
        path = teuthology.get_testdir(ctx)
        # test on the branch from the job config, e.g. wip-*, master or next;
        # packages for all distros should exist for wip* branches
        if ctx.config.get('branch'):
            branch = ctx.config.get('branch')
            test_branch = ' --dev={branch} '.format(branch=branch)
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    for node, role in mons.remotes.iteritems():
        admin = node
        admin.run(args=['mkdir', conf_dir], check_status=False)
        nodename = admin.shortname
    system_type = teuthology.get_system_type(admin)
    if config.get('rhbuild'):
        admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
    log.info('system type is %s', system_type)
    osds = ctx.cluster.only(teuthology.is_type('osd'))

    for remote, roles in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        log.info("roles %s", roles)
        if len(devs) < 3:
            log.error(
                'Test needs minimum of 3 devices, only found %s',
                str(devs))
            raise RuntimeError("Needs minimum of 3 devices ")

    conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
    new_cmd = 'new ' + nodename
    execute_cdeploy(admin, new_cmd, path)
    if config.get('conf') is not None:
        confp = config.get('conf')
        for section, keys in confp.iteritems():
            lines = '[{section}]\n'.format(section=section)
            teuthology.append_lines_to_file(admin, conf_path, lines,
                                            sudo=True)
            for key, value in keys.iteritems():
                log.info("[%s] %s = %s" % (section, key, value))
                lines = '{key} = {value}\n'.format(key=key, value=value)
                teuthology.append_lines_to_file(admin, conf_path, lines,
                                                sudo=True)
    new_mon_install = 'install {branch} --mon '.format(
        branch=test_branch) + nodename
    new_osd_install = 'install {branch} --osd '.format(
        branch=test_branch) + nodename
    new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
    create_initial = 'mon create-initial '
    execute_cdeploy(admin, new_mon_install, path)
    execute_cdeploy(admin, new_osd_install, path)
    execute_cdeploy(admin, new_admin, path)
    execute_cdeploy(admin, create_initial, path)

    for i in range(3):
        zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
        prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
        execute_cdeploy(admin, zap_disk, path)
        execute_cdeploy(admin, prepare, path)

    log.info("list files for debugging purposes to check file permissions")
    admin.run(args=['ls', run.Raw('-lt'), conf_dir])
    remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
    r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
    out = r.stdout.getvalue()
    log.info('Ceph health: %s', out.rstrip('\n'))
    log.info("Waiting for cluster to become healthy")
    with contextutil.safe_while(sleep=10, tries=6,
                                action='check health') as proceed:
        while proceed():
            r = remote.run(args=['sudo', 'ceph', 'health'], stdout=StringIO())
            out = r.stdout.getvalue()
            if out.split(None, 1)[0] == 'HEALTH_OK':
                break
    rgw_install = 'install {branch} --rgw {node}'.format(
        branch=test_branch,
        node=nodename,
    )
    rgw_create = 'rgw create ' + nodename
    execute_cdeploy(admin, rgw_install, path)
    execute_cdeploy(admin, rgw_create, path)
    log.info('All ceph-deploy cli tests passed')
    try:
        yield
    finally:
        log.info("cleaning up")
        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                              'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                              'sudo', 'systemctl', 'stop', 'ceph.target'],
                        check_status=False)
        time.sleep(4)
        for i in range(3):
            umount_dev = "{d}1".format(d=devs[i])
            r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
        cmd = 'purge ' + nodename
        execute_cdeploy(admin, cmd, path)
        cmd = 'purgedata ' + nodename
        execute_cdeploy(admin, cmd, path)
        log.info("Removing temporary dir")
        admin.run(
            args=[
                'rm',
                run.Raw('-rf'),
                run.Raw(conf_dir)],
            check_status=False)
        if config.get('rhbuild'):
            admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])


@contextlib.contextmanager
def single_node_test(ctx, config):
    """
    Run the ceph-deploy cli tests on a single node, for example::

    - ceph-deploy.single_node_test: null

    #rhbuild testing
    - ceph-deploy.single_node_test:
        rhbuild: 1.2.3

    """
    log.info("Testing ceph-deploy on single node")
    if config is None:
        config = {}
    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('rhbuild'):
        log.info("RH Build, Skip Download")
        with contextutil.nested(
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield
    else:
        with contextutil.nested(
            lambda: install_fn.ship_utilities(ctx=ctx, config=None),
            lambda: download_ceph_deploy(ctx=ctx, config=config),
            lambda: cli_test(ctx=ctx, config=config),
        ):
            yield


@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               stable: bobtail
            mon_initial_members: 1
            only_mon: true
            keep_running: true

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               dev: master
            conf:
               mon:
                  debug mon = 20

        tasks:
        - install:
            extras: yes
        - ssh_keys:
        - ceph-deploy:
            branch:
               testing:
            dmcrypt: yes
            separate_journal_disk: yes

    """
    if config is None:
        config = {}

    assert isinstance(config, dict), \
        "task ceph-deploy only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))

    if config.get('branch') is not None:
        assert isinstance(
            config['branch'], dict), 'branch must be a dictionary'

    log.info('task ceph-deploy with config ' + str(config))

    with contextutil.nested(
        lambda: install_fn.ship_utilities(ctx=ctx, config=None),
        lambda: download_ceph_deploy(ctx=ctx, config=config),
        lambda: build_ceph_cluster(ctx=ctx, config=config),
    ):
        yield