2013-10-12 08:28:27 +00:00
|
|
|
"""
|
2013-11-21 19:56:41 +00:00
|
|
|
Ceph cluster task.
|
2013-10-12 08:28:27 +00:00
|
|
|
|
|
|
|
Handle the setup, starting, and clean-up of a Ceph cluster.
|
|
|
|
"""
|
2019-11-26 12:21:15 +00:00
|
|
|
from io import BytesIO
|
2020-06-28 11:43:09 +00:00
|
|
|
from io import StringIO
|
2011-05-31 20:51:48 +00:00
|
|
|
|
2011-11-18 01:00:44 +00:00
|
|
|
import argparse
|
2018-12-07 19:16:31 +00:00
|
|
|
import configobj
|
2011-06-03 21:47:44 +00:00
|
|
|
import contextlib
|
2016-03-30 00:14:45 +00:00
|
|
|
import errno
|
2011-05-31 20:51:48 +00:00
|
|
|
import logging
|
|
|
|
import os
|
2014-02-04 01:17:09 +00:00
|
|
|
import json
|
|
|
|
import time
|
2015-05-13 02:53:00 +00:00
|
|
|
import gevent
|
2018-12-07 19:16:31 +00:00
|
|
|
import re
|
2016-09-05 10:21:24 +00:00
|
|
|
import socket
|
2011-05-31 20:51:48 +00:00
|
|
|
|
2017-02-10 17:15:28 +00:00
|
|
|
from paramiko import SSHException
|
2019-12-14 00:16:34 +00:00
|
|
|
from tasks.ceph_manager import CephManager, write_conf
|
2018-10-28 14:26:00 +00:00
|
|
|
from tarfile import ReadError
|
2014-12-02 13:30:06 +00:00
|
|
|
from tasks.cephfs.filesystem import Filesystem
|
2011-06-03 21:47:44 +00:00
|
|
|
from teuthology import misc as teuthology
|
2011-06-15 21:57:02 +00:00
|
|
|
from teuthology import contextutil
|
2016-03-17 02:10:10 +00:00
|
|
|
from teuthology import exceptions
|
2014-08-07 14:24:59 +00:00
|
|
|
from teuthology.orchestra import run
|
2020-06-19 11:26:10 +00:00
|
|
|
from tasks import ceph_client as cclient
|
2014-07-24 01:58:55 +00:00
|
|
|
from teuthology.orchestra.daemon import DaemonGroup
|
2019-06-06 11:20:18 +00:00
|
|
|
from tasks.daemonwatchdog import DaemonWatchdog
|
2011-05-31 20:51:48 +00:00
|
|
|
|
2016-09-27 11:22:45 +00:00
|
|
|
CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw']
|
2017-04-26 21:10:57 +00:00
|
|
|
DATA_PATH = '/var/lib/ceph/{type_}/{cluster}-{id_}'
|
2014-05-12 13:25:26 +00:00
|
|
|
|
2011-05-31 20:51:48 +00:00
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
2011-09-14 23:31:58 +00:00
|
|
|
|
2016-09-27 11:17:22 +00:00
|
|
|
def generate_caps(type_):
|
|
|
|
"""
|
|
|
|
Each call will return the next capability for each system type
|
|
|
|
(essentially a subset of possible role values). Valid types are osd,
|
|
|
|
mds and client.
|
|
|
|
"""
|
|
|
|
defaults = dict(
|
|
|
|
osd=dict(
|
|
|
|
mon='allow *',
|
2017-03-10 19:15:46 +00:00
|
|
|
mgr='allow *',
|
2016-09-27 11:17:22 +00:00
|
|
|
osd='allow *',
|
|
|
|
),
|
2016-09-27 11:22:45 +00:00
|
|
|
mgr=dict(
|
2017-10-23 09:37:49 +00:00
|
|
|
mon='allow profile mgr',
|
|
|
|
osd='allow *',
|
|
|
|
mds='allow *',
|
2016-09-27 11:22:45 +00:00
|
|
|
),
|
2016-09-27 11:17:22 +00:00
|
|
|
mds=dict(
|
|
|
|
mon='allow *',
|
2017-03-10 19:15:46 +00:00
|
|
|
mgr='allow *',
|
2016-09-27 11:17:22 +00:00
|
|
|
osd='allow *',
|
|
|
|
mds='allow',
|
|
|
|
),
|
|
|
|
client=dict(
|
|
|
|
mon='allow rw',
|
2017-03-10 19:15:46 +00:00
|
|
|
mgr='allow r',
|
2016-09-27 11:17:22 +00:00
|
|
|
osd='allow rwx',
|
|
|
|
mds='allow',
|
|
|
|
),
|
|
|
|
)
|
|
|
|
for subsystem, capability in defaults[type_].items():
|
|
|
|
yield '--cap'
|
|
|
|
yield subsystem
|
|
|
|
yield capability
|
|
|
|
|
|
|
|
|
2018-10-28 14:26:00 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def ceph_crash(ctx, config):
|
|
|
|
"""
|
|
|
|
Gather crash dumps from /var/lib/crash
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
yield
|
|
|
|
|
|
|
|
finally:
|
|
|
|
if ctx.archive is not None:
|
|
|
|
log.info('Archiving crash dumps...')
|
|
|
|
path = os.path.join(ctx.archive, 'remote')
|
|
|
|
try:
|
|
|
|
os.makedirs(path)
|
2019-12-09 15:17:23 +00:00
|
|
|
except OSError:
|
2018-10-28 14:26:00 +00:00
|
|
|
pass
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in ctx.cluster.remotes.keys():
|
2018-10-28 14:26:00 +00:00
|
|
|
sub = os.path.join(path, remote.shortname)
|
|
|
|
try:
|
|
|
|
os.makedirs(sub)
|
2019-12-09 15:17:23 +00:00
|
|
|
except OSError:
|
2018-10-28 14:26:00 +00:00
|
|
|
pass
|
|
|
|
try:
|
|
|
|
teuthology.pull_directory(remote, '/var/lib/ceph/crash',
|
|
|
|
os.path.join(sub, 'crash'))
|
2019-12-09 15:17:23 +00:00
|
|
|
except ReadError:
|
2018-10-28 14:26:00 +00:00
|
|
|
pass
|
|
|
|
|
|
|
|
|
2011-06-16 20:13:32 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def ceph_log(ctx, config):
|
2013-10-12 08:28:27 +00:00
|
|
|
"""
|
|
|
|
Create /var/log/ceph log directory that is open to everyone.
|
|
|
|
Add valgrind and profiling-logger directories.
|
|
|
|
|
|
|
|
:param ctx: Context
|
|
|
|
:param config: Configuration
|
|
|
|
"""
|
2013-02-21 21:13:51 +00:00
|
|
|
log.info('Making ceph log dir writeable by non-root...')
|
2013-02-21 19:05:30 +00:00
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
2013-09-03 18:09:39 +00:00
|
|
|
'chmod',
|
|
|
|
'777',
|
2013-02-21 19:05:30 +00:00
|
|
|
'/var/log/ceph',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2013-02-21 19:05:30 +00:00
|
|
|
wait=False,
|
|
|
|
)
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2013-06-27 00:48:03 +00:00
|
|
|
log.info('Disabling ceph logrotate...')
|
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'rm', '-f', '--',
|
|
|
|
'/etc/logrotate.d/ceph',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2013-06-27 00:48:03 +00:00
|
|
|
wait=False,
|
|
|
|
)
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2013-02-17 07:44:03 +00:00
|
|
|
log.info('Creating extra log directories...')
|
2011-06-16 20:13:32 +00:00
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
2013-02-17 07:44:03 +00:00
|
|
|
'sudo',
|
2015-04-28 03:43:41 +00:00
|
|
|
'install', '-d', '-m0777', '--',
|
2013-02-17 07:44:03 +00:00
|
|
|
'/var/log/ceph/valgrind',
|
|
|
|
'/var/log/ceph/profiling-logger',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2011-06-16 20:13:32 +00:00
|
|
|
wait=False,
|
|
|
|
)
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 20:13:32 +00:00
|
|
|
|
2015-06-09 23:14:12 +00:00
|
|
|
class Rotater(object):
|
|
|
|
stop_event = gevent.event.Event()
|
2015-11-09 13:09:27 +00:00
|
|
|
|
2015-05-13 02:53:00 +00:00
|
|
|
def invoke_logrotate(self):
|
2015-11-09 13:09:27 +00:00
|
|
|
# 1) install ceph-test.conf in /etc/logrotate.d
|
|
|
|
# 2) continuously loop over logrotate invocation with ceph-test.conf
|
2015-06-09 23:14:12 +00:00
|
|
|
while not self.stop_event.is_set():
|
|
|
|
self.stop_event.wait(timeout=30)
|
2016-09-05 10:21:24 +00:00
|
|
|
try:
|
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'
|
|
|
|
],
|
|
|
|
wait=False,
|
|
|
|
)
|
2015-05-13 02:53:00 +00:00
|
|
|
)
|
2016-09-05 10:21:24 +00:00
|
|
|
except exceptions.ConnectionLostError as e:
|
|
|
|
# Some tests may power off nodes during test, in which
|
|
|
|
# case we will see connection errors that we should ignore.
|
|
|
|
log.debug("Missed logrotate, node '{0}' is offline".format(
|
|
|
|
e.node))
|
2019-12-09 16:27:46 +00:00
|
|
|
except EOFError:
|
2016-09-05 10:21:24 +00:00
|
|
|
# Paramiko sometimes raises this when it fails to
|
|
|
|
# connect to a node during open_session. As with
|
|
|
|
# ConnectionLostError, we ignore this because nodes
|
|
|
|
# are allowed to get power cycled during tests.
|
|
|
|
log.debug("Missed logrotate, EOFError")
|
2019-12-09 16:27:46 +00:00
|
|
|
except SSHException:
|
2017-02-10 17:15:28 +00:00
|
|
|
log.debug("Missed logrotate, SSHException")
|
2016-09-05 10:21:24 +00:00
|
|
|
except socket.error as e:
|
2019-10-09 04:52:20 +00:00
|
|
|
if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
|
2016-09-05 10:21:24 +00:00
|
|
|
log.debug("Missed logrotate, host unreachable")
|
|
|
|
else:
|
|
|
|
raise
|
2015-05-13 02:53:00 +00:00
|
|
|
|
|
|
|
def begin(self):
|
|
|
|
self.thread = gevent.spawn(self.invoke_logrotate)
|
|
|
|
|
|
|
|
def end(self):
|
2015-06-12 18:05:42 +00:00
|
|
|
self.stop_event.set()
|
2015-05-13 02:53:00 +00:00
|
|
|
self.thread.get()
|
2015-08-05 16:08:35 +00:00
|
|
|
|
2015-05-28 22:54:15 +00:00
|
|
|
def write_rotate_conf(ctx, daemons):
|
2015-05-13 02:53:00 +00:00
|
|
|
testdir = teuthology.get_testdir(ctx)
|
2019-11-26 12:21:15 +00:00
|
|
|
remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir
|
2015-05-13 02:53:00 +00:00
|
|
|
rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
|
2020-06-28 11:43:09 +00:00
|
|
|
with open(rotate_conf_path) as f:
|
2015-05-28 22:54:15 +00:00
|
|
|
conf = ""
|
2019-10-08 14:41:32 +00:00
|
|
|
for daemon, size in daemons.items():
|
2019-11-26 12:21:15 +00:00
|
|
|
log.info('writing logrotate stanza for {}'.format(daemon))
|
2020-06-28 11:43:09 +00:00
|
|
|
conf += f.read().format(daemon_type=daemon,
|
|
|
|
max_size=size)
|
2015-05-28 22:54:15 +00:00
|
|
|
f.seek(0, 0)
|
2015-08-05 16:08:35 +00:00
|
|
|
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in ctx.cluster.remotes.keys():
|
2015-05-28 22:54:15 +00:00
|
|
|
teuthology.write_file(remote=remote,
|
2019-11-26 12:21:15 +00:00
|
|
|
path=remote_logrotate_conf,
|
|
|
|
data=BytesIO(conf.encode())
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2015-05-28 22:54:15 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'mv',
|
2019-11-26 12:21:15 +00:00
|
|
|
remote_logrotate_conf,
|
2015-05-28 22:54:15 +00:00
|
|
|
'/etc/logrotate.d/ceph-test.conf',
|
|
|
|
run.Raw('&&'),
|
|
|
|
'sudo',
|
|
|
|
'chmod',
|
|
|
|
'0644',
|
|
|
|
'/etc/logrotate.d/ceph-test.conf',
|
|
|
|
run.Raw('&&'),
|
|
|
|
'sudo',
|
|
|
|
'chown',
|
|
|
|
'root.root',
|
|
|
|
'/etc/logrotate.d/ceph-test.conf'
|
|
|
|
]
|
2015-08-06 09:25:03 +00:00
|
|
|
)
|
2015-08-05 16:08:35 +00:00
|
|
|
remote.chcon('/etc/logrotate.d/ceph-test.conf',
|
|
|
|
'system_u:object_r:etc_t:s0')
|
2015-05-13 02:53:00 +00:00
|
|
|
|
2015-05-28 22:54:15 +00:00
|
|
|
if ctx.config.get('log-rotate'):
|
|
|
|
daemons = ctx.config.get('log-rotate')
|
|
|
|
log.info('Setting up log rotation with ' + str(daemons))
|
|
|
|
write_rotate_conf(ctx, daemons)
|
2015-05-13 02:53:00 +00:00
|
|
|
logrotater = Rotater()
|
|
|
|
logrotater.begin()
|
2011-06-16 20:13:32 +00:00
|
|
|
try:
|
|
|
|
yield
|
|
|
|
|
2013-02-17 07:44:03 +00:00
|
|
|
finally:
|
2015-05-28 22:54:15 +00:00
|
|
|
if ctx.config.get('log-rotate'):
|
|
|
|
log.info('Shutting down logrotate')
|
2015-05-13 02:53:00 +00:00
|
|
|
logrotater.end()
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=['sudo', 'rm', '/etc/logrotate.d/ceph-test.conf'
|
2015-11-09 13:09:27 +00:00
|
|
|
]
|
2015-05-13 02:53:00 +00:00
|
|
|
)
|
2015-02-21 00:54:09 +00:00
|
|
|
if ctx.archive is not None and \
|
|
|
|
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
|
|
|
|
# and logs
|
|
|
|
log.info('Compressing logs...')
|
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'find',
|
|
|
|
'/var/log/ceph',
|
|
|
|
'-name',
|
|
|
|
'*.log',
|
|
|
|
'-print0',
|
|
|
|
run.Raw('|'),
|
|
|
|
'sudo',
|
|
|
|
'xargs',
|
|
|
|
'-0',
|
|
|
|
'--no-run-if-empty',
|
|
|
|
'--',
|
|
|
|
'gzip',
|
|
|
|
'--',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2015-02-21 00:54:09 +00:00
|
|
|
wait=False,
|
2015-11-09 13:09:27 +00:00
|
|
|
),
|
|
|
|
)
|
2015-02-21 00:54:09 +00:00
|
|
|
|
|
|
|
log.info('Archiving logs...')
|
|
|
|
path = os.path.join(ctx.archive, 'remote')
|
2018-10-28 14:26:00 +00:00
|
|
|
try:
|
|
|
|
os.makedirs(path)
|
2019-12-09 15:17:23 +00:00
|
|
|
except OSError:
|
2018-10-28 14:26:00 +00:00
|
|
|
pass
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in ctx.cluster.remotes.keys():
|
2015-02-21 00:54:09 +00:00
|
|
|
sub = os.path.join(path, remote.shortname)
|
2018-10-28 14:26:00 +00:00
|
|
|
try:
|
|
|
|
os.makedirs(sub)
|
2019-12-09 15:17:23 +00:00
|
|
|
except OSError:
|
2018-10-28 14:26:00 +00:00
|
|
|
pass
|
2015-02-21 00:54:09 +00:00
|
|
|
teuthology.pull_directory(remote, '/var/log/ceph',
|
|
|
|
os.path.join(sub, 'log'))
|
2011-06-16 20:13:32 +00:00
|
|
|
|
2013-09-06 19:22:29 +00:00
|
|
|
|
2011-10-03 21:03:36 +00:00
|
|
|
def assign_devs(roles, devs):
|
2013-10-12 08:28:27 +00:00
|
|
|
"""
|
|
|
|
Create a dictionary of devs indexed by roles
|
|
|
|
|
2013-11-21 19:56:41 +00:00
|
|
|
:param roles: List of roles
|
2013-10-12 08:28:27 +00:00
|
|
|
:param devs: Corresponding list of devices.
|
|
|
|
:returns: Dictionary of devs indexed by roles.
|
|
|
|
"""
|
2011-10-03 21:03:36 +00:00
|
|
|
return dict(zip(roles, devs))
|
|
|
|
|
2015-11-09 13:09:27 +00:00
|
|
|
|
2011-08-29 20:58:09 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def valgrind_post(ctx, config):
|
2013-10-12 08:28:27 +00:00
|
|
|
"""
|
2018-09-18 03:21:13 +00:00
|
|
|
After the tests run, look through all the valgrind logs. Exceptions are raised
|
2018-03-10 14:36:31 +00:00
|
|
|
if textual errors occurred in the logs, or if valgrind exceptions were detected in
|
2013-10-12 08:28:27 +00:00
|
|
|
the logs.
|
|
|
|
|
|
|
|
:param ctx: Context
|
|
|
|
:param config: Configuration
|
|
|
|
"""
|
2011-08-29 20:58:09 +00:00
|
|
|
try:
|
|
|
|
yield
|
|
|
|
finally:
|
2012-02-22 00:10:37 +00:00
|
|
|
lookup_procs = list()
|
2015-11-09 13:09:27 +00:00
|
|
|
log.info('Checking for errors in any valgrind logs...')
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in ctx.cluster.remotes.keys():
|
2015-11-09 13:09:27 +00:00
|
|
|
# look at valgrind logs for each node
|
2012-02-22 00:10:37 +00:00
|
|
|
proc = remote.run(
|
2020-03-06 04:17:40 +00:00
|
|
|
args="sudo zgrep '<kind>' /var/log/ceph/valgrind/* "
|
2019-11-26 12:21:15 +00:00
|
|
|
# include a second file so that we always get
|
|
|
|
# a filename prefix on the output
|
2020-03-06 04:17:40 +00:00
|
|
|
"/dev/null | sort | uniq",
|
2013-05-22 20:22:21 +00:00
|
|
|
wait=False,
|
2012-11-18 00:19:14 +00:00
|
|
|
check_status=False,
|
2020-06-28 11:43:09 +00:00
|
|
|
stdout=StringIO(),
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2012-02-22 00:10:37 +00:00
|
|
|
lookup_procs.append((proc, remote))
|
2011-09-02 18:07:10 +00:00
|
|
|
|
2012-02-22 00:10:37 +00:00
|
|
|
valgrind_exception = None
|
|
|
|
for (proc, remote) in lookup_procs:
|
2014-05-30 19:32:38 +00:00
|
|
|
proc.wait()
|
2020-06-28 11:43:09 +00:00
|
|
|
out = proc.stdout.getvalue()
|
2012-11-18 00:19:14 +00:00
|
|
|
for line in out.split('\n'):
|
|
|
|
if line == '':
|
|
|
|
continue
|
2013-06-03 16:57:17 +00:00
|
|
|
try:
|
|
|
|
(file, kind) = line.split(':')
|
2013-08-30 15:58:10 +00:00
|
|
|
except Exception:
|
2013-06-03 16:57:17 +00:00
|
|
|
log.error('failed to split line %s', line)
|
|
|
|
raise
|
2012-11-18 00:19:14 +00:00
|
|
|
log.debug('file %s kind %s', file, kind)
|
2013-08-02 18:57:14 +00:00
|
|
|
if (file.find('mds') >= 0) and kind.find('Lost') > 0:
|
2012-11-18 00:19:14 +00:00
|
|
|
continue
|
|
|
|
log.error('saw valgrind issue %s in %s', kind, file)
|
|
|
|
valgrind_exception = Exception('saw valgrind issues')
|
2011-09-02 18:07:10 +00:00
|
|
|
|
2015-09-10 14:44:34 +00:00
|
|
|
if config.get('expect_valgrind_errors'):
|
|
|
|
if not valgrind_exception:
|
|
|
|
raise Exception('expected valgrind issues and found none')
|
|
|
|
else:
|
|
|
|
if valgrind_exception:
|
|
|
|
raise valgrind_exception
|
2011-08-29 20:58:09 +00:00
|
|
|
|
2015-11-09 13:09:27 +00:00
|
|
|
|
2015-01-19 15:58:39 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def crush_setup(ctx, config):
|
2016-03-18 02:31:32 +00:00
|
|
|
cluster_name = config['cluster']
|
|
|
|
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
|
2019-10-11 15:57:47 +00:00
|
|
|
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
|
2015-01-19 15:58:39 +00:00
|
|
|
|
|
|
|
profile = config.get('crush_tunables', 'default')
|
|
|
|
log.info('Setting crush tunables to %s', profile)
|
|
|
|
mon_remote.run(
|
2016-03-18 02:31:32 +00:00
|
|
|
args=['sudo', 'ceph', '--cluster', cluster_name,
|
|
|
|
'osd', 'crush', 'tunables', profile])
|
2015-01-19 15:58:39 +00:00
|
|
|
yield
|
2013-01-23 02:13:19 +00:00
|
|
|
|
2015-11-09 13:09:27 +00:00
|
|
|
|
2017-06-23 20:02:26 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def create_rbd_pool(ctx, config):
|
|
|
|
cluster_name = config['cluster']
|
|
|
|
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
|
2019-10-11 15:57:47 +00:00
|
|
|
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
|
2017-07-11 03:48:47 +00:00
|
|
|
log.info('Waiting for OSDs to come up')
|
|
|
|
teuthology.wait_until_osds_up(
|
|
|
|
ctx,
|
|
|
|
cluster=ctx.cluster,
|
|
|
|
remote=mon_remote,
|
|
|
|
ceph_cluster=cluster_name,
|
|
|
|
)
|
2017-10-15 12:46:28 +00:00
|
|
|
if config.get('create_rbd_pool', True):
|
|
|
|
log.info('Creating RBD pool')
|
|
|
|
mon_remote.run(
|
|
|
|
args=['sudo', 'ceph', '--cluster', cluster_name,
|
|
|
|
'osd', 'pool', 'create', 'rbd', '8'])
|
|
|
|
mon_remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo', 'ceph', '--cluster', cluster_name,
|
|
|
|
'osd', 'pool', 'application', 'enable',
|
|
|
|
'rbd', 'rbd', '--yes-i-really-mean-it'
|
|
|
|
],
|
|
|
|
check_status=False)
|
2017-06-23 20:02:26 +00:00
|
|
|
yield
|
|
|
|
|
2014-05-27 13:12:07 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def cephfs_setup(ctx, config):
|
2016-03-18 03:06:44 +00:00
|
|
|
cluster_name = config['cluster']
|
2014-07-07 12:15:41 +00:00
|
|
|
|
2016-03-18 03:06:44 +00:00
|
|
|
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
|
2019-10-11 15:57:47 +00:00
|
|
|
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
|
2016-03-18 03:06:44 +00:00
|
|
|
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
|
2014-05-27 13:12:07 +00:00
|
|
|
# If there are any MDSs, then create a filesystem for them to use
|
|
|
|
# Do this last because requires mon cluster to be up and running
|
|
|
|
if mdss.remotes:
|
|
|
|
log.info('Setting up CephFS filesystem...')
|
|
|
|
|
2017-10-09 13:15:21 +00:00
|
|
|
fs = Filesystem(ctx, name='cephfs', create=True,
|
|
|
|
ec_profile=config.get('cephfs_ec_profile', None))
|
2014-05-27 13:12:07 +00:00
|
|
|
|
2019-02-25 19:10:01 +00:00
|
|
|
max_mds = config.get('max_mds', 1)
|
|
|
|
if max_mds > 1:
|
|
|
|
fs.set_max_mds(max_mds)
|
2014-07-07 12:15:41 +00:00
|
|
|
|
2014-05-27 13:12:07 +00:00
|
|
|
yield
|
|
|
|
|
2019-06-06 11:20:18 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def watchdog_setup(ctx, config):
|
|
|
|
ctx.ceph[config['cluster']].thrashers = []
|
|
|
|
ctx.ceph[config['cluster']].watchdog = DaemonWatchdog(ctx, config, ctx.ceph[config['cluster']].thrashers)
|
|
|
|
ctx.ceph[config['cluster']].watchdog.start()
|
|
|
|
yield
|
2014-05-27 13:12:07 +00:00
|
|
|
|
2019-01-30 17:23:10 +00:00
|
|
|
def get_mons(roles, ips, cluster_name,
|
2018-12-07 19:16:31 +00:00
|
|
|
mon_bind_msgr2=False,
|
|
|
|
mon_bind_addrvec=False):
|
|
|
|
"""
|
|
|
|
Get monitors and their associated addresses
|
|
|
|
"""
|
|
|
|
mons = {}
|
|
|
|
v1_ports = {}
|
|
|
|
v2_ports = {}
|
2019-01-30 17:23:10 +00:00
|
|
|
is_mon = teuthology.is_type('mon', cluster_name)
|
2018-12-07 19:16:31 +00:00
|
|
|
for idx, roles in enumerate(roles):
|
|
|
|
for role in roles:
|
|
|
|
if not is_mon(role):
|
|
|
|
continue
|
|
|
|
if ips[idx] not in v1_ports:
|
|
|
|
v1_ports[ips[idx]] = 6789
|
|
|
|
else:
|
|
|
|
v1_ports[ips[idx]] += 1
|
|
|
|
if mon_bind_msgr2:
|
|
|
|
if ips[idx] not in v2_ports:
|
|
|
|
v2_ports[ips[idx]] = 3300
|
|
|
|
addr = '{ip}'.format(ip=ips[idx])
|
|
|
|
else:
|
|
|
|
assert mon_bind_addrvec
|
|
|
|
v2_ports[ips[idx]] += 1
|
2019-01-10 22:25:00 +00:00
|
|
|
addr = '[v2:{ip}:{port2},v1:{ip}:{port1}]'.format(
|
2018-12-07 19:16:31 +00:00
|
|
|
ip=ips[idx],
|
|
|
|
port2=v2_ports[ips[idx]],
|
|
|
|
port1=v1_ports[ips[idx]],
|
|
|
|
)
|
|
|
|
elif mon_bind_addrvec:
|
2019-01-10 22:25:00 +00:00
|
|
|
addr = '[v1:{ip}:{port}]'.format(
|
2018-12-07 19:16:31 +00:00
|
|
|
ip=ips[idx],
|
|
|
|
port=v1_ports[ips[idx]],
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
addr = '{ip}:{port}'.format(
|
|
|
|
ip=ips[idx],
|
|
|
|
port=v1_ports[ips[idx]],
|
|
|
|
)
|
|
|
|
mons[role] = addr
|
|
|
|
assert mons
|
|
|
|
return mons
|
|
|
|
|
|
|
|
def skeleton_config(ctx, roles, ips, mons, cluster='ceph'):
|
|
|
|
"""
|
|
|
|
Returns a ConfigObj that is prefilled with a skeleton config.
|
|
|
|
|
|
|
|
Use conf[section][key]=value or conf.merge to change it.
|
|
|
|
|
|
|
|
Use conf.write to write it out, override .filename first if you want.
|
|
|
|
"""
|
|
|
|
path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
|
2019-10-09 23:15:37 +00:00
|
|
|
conf = configobj.ConfigObj(path, file_error=True)
|
2018-12-07 19:16:31 +00:00
|
|
|
mon_hosts = []
|
2019-10-08 14:41:32 +00:00
|
|
|
for role, addr in mons.items():
|
2018-12-07 19:16:31 +00:00
|
|
|
mon_cluster, _, _ = teuthology.split_role(role)
|
|
|
|
if mon_cluster != cluster:
|
|
|
|
continue
|
|
|
|
name = teuthology.ceph_role(role)
|
|
|
|
conf.setdefault(name, {})
|
|
|
|
mon_hosts.append(addr)
|
|
|
|
conf.setdefault('global', {})
|
|
|
|
conf['global']['mon host'] = ','.join(mon_hosts)
|
|
|
|
# set up standby mds's
|
|
|
|
is_mds = teuthology.is_type('mds', cluster)
|
|
|
|
for roles_subset in roles:
|
|
|
|
for role in roles_subset:
|
|
|
|
if is_mds(role):
|
|
|
|
name = teuthology.ceph_role(role)
|
|
|
|
conf.setdefault(name, {})
|
|
|
|
return conf
|
|
|
|
|
|
|
|
def create_simple_monmap(ctx, remote, conf, mons,
|
|
|
|
path=None,
|
|
|
|
mon_bind_addrvec=False):
|
|
|
|
"""
|
|
|
|
Writes a simple monmap based on current ceph.conf into path, or
|
|
|
|
<testdir>/monmap by default.
|
|
|
|
|
|
|
|
Assumes ceph_conf is up to date.
|
|
|
|
|
|
|
|
Assumes mon sections are named "mon.*", with the dot.
|
|
|
|
|
|
|
|
:return the FSID (as a string) of the newly created monmap
|
|
|
|
"""
|
|
|
|
|
2019-10-08 14:41:32 +00:00
|
|
|
addresses = list(mons.items())
|
2018-12-07 19:16:31 +00:00
|
|
|
assert addresses, "There are no monitors in config!"
|
|
|
|
log.debug('Ceph mon addresses: %s', addresses)
|
|
|
|
|
|
|
|
testdir = teuthology.get_testdir(ctx)
|
|
|
|
args = [
|
|
|
|
'adjust-ulimits',
|
|
|
|
'ceph-coverage',
|
|
|
|
'{tdir}/archive/coverage'.format(tdir=testdir),
|
|
|
|
'monmaptool',
|
|
|
|
'--create',
|
|
|
|
'--clobber',
|
|
|
|
]
|
2018-12-19 03:35:28 +00:00
|
|
|
if mon_bind_addrvec:
|
|
|
|
args.extend(['--enable-all-features'])
|
2019-01-30 17:23:10 +00:00
|
|
|
for (role, addr) in addresses:
|
|
|
|
_, _, n = teuthology.split_role(role)
|
2018-12-19 03:28:49 +00:00
|
|
|
if mon_bind_addrvec and (',' in addr or 'v' in addr or ':' in addr):
|
2018-12-07 19:16:31 +00:00
|
|
|
args.extend(('--addv', n, addr))
|
|
|
|
else:
|
|
|
|
args.extend(('--add', n, addr))
|
|
|
|
if not path:
|
|
|
|
path = '{tdir}/monmap'.format(tdir=testdir)
|
|
|
|
args.extend([
|
|
|
|
'--print',
|
|
|
|
path
|
|
|
|
])
|
|
|
|
|
2019-11-26 12:21:15 +00:00
|
|
|
monmap_output = remote.sh(args)
|
2018-12-07 19:16:31 +00:00
|
|
|
fsid = re.search("generated fsid (.+)$",
|
|
|
|
monmap_output, re.MULTILINE).group(1)
|
|
|
|
return fsid
|
|
|
|
|
2020-07-30 15:20:35 +00:00
|
|
|
|
2020-08-21 12:22:23 +00:00
|
|
|
def maybe_redirect_stderr(config, type_, args, log_path):
|
2020-07-30 15:20:35 +00:00
|
|
|
if type_ == 'osd' and \
|
2020-08-06 17:11:53 +00:00
|
|
|
config.get('flavor', 'default') == 'crimson':
|
2020-08-21 12:22:23 +00:00
|
|
|
# teuthworker uses ubuntu:ubuntu to access the test nodes
|
|
|
|
create_log_cmd = \
|
|
|
|
f'sudo install -b -o ubuntu -g ubuntu /dev/null {log_path}'
|
|
|
|
return create_log_cmd, args + [run.Raw('2>>'), log_path]
|
2020-07-30 15:20:35 +00:00
|
|
|
else:
|
2020-08-21 12:22:23 +00:00
|
|
|
return None, args
|
2020-07-30 15:20:35 +00:00
|
|
|
|
|
|
|
|
2011-06-16 17:36:15 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def cluster(ctx, config):
|
2013-10-12 08:28:27 +00:00
|
|
|
"""
|
|
|
|
Handle the creation and removal of a ceph cluster.
|
|
|
|
|
|
|
|
On startup:
|
|
|
|
Create directories needed for the cluster.
|
|
|
|
Create remote journals for all osds.
|
|
|
|
Create and set keyring.
|
2018-09-18 03:21:13 +00:00
|
|
|
Copy the monmap to the test systems.
|
2013-10-12 08:28:27 +00:00
|
|
|
Setup mon nodes.
|
|
|
|
Setup mds nodes.
|
|
|
|
Mkfs osd nodes.
|
|
|
|
Add keyring information to monmaps
|
|
|
|
Mkfs mon nodes.
|
2013-11-21 19:56:41 +00:00
|
|
|
|
2013-10-12 08:28:27 +00:00
|
|
|
On exit:
|
2018-03-10 14:36:31 +00:00
|
|
|
If errors occurred, extract a failure message and store in ctx.summary.
|
2013-10-12 08:28:27 +00:00
|
|
|
Unmount all test files and temporary journaling files.
|
|
|
|
Save the monitor information and archive all ceph logs.
|
|
|
|
Cleanup the keyring setup, and remove all monitor map and data files left over.
|
|
|
|
|
|
|
|
:param ctx: Context
|
|
|
|
:param config: Configuration
|
|
|
|
"""
|
2013-11-21 19:56:41 +00:00
|
|
|
if ctx.config.get('use_existing_cluster', False) is True:
|
|
|
|
log.info("'use_existing_cluster' is true; skipping cluster creation")
|
|
|
|
yield
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
testdir = teuthology.get_testdir(ctx)
|
2016-03-17 02:10:10 +00:00
|
|
|
cluster_name = config['cluster']
|
|
|
|
data_dir = '{tdir}/{cluster}.data'.format(tdir=testdir, cluster=cluster_name)
|
|
|
|
log.info('Creating ceph cluster %s...', cluster_name)
|
2018-11-26 20:00:38 +00:00
|
|
|
log.info('config %s', config)
|
|
|
|
log.info('ctx.config %s', ctx.config)
|
2011-06-16 21:05:13 +00:00
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
|
|
|
'install', '-d', '-m0755', '--',
|
2016-03-17 02:10:10 +00:00
|
|
|
data_dir,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2011-06-16 21:05:13 +00:00
|
|
|
wait=False,
|
|
|
|
)
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2013-02-18 17:41:00 +00:00
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
2013-02-21 21:13:51 +00:00
|
|
|
'install', '-d', '-m0777', '--', '/var/run/ceph',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2013-02-18 17:41:00 +00:00
|
|
|
wait=False,
|
|
|
|
)
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2012-03-27 22:05:11 +00:00
|
|
|
|
|
|
|
devs_to_clean = {}
|
|
|
|
remote_to_roles_to_devs = {}
|
2016-03-17 02:10:10 +00:00
|
|
|
osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name))
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in osds.remotes.items():
|
2012-03-27 22:05:11 +00:00
|
|
|
devs = teuthology.get_scratch_devices(remote)
|
2019-10-22 14:45:04 +00:00
|
|
|
roles_to_devs = assign_devs(
|
|
|
|
teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), devs
|
|
|
|
)
|
|
|
|
devs_to_clean[remote] = []
|
2019-10-22 14:35:27 +00:00
|
|
|
log.info('osd dev map: {}'.format(roles_to_devs))
|
2019-10-22 14:45:04 +00:00
|
|
|
assert roles_to_devs, \
|
|
|
|
"remote {} has osd roles, but no osd devices were specified!".format(remote.hostname)
|
2012-03-27 22:05:11 +00:00
|
|
|
remote_to_roles_to_devs[remote] = roles_to_devs
|
2019-10-22 14:45:04 +00:00
|
|
|
log.info("remote_to_roles_to_devs: {}".format(remote_to_roles_to_devs))
|
|
|
|
for osd_role, dev_name in remote_to_roles_to_devs.items():
|
|
|
|
assert dev_name, "{} has no associated device!".format(osd_role)
|
2012-03-27 22:05:11 +00:00
|
|
|
|
2011-07-05 23:45:32 +00:00
|
|
|
log.info('Generating config...')
|
2011-06-16 17:36:15 +00:00
|
|
|
remotes_and_roles = ctx.cluster.remotes.items()
|
2013-02-21 22:47:00 +00:00
|
|
|
roles = [role_list for (remote, role_list) in remotes_and_roles]
|
2015-11-09 13:09:27 +00:00
|
|
|
ips = [host for (host, port) in
|
|
|
|
(remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
|
2018-12-07 19:16:31 +00:00
|
|
|
mons = get_mons(
|
2019-01-30 17:23:10 +00:00
|
|
|
roles, ips, cluster_name,
|
2018-11-26 20:00:38 +00:00
|
|
|
mon_bind_msgr2=config.get('mon_bind_msgr2'),
|
|
|
|
mon_bind_addrvec=config.get('mon_bind_addrvec'),
|
2018-12-07 19:16:31 +00:00
|
|
|
)
|
|
|
|
conf = skeleton_config(
|
|
|
|
ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name,
|
2018-11-26 20:00:38 +00:00
|
|
|
)
|
2019-10-08 14:41:32 +00:00
|
|
|
for section, keys in config['conf'].items():
|
|
|
|
for key, value in keys.items():
|
2011-07-05 23:45:32 +00:00
|
|
|
log.info("[%s] %s = %s" % (section, key, value))
|
|
|
|
if section not in conf:
|
|
|
|
conf[section] = {}
|
|
|
|
conf[section][key] = value
|
2011-07-12 01:00:03 +00:00
|
|
|
|
2016-03-23 23:48:53 +00:00
|
|
|
if not hasattr(ctx, 'ceph'):
|
|
|
|
ctx.ceph = {}
|
|
|
|
ctx.ceph[cluster_name] = argparse.Namespace()
|
|
|
|
ctx.ceph[cluster_name].conf = conf
|
2018-12-19 03:18:31 +00:00
|
|
|
ctx.ceph[cluster_name].mons = mons
|
2011-11-09 06:00:32 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
default_keyring = '/etc/ceph/{cluster}.keyring'.format(cluster=cluster_name)
|
|
|
|
keyring_path = config.get('keyring_path', default_keyring)
|
2013-02-06 19:16:52 +00:00
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
|
2011-07-27 04:46:47 +00:00
|
|
|
|
|
|
|
log.info('Setting up %s...' % firstmon)
|
|
|
|
ctx.cluster.only(firstmon).run(
|
2011-06-16 17:36:15 +00:00
|
|
|
args=[
|
2013-02-06 19:16:52 +00:00
|
|
|
'sudo',
|
2013-09-06 19:22:29 +00:00
|
|
|
'adjust-ulimits',
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-coverage',
|
2011-06-16 17:36:15 +00:00
|
|
|
coverage_dir,
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-authtool',
|
2011-06-16 17:36:15 +00:00
|
|
|
'--create-keyring',
|
2013-02-06 19:16:52 +00:00
|
|
|
keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2011-07-27 04:46:47 +00:00
|
|
|
ctx.cluster.only(firstmon).run(
|
2011-06-16 17:36:15 +00:00
|
|
|
args=[
|
2013-02-06 19:16:52 +00:00
|
|
|
'sudo',
|
2013-09-06 19:22:29 +00:00
|
|
|
'adjust-ulimits',
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-coverage',
|
2011-06-16 17:36:15 +00:00
|
|
|
coverage_dir,
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-authtool',
|
2011-06-16 17:36:15 +00:00
|
|
|
'--gen-key',
|
|
|
|
'--name=mon.',
|
2013-02-06 19:16:52 +00:00
|
|
|
keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2013-02-06 19:16:52 +00:00
|
|
|
ctx.cluster.only(firstmon).run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'chmod',
|
|
|
|
'0644',
|
|
|
|
keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2014-03-27 16:35:28 +00:00
|
|
|
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
|
2016-03-17 02:10:10 +00:00
|
|
|
monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
|
|
|
|
cluster=cluster_name)
|
2018-12-07 19:16:31 +00:00
|
|
|
fsid = create_simple_monmap(
|
2013-01-23 20:37:39 +00:00
|
|
|
ctx,
|
2011-06-16 17:36:15 +00:00
|
|
|
remote=mon0_remote,
|
|
|
|
conf=conf,
|
2018-12-07 19:16:31 +00:00
|
|
|
mons=mons,
|
2016-03-17 02:10:10 +00:00
|
|
|
path=monmap_path,
|
2018-11-26 20:00:38 +00:00
|
|
|
mon_bind_addrvec=config.get('mon_bind_addrvec'),
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2013-12-12 21:33:19 +00:00
|
|
|
if not 'global' in conf:
|
|
|
|
conf['global'] = {}
|
|
|
|
conf['global']['fsid'] = fsid
|
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
default_conf_path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster_name)
|
|
|
|
conf_path = config.get('conf_path', default_conf_path)
|
2015-10-21 13:49:10 +00:00
|
|
|
log.info('Writing %s for FSID %s...' % (conf_path, fsid))
|
2016-03-23 23:50:53 +00:00
|
|
|
write_conf(ctx, conf_path, cluster_name)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2011-07-27 04:46:47 +00:00
|
|
|
log.info('Creating admin key on %s...' % firstmon)
|
|
|
|
ctx.cluster.only(firstmon).run(
|
2011-06-16 17:36:15 +00:00
|
|
|
args=[
|
2013-02-06 19:16:52 +00:00
|
|
|
'sudo',
|
2013-09-06 19:22:29 +00:00
|
|
|
'adjust-ulimits',
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-coverage',
|
2011-06-16 17:36:15 +00:00
|
|
|
coverage_dir,
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-authtool',
|
2011-06-16 17:36:15 +00:00
|
|
|
'--gen-key',
|
|
|
|
'--name=client.admin',
|
|
|
|
'--cap', 'mon', 'allow *',
|
|
|
|
'--cap', 'osd', 'allow *',
|
2014-12-18 18:29:53 +00:00
|
|
|
'--cap', 'mds', 'allow *',
|
2017-03-10 19:15:46 +00:00
|
|
|
'--cap', 'mgr', 'allow *',
|
2013-02-06 19:16:52 +00:00
|
|
|
keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2011-07-07 22:40:37 +00:00
|
|
|
log.info('Copying monmap to all nodes...')
|
2011-06-16 17:36:15 +00:00
|
|
|
keyring = teuthology.get_file(
|
|
|
|
remote=mon0_remote,
|
2013-02-06 19:16:52 +00:00
|
|
|
path=keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
monmap = teuthology.get_file(
|
|
|
|
remote=mon0_remote,
|
2016-03-17 02:10:10 +00:00
|
|
|
path=monmap_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2019-10-11 15:57:47 +00:00
|
|
|
for rem in ctx.cluster.remotes.keys():
|
2011-06-16 17:36:15 +00:00
|
|
|
# copy mon key and initial monmap
|
2011-07-07 22:40:37 +00:00
|
|
|
log.info('Sending monmap to node {remote}'.format(remote=rem))
|
2013-02-06 19:16:52 +00:00
|
|
|
teuthology.sudo_write_file(
|
2011-06-16 17:36:15 +00:00
|
|
|
remote=rem,
|
2013-02-06 19:16:52 +00:00
|
|
|
path=keyring_path,
|
2011-06-16 17:36:15 +00:00
|
|
|
data=keyring,
|
2013-02-06 19:16:52 +00:00
|
|
|
perms='0644'
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
teuthology.write_file(
|
|
|
|
remote=rem,
|
2016-03-17 02:10:10 +00:00
|
|
|
path=monmap_path,
|
2011-06-16 17:36:15 +00:00
|
|
|
data=monmap,
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
|
|
|
log.info('Setting up mon nodes...')
|
2016-03-17 02:10:10 +00:00
|
|
|
mons = ctx.cluster.only(teuthology.is_type('mon', cluster_name))
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2017-03-10 21:57:51 +00:00
|
|
|
if not config.get('skip_mgr_daemons', False):
|
|
|
|
log.info('Setting up mgr nodes...')
|
|
|
|
mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in mgrs.remotes.items():
|
2017-03-10 21:57:51 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
|
|
|
|
cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-04-26 21:10:57 +00:00
|
|
|
mgr_dir = DATA_PATH.format(
|
|
|
|
type_='mgr', cluster=cluster_name, id_=id_)
|
2017-03-10 21:57:51 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'mkdir',
|
|
|
|
'-p',
|
|
|
|
mgr_dir,
|
|
|
|
run.Raw('&&'),
|
|
|
|
'sudo',
|
|
|
|
'adjust-ulimits',
|
|
|
|
'ceph-coverage',
|
|
|
|
coverage_dir,
|
|
|
|
'ceph-authtool',
|
|
|
|
'--create-keyring',
|
|
|
|
'--gen-key',
|
|
|
|
'--name=mgr.{id}'.format(id=id_),
|
|
|
|
mgr_dir + '/keyring',
|
|
|
|
],
|
|
|
|
)
|
2016-09-27 11:22:45 +00:00
|
|
|
|
2011-06-16 17:36:15 +00:00
|
|
|
log.info('Setting up mds nodes...')
|
2016-03-17 02:10:10 +00:00
|
|
|
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in mdss.remotes.items():
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mds',
|
|
|
|
cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-04-26 21:10:57 +00:00
|
|
|
mds_dir = DATA_PATH.format(
|
|
|
|
type_='mds', cluster=cluster_name, id_=id_)
|
2011-06-16 17:36:15 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
2013-02-17 06:32:16 +00:00
|
|
|
'sudo',
|
|
|
|
'mkdir',
|
|
|
|
'-p',
|
2016-03-17 02:10:10 +00:00
|
|
|
mds_dir,
|
2013-02-17 06:32:16 +00:00
|
|
|
run.Raw('&&'),
|
2013-09-05 21:41:27 +00:00
|
|
|
'sudo',
|
2013-09-06 19:22:29 +00:00
|
|
|
'adjust-ulimits',
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-coverage',
|
2011-06-16 17:36:15 +00:00
|
|
|
coverage_dir,
|
2013-02-06 19:16:52 +00:00
|
|
|
'ceph-authtool',
|
2011-06-16 17:36:15 +00:00
|
|
|
'--create-keyring',
|
|
|
|
'--gen-key',
|
|
|
|
'--name=mds.{id}'.format(id=id_),
|
2016-03-17 02:10:10 +00:00
|
|
|
mds_dir + '/keyring',
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2017-05-01 23:07:33 +00:00
|
|
|
remote.run(args=[
|
|
|
|
'sudo', 'chown', '-R', 'ceph:ceph', mds_dir
|
|
|
|
])
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
cclient.create_keyring(ctx, cluster_name)
|
2011-06-16 17:36:15 +00:00
|
|
|
log.info('Running mkfs on osd nodes...')
|
2013-05-01 20:14:35 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
if not hasattr(ctx, 'disk_config'):
|
|
|
|
ctx.disk_config = argparse.Namespace()
|
|
|
|
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev'):
|
|
|
|
ctx.disk_config.remote_to_roles_to_dev = {}
|
|
|
|
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_mount_options'):
|
|
|
|
ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
|
|
|
|
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_fstype'):
|
|
|
|
ctx.disk_config.remote_to_roles_to_dev_fstype = {}
|
|
|
|
|
|
|
|
teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs)
|
2013-05-01 20:14:35 +00:00
|
|
|
|
|
|
|
log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in osds.remotes.items():
|
2012-03-27 22:05:11 +00:00
|
|
|
roles_to_devs = remote_to_roles_to_devs[remote]
|
2013-02-01 17:37:13 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-04-26 21:10:57 +00:00
|
|
|
mnt_point = DATA_PATH.format(
|
|
|
|
type_='osd', cluster=cluster_name, id_=id_)
|
2011-06-16 17:36:15 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
2013-02-17 06:32:16 +00:00
|
|
|
'sudo',
|
2011-06-16 17:36:15 +00:00
|
|
|
'mkdir',
|
2013-02-17 06:32:16 +00:00
|
|
|
'-p',
|
2016-03-17 02:10:10 +00:00
|
|
|
mnt_point,
|
2015-11-09 13:09:27 +00:00
|
|
|
])
|
2019-10-22 14:35:27 +00:00
|
|
|
log.info('roles_to_devs: {}'.format(roles_to_devs))
|
|
|
|
log.info('role: {}'.format(role))
|
2016-03-17 02:10:10 +00:00
|
|
|
if roles_to_devs.get(role):
|
|
|
|
dev = roles_to_devs[role]
|
2012-02-11 22:24:39 +00:00
|
|
|
fs = config.get('fs')
|
|
|
|
package = None
|
2012-07-13 18:30:07 +00:00
|
|
|
mkfs_options = config.get('mkfs_options')
|
|
|
|
mount_options = config.get('mount_options')
|
2012-02-11 22:24:39 +00:00
|
|
|
if fs == 'btrfs':
|
2015-11-09 13:09:27 +00:00
|
|
|
# package = 'btrfs-tools'
|
2012-07-13 18:30:07 +00:00
|
|
|
if mount_options is None:
|
2015-11-09 13:09:27 +00:00
|
|
|
mount_options = ['noatime', 'user_subvol_rm_allowed']
|
2012-07-13 18:30:21 +00:00
|
|
|
if mkfs_options is None:
|
2015-04-02 20:08:13 +00:00
|
|
|
mkfs_options = ['-m', 'single',
|
2012-07-13 18:30:21 +00:00
|
|
|
'-l', '32768',
|
|
|
|
'-n', '32768']
|
2012-02-11 22:24:39 +00:00
|
|
|
if fs == 'xfs':
|
2015-11-09 13:09:27 +00:00
|
|
|
# package = 'xfsprogs'
|
2012-07-13 18:30:07 +00:00
|
|
|
if mount_options is None:
|
|
|
|
mount_options = ['noatime']
|
|
|
|
if mkfs_options is None:
|
|
|
|
mkfs_options = ['-f', '-i', 'size=2048']
|
2012-02-11 22:24:39 +00:00
|
|
|
if fs == 'ext4' or fs == 'ext3':
|
2012-07-13 18:30:07 +00:00
|
|
|
if mount_options is None:
|
2015-11-09 13:09:27 +00:00
|
|
|
mount_options = ['noatime', 'user_xattr']
|
2012-02-11 22:24:39 +00:00
|
|
|
|
2012-07-22 03:18:24 +00:00
|
|
|
if mount_options is None:
|
|
|
|
mount_options = []
|
|
|
|
if mkfs_options is None:
|
|
|
|
mkfs_options = []
|
2012-07-13 18:30:07 +00:00
|
|
|
mkfs = ['mkfs.%s' % fs] + mkfs_options
|
2012-02-11 22:24:39 +00:00
|
|
|
log.info('%s on %s on %s' % (mkfs, dev, remote))
|
|
|
|
if package is not None:
|
2019-11-26 12:21:15 +00:00
|
|
|
remote.sh('sudo apt-get install -y %s' % package)
|
2014-03-12 01:15:12 +00:00
|
|
|
|
2015-04-02 20:08:13 +00:00
|
|
|
try:
|
2015-11-09 13:09:27 +00:00
|
|
|
remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
|
2015-04-02 20:08:13 +00:00
|
|
|
except run.CommandFailedError:
|
|
|
|
# Newer btfs-tools doesn't prompt for overwrite, use -f
|
|
|
|
if '-f' not in mount_options:
|
|
|
|
mkfs_options.append('-f')
|
|
|
|
mkfs = ['mkfs.%s' % fs] + mkfs_options
|
|
|
|
log.info('%s on %s on %s' % (mkfs, dev, remote))
|
2015-11-09 13:09:27 +00:00
|
|
|
remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
|
2014-05-29 05:37:27 +00:00
|
|
|
|
2012-07-13 18:30:07 +00:00
|
|
|
log.info('mount %s on %s -o %s' % (dev, remote,
|
|
|
|
','.join(mount_options)))
|
2011-10-03 21:03:36 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'mount',
|
2012-02-11 22:24:39 +00:00
|
|
|
'-t', fs,
|
2012-07-13 18:30:07 +00:00
|
|
|
'-o', ','.join(mount_options),
|
2011-10-03 21:03:36 +00:00
|
|
|
dev,
|
2016-03-17 02:10:10 +00:00
|
|
|
mnt_point,
|
2015-11-09 13:09:27 +00:00
|
|
|
]
|
|
|
|
)
|
2016-12-09 18:36:52 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo', '/sbin/restorecon', mnt_point,
|
|
|
|
],
|
|
|
|
check_status=False,
|
|
|
|
)
|
2013-01-23 02:27:41 +00:00
|
|
|
if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
|
|
|
|
ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
|
2016-03-17 02:10:10 +00:00
|
|
|
ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][role] = mount_options
|
2013-01-23 02:27:41 +00:00
|
|
|
if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
|
|
|
|
ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
|
2016-03-17 02:10:10 +00:00
|
|
|
ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs
|
|
|
|
devs_to_clean[remote].append(mnt_point)
|
2011-10-03 21:03:36 +00:00
|
|
|
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2018-02-05 19:06:04 +00:00
|
|
|
try:
|
2020-07-30 15:20:35 +00:00
|
|
|
args = ['sudo',
|
2018-02-05 19:06:04 +00:00
|
|
|
'MALLOC_CHECK_=3',
|
|
|
|
'adjust-ulimits',
|
2020-07-30 15:20:35 +00:00
|
|
|
'ceph-coverage', coverage_dir,
|
2018-02-05 19:06:04 +00:00
|
|
|
'ceph-osd',
|
|
|
|
'--no-mon-config',
|
2020-07-30 15:20:35 +00:00
|
|
|
'--cluster', cluster_name,
|
2018-02-05 19:06:04 +00:00
|
|
|
'--mkfs',
|
|
|
|
'--mkkey',
|
|
|
|
'-i', id_,
|
2020-07-30 15:20:35 +00:00
|
|
|
'--monmap', monmap_path]
|
2020-08-21 12:22:23 +00:00
|
|
|
log_path = f'/var/log/ceph/{cluster_name}-osd.{id_}.log'
|
|
|
|
create_log_cmd, args = \
|
|
|
|
maybe_redirect_stderr(config, 'osd', args, log_path)
|
|
|
|
if create_log_cmd:
|
|
|
|
remote.sh(create_log_cmd)
|
|
|
|
remote.run(args=args)
|
2018-02-05 19:06:04 +00:00
|
|
|
except run.CommandFailedError:
|
|
|
|
# try without --no-mon-config.. this may be an upgrade test
|
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sudo',
|
|
|
|
'MALLOC_CHECK_=3',
|
|
|
|
'adjust-ulimits',
|
|
|
|
'ceph-coverage',
|
|
|
|
coverage_dir,
|
|
|
|
'ceph-osd',
|
|
|
|
'--cluster',
|
|
|
|
cluster_name,
|
|
|
|
'--mkfs',
|
|
|
|
'--mkkey',
|
|
|
|
'-i', id_,
|
|
|
|
'--monmap', monmap_path,
|
|
|
|
],
|
|
|
|
)
|
2017-04-26 21:10:57 +00:00
|
|
|
mnt_point = DATA_PATH.format(
|
|
|
|
type_='osd', cluster=cluster_name, id_=id_)
|
2018-01-17 14:20:39 +00:00
|
|
|
try:
|
|
|
|
remote.run(args=[
|
|
|
|
'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
|
|
|
|
])
|
|
|
|
except run.CommandFailedError as e:
|
|
|
|
# hammer does not have ceph user, so ignore this error
|
|
|
|
log.info('ignoring error when chown ceph:ceph,'
|
|
|
|
'probably installing hammer: %s', e)
|
2013-02-17 06:32:16 +00:00
|
|
|
|
|
|
|
log.info('Reading keys from all nodes...')
|
2019-11-26 12:21:15 +00:00
|
|
|
keys_fp = BytesIO()
|
2013-02-17 06:32:16 +00:00
|
|
|
keys = []
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in ctx.cluster.remotes.items():
|
2016-09-27 11:22:45 +00:00
|
|
|
for type_ in ['mgr', 'mds', 'osd']:
|
2017-03-10 21:57:51 +00:00
|
|
|
if type_ == 'mgr' and config.get('skip_mgr_daemons', False):
|
|
|
|
continue
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2013-02-17 06:32:16 +00:00
|
|
|
data = teuthology.get_file(
|
|
|
|
remote=remote,
|
2017-04-26 21:10:57 +00:00
|
|
|
path=os.path.join(
|
|
|
|
DATA_PATH.format(
|
|
|
|
type_=type_, id_=id_, cluster=cluster_name),
|
|
|
|
'keyring',
|
2015-11-09 13:09:27 +00:00
|
|
|
),
|
2013-02-17 06:32:16 +00:00
|
|
|
sudo=True,
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2013-02-17 06:32:16 +00:00
|
|
|
keys.append((type_, id_, data))
|
|
|
|
keys_fp.write(data)
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in ctx.cluster.remotes.items():
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
|
|
|
data = teuthology.get_file(
|
|
|
|
remote=remote,
|
|
|
|
path='/etc/ceph/{cluster}.client.{id}.keyring'.format(id=id_, cluster=cluster_name)
|
|
|
|
)
|
|
|
|
keys.append(('client', id_, data))
|
|
|
|
keys_fp.write(data)
|
2013-02-17 06:32:16 +00:00
|
|
|
|
|
|
|
log.info('Adding keys to all mons...')
|
|
|
|
writes = mons.run(
|
|
|
|
args=[
|
|
|
|
'sudo', 'tee', '-a',
|
|
|
|
keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2013-02-17 06:32:16 +00:00
|
|
|
stdin=run.PIPE,
|
|
|
|
wait=False,
|
2019-11-26 12:21:15 +00:00
|
|
|
stdout=BytesIO(),
|
2015-11-09 13:09:27 +00:00
|
|
|
)
|
2013-02-17 06:32:16 +00:00
|
|
|
keys_fp.seek(0)
|
|
|
|
teuthology.feed_many_stdins_and_close(keys_fp, writes)
|
|
|
|
run.wait(writes)
|
|
|
|
for type_, id_, data in keys:
|
|
|
|
run.wait(
|
|
|
|
mons.run(
|
|
|
|
args=[
|
2015-11-09 13:09:27 +00:00
|
|
|
'sudo',
|
|
|
|
'adjust-ulimits',
|
|
|
|
'ceph-coverage',
|
|
|
|
coverage_dir,
|
|
|
|
'ceph-authtool',
|
|
|
|
keyring_path,
|
|
|
|
'--name={type}.{id}'.format(
|
|
|
|
type=type_,
|
|
|
|
id=id_,
|
|
|
|
),
|
2016-09-27 11:17:22 +00:00
|
|
|
] + list(generate_caps(type_)),
|
2013-02-17 06:32:16 +00:00
|
|
|
wait=False,
|
2015-11-09 13:09:27 +00:00
|
|
|
),
|
|
|
|
)
|
2013-02-17 06:32:16 +00:00
|
|
|
|
|
|
|
log.info('Running mkfs on mon nodes...')
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles_for_host in mons.remotes.items():
|
2016-03-17 02:10:10 +00:00
|
|
|
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name):
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-04-26 21:10:57 +00:00
|
|
|
mnt_point = DATA_PATH.format(
|
|
|
|
type_='mon', id_=id_, cluster=cluster_name)
|
2013-02-22 01:04:06 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
2015-11-09 13:09:27 +00:00
|
|
|
'sudo',
|
|
|
|
'mkdir',
|
|
|
|
'-p',
|
2017-04-26 18:17:28 +00:00
|
|
|
mnt_point,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2013-02-17 06:32:16 +00:00
|
|
|
remote.run(
|
|
|
|
args=[
|
2013-09-05 21:41:27 +00:00
|
|
|
'sudo',
|
2013-09-06 19:22:29 +00:00
|
|
|
'adjust-ulimits',
|
2013-02-17 06:32:16 +00:00
|
|
|
'ceph-coverage',
|
|
|
|
coverage_dir,
|
|
|
|
'ceph-mon',
|
2016-03-17 02:10:10 +00:00
|
|
|
'--cluster', cluster_name,
|
2013-02-17 06:32:16 +00:00
|
|
|
'--mkfs',
|
|
|
|
'-i', id_,
|
2016-03-17 02:10:10 +00:00
|
|
|
'--monmap', monmap_path,
|
|
|
|
'--keyring', keyring_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
|
|
|
)
|
2018-01-17 14:20:39 +00:00
|
|
|
try:
|
|
|
|
remote.run(args=[
|
|
|
|
'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
|
|
|
|
])
|
|
|
|
except run.CommandFailedError as e:
|
|
|
|
# hammer does not have ceph user, so ignore this error
|
|
|
|
log.info('ignoring error when chown ceph:ceph,'
|
|
|
|
'probably installing hammer: %s', e)
|
2013-02-17 06:32:16 +00:00
|
|
|
|
2011-06-16 17:36:15 +00:00
|
|
|
run.wait(
|
|
|
|
mons.run(
|
|
|
|
args=[
|
|
|
|
'rm',
|
|
|
|
'--',
|
2016-03-17 02:10:10 +00:00
|
|
|
monmap_path,
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2011-06-16 17:36:15 +00:00
|
|
|
wait=False,
|
2015-11-09 13:09:27 +00:00
|
|
|
),
|
|
|
|
)
|
2011-06-16 17:36:15 +00:00
|
|
|
|
|
|
|
try:
|
|
|
|
yield
|
2013-08-30 15:58:10 +00:00
|
|
|
except Exception:
|
2013-03-29 19:19:46 +00:00
|
|
|
# we need to know this below
|
|
|
|
ctx.summary['success'] = False
|
|
|
|
raise
|
2011-06-16 17:36:15 +00:00
|
|
|
finally:
|
2014-03-27 16:35:28 +00:00
|
|
|
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
|
2011-08-23 05:04:57 +00:00
|
|
|
|
2012-06-06 20:32:56 +00:00
|
|
|
log.info('Checking cluster log for badness...')
|
2015-11-09 13:09:27 +00:00
|
|
|
|
2011-10-17 21:42:03 +00:00
|
|
|
def first_in_ceph_log(pattern, excludes):
|
2013-10-12 08:28:27 +00:00
|
|
|
"""
|
2018-09-18 03:21:13 +00:00
|
|
|
Find the first occurrence of the pattern specified in the Ceph log,
|
2013-11-21 19:56:41 +00:00
|
|
|
Returns None if none found.
|
|
|
|
|
2013-10-12 08:28:27 +00:00
|
|
|
:param pattern: Pattern scanned for.
|
|
|
|
:param excludes: Patterns to ignore.
|
|
|
|
:return: First line of text (or None if not found)
|
|
|
|
"""
|
2011-10-17 21:42:03 +00:00
|
|
|
args = [
|
2013-03-11 00:08:55 +00:00
|
|
|
'sudo',
|
2011-10-17 21:42:03 +00:00
|
|
|
'egrep', pattern,
|
2016-03-17 02:10:10 +00:00
|
|
|
'/var/log/ceph/{cluster}.log'.format(cluster=cluster_name),
|
2015-11-09 13:09:27 +00:00
|
|
|
]
|
2011-10-17 21:42:03 +00:00
|
|
|
for exclude in excludes:
|
|
|
|
args.extend([run.Raw('|'), 'egrep', '-v', exclude])
|
|
|
|
args.extend([
|
2015-11-09 13:09:27 +00:00
|
|
|
run.Raw('|'), 'head', '-n', '1',
|
|
|
|
])
|
2019-11-26 12:21:15 +00:00
|
|
|
stdout = mon0_remote.sh(args)
|
|
|
|
return stdout or None
|
2011-10-03 23:08:49 +00:00
|
|
|
|
2011-10-17 21:42:03 +00:00
|
|
|
if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
|
|
|
|
config['log_whitelist']) is not None:
|
2011-08-24 04:00:26 +00:00
|
|
|
log.warning('Found errors (ERR|WRN|SEC) in cluster log')
|
|
|
|
ctx.summary['success'] = False
|
2011-10-03 23:08:49 +00:00
|
|
|
# use the most severe problem as the failure reason
|
|
|
|
if 'failure_reason' not in ctx.summary:
|
|
|
|
for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
|
2011-10-17 21:42:03 +00:00
|
|
|
match = first_in_ceph_log(pattern, config['log_whitelist'])
|
2011-10-03 23:08:49 +00:00
|
|
|
if match is not None:
|
|
|
|
ctx.summary['failure_reason'] = \
|
|
|
|
'"{match}" in cluster log'.format(
|
2015-11-09 13:09:27 +00:00
|
|
|
match=match.rstrip('\n'),
|
2011-10-03 23:08:49 +00:00
|
|
|
)
|
|
|
|
break
|
2011-08-24 04:00:26 +00:00
|
|
|
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, dirs in devs_to_clean.items():
|
2011-10-03 21:03:36 +00:00
|
|
|
for dir_ in dirs:
|
|
|
|
log.info('Unmounting %s on %s' % (dir_, remote))
|
2015-02-21 00:55:21 +00:00
|
|
|
try:
|
|
|
|
remote.run(
|
|
|
|
args=[
|
|
|
|
'sync',
|
|
|
|
run.Raw('&&'),
|
|
|
|
'sudo',
|
|
|
|
'umount',
|
|
|
|
'-f',
|
|
|
|
dir_
|
2011-10-03 21:03:36 +00:00
|
|
|
]
|
|
|
|
)
|
2015-02-21 00:55:21 +00:00
|
|
|
except Exception as e:
|
|
|
|
remote.run(args=[
|
2015-11-09 13:09:27 +00:00
|
|
|
'sudo',
|
|
|
|
run.Raw('PATH=/usr/sbin:$PATH'),
|
|
|
|
'lsof',
|
|
|
|
run.Raw(';'),
|
|
|
|
'ps', 'auxf',
|
|
|
|
])
|
2015-02-21 00:55:21 +00:00
|
|
|
raise e
|
2011-10-03 21:03:36 +00:00
|
|
|
|
2013-03-29 19:19:46 +00:00
|
|
|
if ctx.archive is not None and \
|
2015-11-09 13:09:27 +00:00
|
|
|
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
|
2015-02-21 00:54:09 +00:00
|
|
|
|
2012-07-11 21:14:46 +00:00
|
|
|
# archive mon data, too
|
|
|
|
log.info('Archiving mon data...')
|
|
|
|
path = os.path.join(ctx.archive, 'data')
|
2016-03-30 00:14:45 +00:00
|
|
|
try:
|
|
|
|
os.makedirs(path)
|
|
|
|
except OSError as e:
|
|
|
|
if e.errno == errno.EEXIST:
|
|
|
|
pass
|
|
|
|
else:
|
|
|
|
raise
|
2019-10-08 14:41:32 +00:00
|
|
|
for remote, roles in mons.remotes.items():
|
2012-07-11 21:14:46 +00:00
|
|
|
for role in roles:
|
2016-03-17 02:10:10 +00:00
|
|
|
is_mon = teuthology.is_type('mon', cluster_name)
|
|
|
|
if is_mon(role):
|
2016-03-30 00:10:28 +00:00
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-04-26 21:10:57 +00:00
|
|
|
mon_dir = DATA_PATH.format(
|
|
|
|
type_='mon', id_=id_, cluster=cluster_name)
|
2013-02-17 06:32:16 +00:00
|
|
|
teuthology.pull_directory_tarball(
|
|
|
|
remote,
|
2016-03-30 00:10:28 +00:00
|
|
|
mon_dir,
|
2013-02-17 06:32:16 +00:00
|
|
|
path + '/' + role + '.tgz')
|
2012-07-11 21:14:46 +00:00
|
|
|
|
2011-06-16 17:36:15 +00:00
|
|
|
log.info('Cleaning ceph cluster...')
|
|
|
|
run.wait(
|
|
|
|
ctx.cluster.run(
|
|
|
|
args=[
|
2013-02-06 19:16:52 +00:00
|
|
|
'sudo',
|
2011-06-16 17:36:15 +00:00
|
|
|
'rm',
|
|
|
|
'-rf',
|
|
|
|
'--',
|
2013-02-06 19:16:52 +00:00
|
|
|
conf_path,
|
|
|
|
keyring_path,
|
2016-03-17 02:10:10 +00:00
|
|
|
data_dir,
|
|
|
|
monmap_path,
|
2016-03-16 22:33:12 +00:00
|
|
|
run.Raw('{tdir}/../*.pid'.format(tdir=testdir)),
|
2015-11-09 13:09:27 +00:00
|
|
|
],
|
2011-06-16 17:36:15 +00:00
|
|
|
wait=False,
|
2015-11-09 13:09:27 +00:00
|
|
|
),
|
|
|
|
)
|
|
|
|
|
2011-06-16 17:36:15 +00:00
|
|
|
|
2014-02-04 01:17:09 +00:00
|
|
|
def osd_scrub_pgs(ctx, config):
|
|
|
|
"""
|
|
|
|
Scrub pgs when we exit.
|
2014-05-29 05:37:27 +00:00
|
|
|
|
2014-02-04 01:17:09 +00:00
|
|
|
First make sure all pgs are active and clean.
|
|
|
|
Next scrub all osds.
|
|
|
|
Then periodically check until all pgs have scrub time stamps that
|
2018-09-18 03:21:13 +00:00
|
|
|
indicate the last scrub completed. Time out if no progress is made
|
2014-02-04 01:17:09 +00:00
|
|
|
here after two minutes.
|
|
|
|
"""
|
2017-08-04 16:06:27 +00:00
|
|
|
retries = 40
|
|
|
|
delays = 20
|
2016-03-25 04:57:02 +00:00
|
|
|
cluster_name = config['cluster']
|
|
|
|
manager = ctx.managers[cluster_name]
|
2020-07-29 01:27:28 +00:00
|
|
|
for _ in range(retries):
|
2016-03-25 04:57:02 +00:00
|
|
|
stats = manager.get_pg_stats()
|
2018-12-19 13:14:30 +00:00
|
|
|
unclean = [stat['pgid'] for stat in stats if 'active+clean' not in stat['state']]
|
|
|
|
split_merge = []
|
|
|
|
osd_dump = manager.get_osd_dump_json()
|
2019-10-17 14:07:18 +00:00
|
|
|
try:
|
|
|
|
split_merge = [i['pool_name'] for i in osd_dump['pools'] if i['pg_num'] != i['pg_num_target']]
|
|
|
|
except KeyError:
|
|
|
|
# we don't support pg_num_target before nautilus
|
|
|
|
pass
|
2018-12-19 13:14:30 +00:00
|
|
|
if not unclean and not split_merge:
|
2014-02-04 01:17:09 +00:00
|
|
|
break
|
2020-07-29 01:27:28 +00:00
|
|
|
waiting_on = []
|
|
|
|
if unclean:
|
|
|
|
waiting_on.append(f'{unclean} to go clean')
|
|
|
|
if split_merge:
|
|
|
|
waiting_on.append(f'{split_merge} to split/merge')
|
|
|
|
waiting_on = ' and '.join(waiting_on)
|
|
|
|
log.info('Waiting for all PGs to be active+clean and split+merged, waiting on %s', waiting_on)
|
2014-02-04 01:17:09 +00:00
|
|
|
time.sleep(delays)
|
2020-07-29 01:27:28 +00:00
|
|
|
else:
|
2017-05-26 13:23:13 +00:00
|
|
|
raise RuntimeError("Scrubbing terminated -- not all pgs were active and clean.")
|
2014-02-04 01:17:09 +00:00
|
|
|
check_time_now = time.localtime()
|
|
|
|
time.sleep(1)
|
2016-03-25 04:57:02 +00:00
|
|
|
all_roles = teuthology.all_roles(ctx.cluster)
|
|
|
|
for role in teuthology.cluster_roles_of_type(all_roles, 'osd', cluster_name):
|
|
|
|
log.info("Scrubbing {osd}".format(osd=role))
|
|
|
|
_, _, id_ = teuthology.split_role(role)
|
2017-06-19 15:57:50 +00:00
|
|
|
# allow this to fail; in certain cases the OSD might not be up
|
|
|
|
# at this point. we will catch all pgs below.
|
|
|
|
try:
|
2017-11-17 16:20:40 +00:00
|
|
|
manager.raw_cluster_cmd('tell', 'osd.' + id_, 'config', 'set',
|
|
|
|
'osd_debug_deep_scrub_sleep', '0');
|
2017-06-19 15:57:50 +00:00
|
|
|
manager.raw_cluster_cmd('osd', 'deep-scrub', id_)
|
|
|
|
except run.CommandFailedError:
|
|
|
|
pass
    prev_good = 0
    gap_cnt = 0
    loop = True
    while loop:
        stats = manager.get_pg_stats()
        timez = [(stat['pgid'], stat['last_scrub_stamp']) for stat in stats]
        loop = False
        thiscnt = 0
        re_scrub = []
        for (pgid, tmval) in timez:
            t = tmval[0:tmval.find('.')].replace(' ', 'T')
            pgtm = time.strptime(t, '%Y-%m-%dT%H:%M:%S')
            if pgtm > check_time_now:
                thiscnt += 1
            else:
                log.info('pgid %s last_scrub_stamp %s %s <= %s', pgid, tmval, pgtm, check_time_now)
                loop = True
                re_scrub.append(pgid)
        if thiscnt > prev_good:
            prev_good = thiscnt
            gap_cnt = 0
        else:
            gap_cnt += 1
            if gap_cnt % 6 == 0:
                for pgid in re_scrub:
                    # re-request scrub every so often in case the earlier
                    # request was missed.  do not do it every time because
                    # the scrub may be in progress or not reported yet and
                    # we will starve progress.
                    manager.raw_cluster_cmd('pg', 'deep-scrub', pgid)
            if gap_cnt > retries:
                raise RuntimeError('Exiting scrub checking -- not all pgs scrubbed.')
        if loop:
            log.info('Still waiting for all pgs to be scrubbed.')
            time.sleep(delays)


@contextlib.contextmanager
def run_daemon(ctx, config, type_):
    """
    Run daemons for a role type.  Handle the startup and termination of a daemon.
    On startup -- set coverage, cpu_profile, and valgrind values for all remotes,
    and a max_mds value for one mds.
    On cleanup -- Stop all existing daemons of this type.

    :param ctx: Context
    :param config: Configuration
    :param type_: Role type
    """
    cluster_name = config['cluster']
    log.info('Starting %s daemons in cluster %s...', type_, cluster_name)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))

    # check whether any daemons of this type are configured
    if daemons is None:
        return
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    # create osds in order. (this only matters for pre-luminous, which might
    # be hammer, which doesn't take an id_ argument to legacy 'osd create').
    osd_uuids = {}
    for remote, roles_for_host in daemons.remotes.items():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)

            if type_ == 'osd':
                datadir = '/var/lib/ceph/osd/{cluster}-{id}'.format(
                    cluster=cluster_name, id=id_)
                osd_uuid = teuthology.get_file(
                    remote=remote,
                    path=datadir + '/fsid',
                    sudo=True,
                ).decode().strip()
                osd_uuids[id_] = osd_uuid
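    # Create the OSDs in increasing id order; as noted above, the ordering
    # only matters for the pre-luminous 'osd create' fallback, which cannot
    # be told which id to use.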
    for osd_id in range(len(osd_uuids)):
        id_ = str(osd_id)
        osd_uuid = osd_uuids.get(id_)
        try:
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'new', osd_uuid, id_,
                ]
            )
        except:
            # fallback to pre-luminous (hammer or jewel)
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'create', osd_uuid,
                ]
            )
        if config.get('add_osds_to_crush'):
            remote.run(
                args=[
                    'sudo', 'ceph', '--cluster', cluster_name,
                    'osd', 'crush', 'create-or-move', 'osd.' + id_,
                    '1.0', 'host=localhost', 'root=default',
                ]
            )
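    # Build the command line for each daemon on each remote and register it
    # with the DaemonGroup; mgr daemons may be registered without being
    # started when skip_mgr_daemons is set.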
    for remote, roles_for_host in daemons.remotes.items():
        is_type_ = teuthology.is_type(type_, cluster_name)
        for role in roles_for_host:
            if not is_type_(role):
                continue
            _, _, id_ = teuthology.split_role(role)

            run_cmd = [
                'sudo',
                'adjust-ulimits',
                'ceph-coverage',
                coverage_dir,
                'daemon-helper',
                daemon_signal,
            ]
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '--cluster', cluster_name,
                '-i', id_]

            if type_ in config.get('cpu_profile', []):
                profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (role)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                if role in config['valgrind']:
                    valgrind_args = config['valgrind'][role]
                run_cmd = teuthology.get_valgrind_args(testdir, role,
                                                       run_cmd,
                                                       valgrind_args)

            run_cmd.extend(run_cmd_tail)
            log_path = f'/var/log/ceph/{cluster_name}-{type_}.{id_}.log'
            create_log_cmd, run_cmd = \
                maybe_redirect_stderr(config, type_, run_cmd, log_path)
            if create_log_cmd:
                remote.sh(create_log_cmd)
            # always register mgr; don't necessarily start
            ctx.daemons.register_daemon(
                remote, type_, id_,
                cluster=cluster_name,
                args=run_cmd,
                logger=log.getChild(role),
                stdin=run.PIPE,
                wait=False
            )
            if type_ != 'mgr' or not config.get('skip_mgr_daemons', False):
                role = cluster_name + '.' + type_
                ctx.daemons.get_daemon(type_, id_, cluster_name).restart()

    # kludge: run any pre-manager commands
    if type_ == 'mon':
        for cmd in config.get('pre-mgr-commands', []):
            firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
            (remote,) = ctx.cluster.only(firstmon).remotes.keys()
            remote.run(args=cmd.split(' '))

    try:
        yield
    finally:
        teuthology.stop_daemons_of_type(ctx, type_, cluster_name)


def healthy(ctx, config):
    """
    Wait for all OSDs to be up, and for the ceph health monitor to return HEALTH_OK.

    :param ctx: Context
    :param config: Configuration
    """
    config = config if isinstance(config, dict) else dict()
    cluster_name = config.get('cluster', 'ceph')
    log.info('Waiting until %s daemons up and pgs clean...', cluster_name)
    manager = ctx.managers[cluster_name]
    try:
        manager.wait_for_mgr_available(timeout=30)
    except (run.CommandFailedError, AssertionError) as e:
        log.info('ignoring mgr wait error, probably testing upgrade: %s', e)

    manager.wait_for_all_osds_up(timeout=300)

    try:
        manager.flush_all_pg_stats()
    except (run.CommandFailedError, Exception) as e:
        log.info('ignoring flush pg stats error, probably testing upgrade: %s', e)
    manager.wait_for_clean()

    if config.get('wait-for-healthy', True):
        log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
        manager.wait_until_healthy(timeout=300)

    if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
        # Some MDSs exist, wait for them to be healthy
        ceph_fs = Filesystem(ctx)  # TODO: make Filesystem cluster-aware
        ceph_fs.wait_for_daemons(timeout=300)


def wait_for_mon_quorum(ctx, config):
    """
    Check remote ceph status until all monitors are up.

    :param ctx: Context
    :param config: Configuration
    """
    if isinstance(config, dict):
        mons = config['daemons']
        cluster_name = config.get('cluster', 'ceph')
    else:
        assert isinstance(config, list)
        mons = config
        cluster_name = 'ceph'
    firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
    (remote,) = ctx.cluster.only(firstmon).remotes.keys()
    with contextutil.safe_while(sleep=10, tries=60,
                                action='wait for monitor quorum') as proceed:
        while proceed():
            quorum_status = remote.sh('sudo ceph quorum_status',
                                      logger=log.getChild('quorum_status'))
            j = json.loads(quorum_status)
            q = j.get('quorum_names', [])
            log.debug('Quorum: %s', q)
            if sorted(q) == sorted(mons):
                break


def created_pool(ctx, config):
    """
    Add new pools to the dictionary of pools that the ceph-manager
    knows about.
    """
    for new_pool in config:
        if new_pool not in ctx.managers['ceph'].pools:
            ctx.managers['ceph'].pools[new_pool] = ctx.managers['ceph'].get_pool_int_property(
                new_pool, 'pg_num')


@contextlib.contextmanager
def suppress_mon_health_to_clog(ctx, config):
    """
    Set mon_health_to_clog to false if the config asks for it, and restore
    the original value on exit.

    Note: because of the way tasks are executed/nested, this is not meant to
    be used as a standalone task; otherwise it would only restore the tweaked
    option at the /end/ of the 'tasks' block.
    """
    if config.get('mon-health-to-clog', 'true') == 'false':
        cluster = config.get('cluster', 'ceph')
        manager = ctx.managers[cluster]
        manager.raw_cluster_command(
            'config', 'set', 'mon', 'mon_health_to_clog', 'false'
        )
        yield
        manager.raw_cluster_command(
            'config', 'rm', 'mon', 'mon_health_to_clog'
        )
    else:
        yield


@contextlib.contextmanager
def restart(ctx, config):
    """
    restart ceph daemons

    For example::

        tasks:
        - ceph.restart: [all]

    For example::

        tasks:
        - ceph.restart: [osd.0, mon.1, mds.*]

    or::

        tasks:
        - ceph.restart:
            daemons: [osd.0, mon.1]
            wait-for-healthy: false
            wait-for-osds-up: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {'daemons': config}

    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
    clusters = set()

    with suppress_mon_health_to_clog(ctx, config):
        for role in daemons:
            cluster, type_, id_ = teuthology.split_role(role)
            ctx.daemons.get_daemon(type_, id_, cluster).stop()
            if type_ == 'osd':
                ctx.managers[cluster].mark_down_osd(id_)
            ctx.daemons.get_daemon(type_, id_, cluster).restart()
            clusters.add(cluster)

    if config.get('wait-for-healthy', True):
        for cluster in clusters:
            healthy(ctx=ctx, config=dict(cluster=cluster))
    if config.get('wait-for-osds-up', False):
        for cluster in clusters:
            ctx.managers[cluster].wait_for_all_osds_up()
    yield


@contextlib.contextmanager
def stop(ctx, config):
    """
    Stop ceph daemons

    For example::

        tasks:
        - ceph.stop: [mds.*]

        tasks:
        - ceph.stop: [osd.0, osd.2]

        tasks:
        - ceph.stop:
            daemons: [osd.0, osd.2]

    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {'daemons': config}

    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
    clusters = set()

    for role in daemons:
        cluster, type_, id_ = teuthology.split_role(role)
        ctx.daemons.get_daemon(type_, id_, cluster).stop()
        clusters.add(cluster)

    for cluster in clusters:
        ctx.ceph[cluster].watchdog.stop()
        ctx.ceph[cluster].watchdog.join()

    yield


@contextlib.contextmanager
def wait_for_failure(ctx, config):
    """
    Wait for a failure of a ceph daemon

    For example::

        tasks:
        - ceph.wait_for_failure: [mds.*]

        tasks:
        - ceph.wait_for_failure: [osd.0, osd.2]

        tasks:
        - ceph.wait_for_failure:
            daemons: [osd.0, osd.2]

    """
    if config is None:
        config = {}
    elif isinstance(config, list):
        config = {'daemons': config}

    daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
    for role in daemons:
        cluster, type_, id_ = teuthology.split_role(role)
        try:
            ctx.daemons.get_daemon(type_, id_, cluster).wait()
        except:
            log.info('Saw expected daemon failure.  Continuing.')
        else:
            raise RuntimeError('daemon %s did not fail' % role)

    yield


def validate_config(ctx, config):
    """
    Perform some simple validation on task configuration.
    Raises exceptions.ConfigError if an error is found.
    """
    # check for osds from multiple clusters on the same host
    for remote, roles_for_host in ctx.cluster.remotes.items():
        last_cluster = None
        last_role = None
        for role in roles_for_host:
            role_cluster, role_type, _ = teuthology.split_role(role)
            if role_type != 'osd':
                continue
            if last_cluster and last_cluster != role_cluster:
                msg = "Host should not have osds (%s and %s) from multiple clusters" % (
                    last_role, role)
                raise exceptions.ConfigError(msg)
            last_cluster = role_cluster
            last_role = role


@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - ceph:
        - interactive:

    You can also specify what branch to run::

        tasks:
        - ceph:
            branch: foo

    Or a tag::

        tasks:
        - ceph:
            tag: v0.42.13

    Or a sha1::

        tasks:
        - ceph:
            sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed

    Or a local source dir::

        tasks:
        - ceph:
            path: /home/sage/ceph

    To capture code coverage data, use::

        tasks:
        - ceph:
            coverage: true

    To use btrfs, ext4, or xfs on the target's scratch disks, use::

        tasks:
        - ceph:
            fs: xfs
            mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
            mount_options: [nobarrier, inode64]

    Note, this will cause the task to check the /scratch_devs file on each node
    for available devices.  If no such file is found, /dev/sdb will be used.

    To run some daemons under valgrind, include their names
    and the tool/args to use in a valgrind section::

        tasks:
        - ceph:
          valgrind:
            mds.1: --tool=memcheck
            osd.1: [--tool=memcheck, --leak-check=no]

    Those nodes which are using memcheck or valgrind will get
    checked for bad results.

    To adjust or modify config options, use::

        tasks:
        - ceph:
            conf:
              section:
                key: value

    For example::

        tasks:
        - ceph:
            conf:
              mds.0:
                some option: value
                other key: other value
              client.0:
                debug client: 10
                debug ms: 1

    By default, the cluster log is checked for errors and warnings,
    and the run marked failed if any appear. You can ignore log
    entries by giving a list of egrep compatible regexes, i.e.::

        tasks:
        - ceph:
            log-whitelist: ['foo.*bar', 'bad message']

    To run multiple ceph clusters, use multiple ceph tasks, and roles
    with a cluster name prefix, e.g. cluster1.client.0. Roles with no
    cluster use the default cluster name, 'ceph'. OSDs from separate
    clusters must be on separate hosts. Clients and non-osd daemons
    from multiple clusters may be colocated. For each cluster, add an
    instance of the ceph task with the cluster name specified, e.g.::

        roles:
        - [mon.a, osd.0, osd.1]
        - [backup.mon.a, backup.osd.0, backup.osd.1]
        - [client.0, backup.client.0]
        tasks:
        - ceph:
            cluster: ceph
        - ceph:
            cluster: backup

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task ceph only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))

    first_ceph_cluster = False
    if not hasattr(ctx, 'daemons'):
        first_ceph_cluster = True
        ctx.daemons = DaemonGroup()

    testdir = teuthology.get_testdir(ctx)
    if config.get('coverage'):
        coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
        log.info('Creating coverage directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'install', '-d', '-m0755', '--',
                    coverage_dir,
                ],
                wait=False,
            )
        )

    if 'cluster' not in config:
        config['cluster'] = 'ceph'

    validate_config(ctx, config)

    subtasks = []
    if first_ceph_cluster:
        # these tasks handle general log setup and parsing on all hosts,
        # so they should only be run once
        subtasks = [
            lambda: ceph_log(ctx=ctx, config=None),
            lambda: ceph_crash(ctx=ctx, config=None),
            lambda: valgrind_post(ctx=ctx, config=config),
        ]
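
    # Per-cluster subtasks, nested in order: cluster setup, then mon, mgr,
    # crush map, osd, rbd pool, cephfs and mds setup, and finally the daemon
    # watchdog.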
    subtasks += [
        lambda: cluster(ctx=ctx, config=dict(
            conf=config.get('conf', {}),
            fs=config.get('fs', 'xfs'),
            mkfs_options=config.get('mkfs_options', None),
            mount_options=config.get('mount_options', None),
            skip_mgr_daemons=config.get('skip_mgr_daemons', False),
            log_whitelist=config.get('log-whitelist', []),
            cpu_profile=set(config.get('cpu_profile', [])),
            cluster=config['cluster'],
            mon_bind_msgr2=config.get('mon_bind_msgr2', True),
            mon_bind_addrvec=config.get('mon_bind_addrvec', True),
        )),
        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
        lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
        lambda: crush_setup(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
        lambda: create_rbd_pool(ctx=ctx, config=config),
        lambda: cephfs_setup(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
        lambda: watchdog_setup(ctx=ctx, config=config),
    ]

    with contextutil.nested(*subtasks):
        first_mon = teuthology.get_first_mon(ctx, config, config['cluster'])
        (mon,) = ctx.cluster.only(first_mon).remotes.keys()
        if not hasattr(ctx, 'managers'):
            ctx.managers = {}
        ctx.managers[config['cluster']] = CephManager(
            mon,
            ctx=ctx,
            logger=log.getChild('ceph_manager.' + config['cluster']),
            cluster=config['cluster'],
        )

        try:
            if config.get('wait-for-healthy', True):
                healthy(ctx=ctx, config=dict(cluster=config['cluster']))

            yield
        finally:
            # set pg_num_targets back to actual pg_num, so we don't have to
            # wait for pending merges (which can take a while!)
            ctx.managers[config['cluster']].stop_pg_num_changes()

            if config.get('wait-for-scrub', True):
                osd_scrub_pgs(ctx, config)

            # stop logging health to clog during shutdown, or else we generate
            # a bunch of scary messages unrelated to our actual run.
            firstmon = teuthology.get_first_mon(ctx, config, config['cluster'])
            (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
            mon0_remote.run(
                args=[
                    'sudo',
                    'ceph',
                    '--cluster', config['cluster'],
                    'config', 'set', 'global',
                    'mon_health_to_clog', 'false',
                ],
                check_status=False,
            )