from cStringIO import StringIO

import argparse
import contextlib
import logging
import os
import sys

from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.parallel import parallel
from ..orchestra import run

log = logging.getLogger(__name__)

class DaemonState(object):
    def __init__(self, remote, role, id_, *command_args, **command_kwargs):
        self.remote = remote
        self.command_args = command_args
        self.command_kwargs = command_kwargs
        self.role = role
        self.id_ = id_
        self.log = command_kwargs.get('logger', log)
        self.proc = None

    def stop(self):
        """
        Note: this can raise a run.CommandFailedError,
        run.CommandCrashedError, or run.ConnectionLostError.
        """
        if not self.running():
            self.log.error('tried to stop a non-running daemon')
            return
        self.proc.stdin.close()
        self.log.debug('waiting for process to exit')
        run.wait([self.proc])
        self.proc = None
        self.log.info('Stopped')

    def restart(self, *args, **kwargs):
        self.log.info('Restarting')
        if self.proc is not None:
            self.log.debug('stopping old one...')
            self.stop()
        cmd_args = list(self.command_args)
        cmd_args.extend(args)
        cmd_kwargs = self.command_kwargs
        cmd_kwargs.update(kwargs)
        self.proc = self.remote.run(*cmd_args, **cmd_kwargs)
        self.log.info('Started')

    def running(self):
        return self.proc is not None

    def reset(self):
        self.proc = None

class CephState(object):
    def __init__(self):
        self.daemons = {}

    def add_daemon(self, remote, role, id_, *args, **kwargs):
        if role not in self.daemons:
            self.daemons[role] = {}
        if id_ in self.daemons[role]:
            self.daemons[role][id_].stop()
            self.daemons[role][id_] = None
        self.daemons[role][id_] = DaemonState(remote, role, id_, *args, **kwargs)
        self.daemons[role][id_].restart()

    def get_daemon(self, role, id_):
        if role not in self.daemons:
            return None
        return self.daemons[role].get(str(id_), None)

    def iter_daemons_of_role(self, role):
        return self.daemons.get(role, {}).values()
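
# A minimal usage sketch for CephState/DaemonState, mirroring how
# run_daemon() below drives them (the role, id, and args here are
# hypothetical; `remote` is assumed to be a connected remote from
# ctx.cluster):
#
#   daemons = CephState()
#   daemons.add_daemon(remote, 'osd', '0',
#                      args=['ceph-osd', '-f', '-i', '0'],
#                      stdin=run.PIPE, wait=False)
#   daemons.get_daemon('osd', '0').restart()
#   for d in daemons.iter_daemons_of_role('osd'):
#       d.stop()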

@contextlib.contextmanager
def ceph_log(ctx, config):
    log.info('Creating log directories...')
    archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx))
    run.wait(
        ctx.cluster.run(
            args=[
                'install', '-d', '-m0755', '--',
                '{adir}/log'.format(adir=archive_dir),
                '{adir}/log/valgrind'.format(adir=archive_dir),
                '{adir}/profiling-logger'.format(adir=archive_dir),
                ],
            wait=False,
            )
        )

    try:
        yield
    finally:
        if ctx.archive is not None:
            log.info('Compressing logs...')
            run.wait(
                ctx.cluster.run(
                    args=[
                        'find',
                        '{adir}/log'.format(adir=archive_dir),
                        '-name',
                        '*.log',
                        '-print0',
                        run.Raw('|'),
                        'xargs',
                        '-0',
                        '--no-run-if-empty',
                        '--',
                        'gzip',
                        '--',
                        ],
                    wait=False,
                    ),
                )

            # log file transfer is done by the generic archive data
            # handling
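
# For reference, the compression pipeline above expands to roughly this
# shell command on each remote (a sketch; <testdir> is substituted per run):
#
#   find <testdir>/archive/log -name '*.log' -print0 \
#       | xargs -0 --no-run-if-empty -- gzip --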

@contextlib.contextmanager
def ship_utilities(ctx, config):
    assert config is None
    FILES = ['daemon-helper', 'enable-coredump', 'chdir-coredump',
             'valgrind.supp', 'kcon_most']
    testdir = teuthology.get_testdir(ctx)
    for filename in FILES:
        log.info('Shipping %r...', filename)
        src = os.path.join(os.path.dirname(__file__), filename)
        dst = os.path.join(testdir, filename)
        with file(src, 'rb') as f:
            for rem in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(
                    remote=rem,
                    path=dst,
                    data=f,
                    )
                f.seek(0)
                rem.run(
                    args=[
                        'chmod',
                        'a=rx',
                        '--',
                        dst,
                        ],
                    )

    try:
        yield
    finally:
        log.info('Removing shipped files: %s...', ' '.join(FILES))
        filenames = (
            os.path.join(testdir, filename)
            for filename in FILES
            )
        run.wait(
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    ] + list(filenames),
                wait=False,
                ),
            )

def _update_deb_package_list_and_install(remote, debs, branch):
    """
    Updates the package list so that apt-get can
    download the appropriate packages.
    """

    # check for ceph release key
    r = remote.run(
        args=[
            'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
            ],
        stdout=StringIO(),
        )
    if r.stdout.getvalue().find('Ceph automated package') == -1:
        # if it doesn't exist, add it
        remote.run(
            args=[
                'wget', '-q', '-O-',
                'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
                run.Raw('|'),
                'sudo', 'apt-key', 'add', '-',
                ],
            stdout=StringIO(),
            )

    # get ubuntu release (precise, quantal, etc.)
    r = remote.run(
        args=['lsb_release', '-sc'],
        stdout=StringIO(),
        )

    out = r.stdout.getvalue().strip()
    log.info("release type: " + out)

    # get package version string
    r = remote.run(
        args=[
            'wget', '-q', '-O-',
            'http://gitbuilder.ceph.com/ceph-deb-' + out + '-x86_64-basic/ref/' + branch + '/version',
            ],
        stdout=StringIO(),
        )
    version = r.stdout.getvalue().strip()
    log.info('package version is %s', version)

    remote.run(
        args=[
            'echo', 'deb',
            'http://gitbuilder.ceph.com/ceph-deb-' + out + '-x86_64-basic/ref/' + branch,
            out, 'main', run.Raw('|'),
            'sudo', 'tee', '/etc/apt/sources.list.d/ceph.list'
            ],
        stdout=StringIO(),
        )
    remote.run(
        args=[
            'sudo', 'apt-get', 'update', run.Raw('&&'),
            'sudo', 'apt-get', '-y', '--force-yes',
            'install',
            ] + ['%s=%s' % (d, version) for d in debs],
        stdout=StringIO(),
        )
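
# For reference, the echo/tee above writes an apt source line of the form
# (release and branch are substituted at run time):
#
#   deb http://gitbuilder.ceph.com/ceph-deb-<release>-x86_64-basic/ref/<branch> <release> main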

def install_debs(ctx, debs, branch):
    """
    Installs Debian packages.
    """
    log.info("Installing ceph debian packages: {debs}".format(debs=', '.join(debs)))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_update_deb_package_list_and_install, remote, debs, branch)

def _remove_deb(remote, debs):
    args = [
        'sudo', 'apt-get', '-y', '--force-yes', 'purge',
        ]
    args.extend(debs)
    args.extend([
        run.Raw('||'),
        'true'
        ])
    remote.run(args=args)
    remote.run(
        args=[
            'sudo', 'apt-get', '-y', '--force-yes',
            'autoremove',
            ],
        stdout=StringIO(),
        )

def remove_debs(ctx, debs):
    log.info("Removing/purging debian packages {debs}".format(debs=', '.join(debs)))
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_remove_deb, remote, debs)

def _remove_sources_list(remote):
    remote.run(
        args=[
            'sudo', 'rm', '-f', '/etc/apt/sources.list.d/ceph.list', run.Raw('&&'),
            'sudo', 'apt-get', 'update',
            ],
        stdout=StringIO(),
        )


def remove_sources(ctx):
    log.info("Removing ceph sources list from apt")
    with parallel() as p:
        for remote in ctx.cluster.remotes.iterkeys():
            p.spawn(_remove_sources_list, remote)

@contextlib.contextmanager
def binaries(ctx, config):
    debs = ['ceph',
            'ceph-mds',
            'ceph-common',
            'python-ceph',
            'ceph-test',
            'radosgw',
            'librados2',
            'librbd1',
            ]
    branch = config.get('branch', 'master')
    log.info('branch: {b}'.format(b=branch))
    install_debs(ctx, debs, branch)
    try:
        yield
    finally:
        remove_debs(ctx, debs)
        remove_sources(ctx)

def assign_devs(roles, devs):
    return dict(zip(roles, devs))
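
# Example: assign_devs(['0', '1'], ['/dev/sdb', '/dev/sdc']) returns
# {'0': '/dev/sdb', '1': '/dev/sdc'}; any extra devices beyond the
# number of roles are silently dropped by zip().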

@contextlib.contextmanager
def valgrind_post(ctx, config):
    testdir = teuthology.get_testdir(ctx)
    try:
        yield
    finally:
        lookup_procs = list()
        val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir)
        log.info('Checking for errors in any valgrind logs...')
        for remote in ctx.cluster.remotes.iterkeys():
            # look at valgrind logs for each node
            proc = remote.run(
                args=[
                    'grep', '-r', '<kind>',
                    run.Raw(val_path),
                    run.Raw('|'),
                    'sort',
                    run.Raw('|'),
                    'uniq',
                    ],
                wait=False,
                check_status=False,
                stdout=StringIO(),
                )
            lookup_procs.append((proc, remote))

        valgrind_exception = None
        for (proc, remote) in lookup_procs:
            out = proc.stdout.getvalue()
            for line in out.split('\n'):
                if line == '':
                    continue
                (file, kind) = line.split(':')
                log.debug('file %s kind %s', file, kind)
                if file.find('client') < 0 and file.find('mon') < 0 and kind.find('Lost') > 0:
                    continue
                log.error('saw valgrind issue %s in %s', kind, file)
                valgrind_exception = Exception('saw valgrind issues')

        if valgrind_exception is not None:
            raise valgrind_exception
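
# Each line parsed above is 'path:match' from grep -r, e.g. (an
# illustrative line, not taken from a real run):
#
#   <val_path>/osd.0.log:<kind>Leak_DefinitelyLost</kind>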

def mount_osd_data(ctx, remote, osd):
    testdir = teuthology.get_testdir(ctx)
    log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
    if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]:
        dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd]
        journal = ctx.disk_config.remote_to_roles_to_journals[remote][osd]
        mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
        fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
        mnt = os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=osd))

        log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
            o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))

        remote.run(
            args=[
                'sudo',
                'mount',
                '-t', fstype,
                '-o', ','.join(mount_options),
                dev,
                mnt,
                ]
            )

        if journal == ('/mnt/osd.%s' % osd):
            tmpfs = '/mnt/osd.%s' % osd
            log.info('Creating journal file on tmpfs at {t}'.format(t=tmpfs))
            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
            remote.run(args=['truncate', '-s', '1500M', tmpfs])

@contextlib.contextmanager
def cluster(ctx, config):
    testdir = teuthology.get_testdir(ctx)
    log.info('Creating ceph cluster...')
    run.wait(
        ctx.cluster.run(
            args=[
                'install', '-d', '-m0755', '--',
                '{tdir}/data'.format(tdir=testdir),
                ],
            wait=False,
            )
        )

    devs_to_clean = {}
    remote_to_roles_to_devs = {}
    remote_to_roles_to_journals = {}
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        devs = teuthology.get_scratch_devices(remote)
        roles_to_devs = {}
        roles_to_journals = {}
        if config.get('fs'):
            log.info('fs option selected, checking for scratch devs')
            log.info('found devs: %s' % (str(devs),))
            devs_id_map = teuthology.get_wwn_id_map(remote, devs)
            iddevs = devs_id_map.values()
            roles_to_devs = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
                )
            if len(roles_to_devs) < len(iddevs):
                iddevs = iddevs[len(roles_to_devs):]
            devs_to_clean[remote] = []

        if config.get('block_journal'):
            log.info('block journal enabled')
            roles_to_journals = assign_devs(
                teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
                )
            log.info('journal map: %s', roles_to_journals)

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled')
            roles_to_journals = {}
            remote.run(args=['sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt'])
            for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
                tmpfs = '/mnt/osd.%s' % osd
                roles_to_journals[osd] = tmpfs
                remote.run(args=['truncate', '-s', '1500M', tmpfs])
            log.info('journal map: %s', roles_to_journals)

        log.info('dev map: %s' % (str(roles_to_devs),))
        remote_to_roles_to_devs[remote] = roles_to_devs
        remote_to_roles_to_journals[remote] = roles_to_journals

    log.info('Generating config...')
    remotes_and_roles = ctx.cluster.remotes.items()
    roles = [roles for (remote, roles) in remotes_and_roles]
    ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, roles) in remotes_and_roles)]
    conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
    for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
        for role, journal in roles_to_journals.iteritems():
            key = "osd." + str(role)
            if key not in conf:
                conf[key] = {}
            conf[key]['osd journal'] = journal
    for section, keys in config['conf'].iteritems():
        for key, value in keys.iteritems():
            log.info("[%s] %s = %s" % (section, key, value))
            if section not in conf:
                conf[section] = {}
            conf[section][key] = value

    if config.get('tmpfs_journal'):
        conf['journal dio'] = False

    ctx.ceph = argparse.Namespace()
    ctx.ceph.conf = conf

    conf_path = config.get('conf_path', '/etc/ceph/ceph.conf')
    keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring')

    log.info('Writing configs...')
    conf_fp = StringIO()
    conf.write(conf_fp)
    conf_fp.seek(0)
    writes = ctx.cluster.run(
        args=[
            'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
            'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
            'sudo', 'python',
            '-c',
            'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
            conf_path,
            run.Raw('&&'),
            'sudo', 'chmod', '0644', conf_path,
            ],
        stdin=run.PIPE,
        wait=False,
        )
    teuthology.feed_many_stdins_and_close(conf_fp, writes)
    run.wait(writes)
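
    # Each write above feeds the rendered INI text to a remote
    # 'sudo python -c ...' on stdin, so the config lands at conf_path
    # with root ownership; roughly (a sketch, with the default paths):
    #
    #   sudo mkdir -p /etc/ceph && sudo chmod 0755 /etc/ceph && \
    #       sudo python -c '<copy stdin to argv[1]>' /etc/ceph/ceph.conf && \
    #       sudo chmod 0644 /etc/ceph/ceph.conf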

    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    firstmon = teuthology.get_first_mon(ctx, config)

    log.info('Setting up %s...' % firstmon)
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            '{tdir}/enable-coredump'.format(tdir=testdir),
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--create-keyring',
            keyring_path,
            ],
        )
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            '{tdir}/enable-coredump'.format(tdir=testdir),
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--gen-key',
            '--name=mon.',
            keyring_path,
            ],
        )
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            'chmod',
            '0644',
            keyring_path,
            ],
        )
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    teuthology.create_simple_monmap(
        ctx,
        remote=mon0_remote,
        conf=conf,
        )

    log.info('Creating admin key on %s...' % firstmon)
    ctx.cluster.only(firstmon).run(
        args=[
            'sudo',
            '{tdir}/enable-coredump'.format(tdir=testdir),
            'ceph-coverage',
            coverage_dir,
            'ceph-authtool',
            '--gen-key',
            '--name=client.admin',
            '--set-uid=0',
            '--cap', 'mon', 'allow *',
            '--cap', 'osd', 'allow *',
            '--cap', 'mds', 'allow',
            keyring_path,
            ],
        )

    log.info('Copying monmap to all nodes...')
    keyring = teuthology.get_file(
        remote=mon0_remote,
        path=keyring_path,
        )
    monmap = teuthology.get_file(
        remote=mon0_remote,
        path='{tdir}/monmap'.format(tdir=testdir),
        )

    for rem in ctx.cluster.remotes.iterkeys():
        # copy mon key and initial monmap
        log.info('Sending monmap to node {remote}'.format(remote=rem))
        teuthology.sudo_write_file(
            remote=rem,
            path=keyring_path,
            data=keyring,
            perms='0644'
            )
        teuthology.write_file(
            remote=rem,
            path='{tdir}/monmap'.format(tdir=testdir),
            data=monmap,
            )
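
    # For reference, the first keyring command above expands to roughly
    # this shell invocation (a sketch; <testdir> is per-run and the
    # keyring path shown is the default):
    #
    #   sudo <testdir>/enable-coredump ceph-coverage \
    #       <testdir>/archive/coverage ceph-authtool --create-keyring \
    #       /etc/ceph/ceph.keyring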

    log.info('Setting up mon nodes...')
    mons = ctx.cluster.only(teuthology.is_type('mon'))
    run.wait(
        mons.run(
            args=[
                '{tdir}/enable-coredump'.format(tdir=testdir),
                'ceph-coverage',
                coverage_dir,
                'osdmaptool',
                '-c', conf_path,
                '--clobber',
                '--createsimple', '{num:d}'.format(
                    num=teuthology.num_instances_of_type(ctx.cluster, 'osd'),
                    ),
                '{tdir}/osdmap'.format(tdir=testdir),
                '--pg_bits', '2',
                '--pgp_bits', '4',
                ],
            wait=False,
            ),
        )

    log.info('Setting up osd nodes...')
    for remote, roles_for_host in osds.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            remote.run(
                args=[
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    '--name=osd.{id}'.format(id=id_),
                    '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_),
                    ],
                )

    log.info('Setting up mds nodes...')
    mdss = ctx.cluster.only(teuthology.is_type('mds'))
    for remote, roles_for_host in mdss.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
            remote.run(
                args=[
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    '--name=mds.{id}'.format(id=id_),
                    '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_),
                    ],
                )

    log.info('Setting up client nodes...')
    clients = ctx.cluster.only(teuthology.is_type('client'))
    for remote, roles_for_host in clients.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
            client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
            remote.run(
                args=[
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'sudo',
                    'ceph-authtool',
                    '--create-keyring',
                    '--gen-key',
                    # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
                    '--name=client.{id}'.format(id=id_),
                    client_keyring,
                    run.Raw('&&'),
                    'sudo',
                    'chmod',
                    '0644',
                    client_keyring,
                    ],
                )

    log.info('Reading keys from all nodes...')
    keys_fp = StringIO()
    keys = []
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for type_ in ['osd', 'mds']:
            for id_ in teuthology.roles_of_type(roles_for_host, type_):
                data = teuthology.get_file(
                    remote=remote,
                    path='{tdir}/data/{type}.{id}.keyring'.format(
                        tdir=testdir,
                        type=type_,
                        id=id_,
                        ),
                    )
                keys.append((type_, id_, data))
                keys_fp.write(data)
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for type_ in ['client']:
            for id_ in teuthology.roles_of_type(roles_for_host, type_):
                data = teuthology.get_file(
                    remote=remote,
                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
                    )
                keys.append((type_, id_, data))
                keys_fp.write(data)

    log.info('Adding keys to all mons...')
    writes = mons.run(
        args=[
            'sudo', 'tee', '-a',
            keyring_path,
            ],
        stdin=run.PIPE,
        wait=False,
        stdout=StringIO(),
        )
    keys_fp.seek(0)
    teuthology.feed_many_stdins_and_close(keys_fp, writes)
    run.wait(writes)
    for type_, id_, data in keys:
        run.wait(
            mons.run(
                args=[
                    'sudo',
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-authtool',
                    keyring_path,
                    '--name={type}.{id}'.format(
                        type=type_,
                        id=id_,
                        ),
                    ] + list(teuthology.generate_caps(type_)),
                wait=False,
                ),
            )

    log.info('Running mkfs on mon nodes...')
    for remote, roles_for_host in mons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
            remote.run(
                args=[
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-mon',
                    '--mkfs',
                    '-i', id_,
                    '--monmap={tdir}/monmap'.format(tdir=testdir),
                    '--osdmap={tdir}/osdmap'.format(tdir=testdir),
                    '--keyring={kpath}'.format(kpath=keyring_path),
                    ],
                )

    log.info('Running mkfs on osd nodes...')
    for remote, roles_for_host in osds.remotes.iteritems():
        roles_to_devs = remote_to_roles_to_devs[remote]
        roles_to_journals = remote_to_roles_to_journals[remote]
        ctx.disk_config = argparse.Namespace()
        ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs
        ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals
        ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
        ctx.disk_config.remote_to_roles_to_dev_fstype = {}

        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            log.info(str(roles_to_journals))
            log.info(id_)
            remote.run(
                args=[
                    'mkdir',
                    os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
                    ],
                )
            if roles_to_devs.get(id_):
                dev = roles_to_devs[id_]
                fs = config.get('fs')
                package = None
                mkfs_options = config.get('mkfs_options')
                mount_options = config.get('mount_options')
                if fs == 'btrfs':
                    package = 'btrfs-tools'
                    if mount_options is None:
                        mount_options = ['noatime', 'user_subvol_rm_allowed']
                    if mkfs_options is None:
                        mkfs_options = ['-m', 'single',
                                        '-l', '32768',
                                        '-n', '32768']
                if fs == 'xfs':
                    package = 'xfsprogs'
                    if mount_options is None:
                        mount_options = ['noatime']
                    if mkfs_options is None:
                        mkfs_options = ['-f', '-i', 'size=2048']
                if fs == 'ext4' or fs == 'ext3':
                    if mount_options is None:
                        mount_options = ['noatime', 'user_xattr']

                if mount_options is None:
                    mount_options = []
                if mkfs_options is None:
                    mkfs_options = []
                mkfs = ['mkfs.%s' % fs] + mkfs_options
                log.info('%s on %s on %s' % (mkfs, dev, remote))
                if package is not None:
                    remote.run(
                        args=[
                            'sudo',
                            'apt-get', 'install', '-y', package
                            ],
                        stdout=StringIO(),
                        )
                remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
                log.info('mount %s on %s -o %s' % (dev, remote,
                                                   ','.join(mount_options)))
                remote.run(
                    args=[
                        'sudo',
                        'mount',
                        '-t', fs,
                        '-o', ','.join(mount_options),
                        dev,
                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
                        ]
                    )
                if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
                    ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][id_] = mount_options
                if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
                    ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
                ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
                remote.run(
                    args=[
                        'sudo', 'chown', '-R', 'ubuntu.ubuntu',
                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
                        ]
                    )
                remote.run(
                    args=[
                        'sudo', 'chmod', '-R', '755',
                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
                        ]
                    )
                devs_to_clean[remote].append(
                    os.path.join(
                        '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)
                        )
                    )

        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            remote.run(
                args=[
                    'MALLOC_CHECK_=3',
                    '{tdir}/enable-coredump'.format(tdir=testdir),
                    'ceph-coverage',
                    coverage_dir,
                    'ceph-osd',
                    '--mkfs',
                    '-i', id_,
                    '--monmap', '{tdir}/monmap'.format(tdir=testdir),
                    ],
                )
    run.wait(
        mons.run(
            args=[
                'rm',
                '--',
                '{tdir}/monmap'.format(tdir=testdir),
                '{tdir}/osdmap'.format(tdir=testdir),
                ],
            wait=False,
            ),
        )
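
    # With the default xfs options above, the mkfs/mount pair expands to
    # roughly this (illustrative device name and osd id, not from a real
    # run):
    #
    #   yes | sudo mkfs.xfs -f -i size=2048 /dev/sdb
    #   sudo mount -t xfs -o noatime /dev/sdb <testdir>/data/osd.0.data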

    try:
        yield
    finally:
        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()

        log.info('Checking cluster log for badness...')
        def first_in_ceph_log(pattern, excludes):
            args = [
                'egrep', pattern,
                '%s/archive/log/cluster.%s.log' % (testdir, firstmon),
                ]
            for exclude in excludes:
                args.extend([run.Raw('|'), 'egrep', '-v', exclude])
            args.extend([
                run.Raw('|'), 'head', '-n', '1',
                ])
            r = mon0_remote.run(
                stdout=StringIO(),
                args=args,
                )
            stdout = r.stdout.getvalue()
            if stdout != '':
                return stdout
            return None

        if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
                             config['log_whitelist']) is not None:
            log.warning('Found errors (ERR|WRN|SEC) in cluster log')
            ctx.summary['success'] = False
            # use the most severe problem as the failure reason
            if 'failure_reason' not in ctx.summary:
                for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
                    match = first_in_ceph_log(pattern, config['log_whitelist'])
                    if match is not None:
                        ctx.summary['failure_reason'] = \
                            '"{match}" in cluster log'.format(
                                match=match.rstrip('\n'),
                                )
                        break

        for remote, dirs in devs_to_clean.iteritems():
            for dir_ in dirs:
                log.info('Unmounting %s on %s' % (dir_, remote))
                remote.run(
                    args=[
                        'sync',
                        run.Raw('&&'),
                        'sudo',
                        'umount',
                        '-f',
                        dir_
                        ]
                    )

        if config.get('tmpfs_journal'):
            log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
            for remote, roles_for_host in osds.remotes.iteritems():
                remote.run(
                    args=['sudo', 'umount', '-f', '/mnt'],
                    check_status=False,
                    )

        if ctx.archive is not None:
            # archive mon data, too
            log.info('Archiving mon data...')
            path = os.path.join(ctx.archive, 'data')
            os.makedirs(path)
            for remote, roles in mons.remotes.iteritems():
                for role in roles:
                    if role.startswith('mon.'):
                        teuthology.pull_directory_tarball(remote,
                                                          '%s/data/%s' % (testdir, role),
                                                          path + '/' + role + '.tgz')

        log.info('Cleaning ceph cluster...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'rm',
                    '-rf',
                    '--',
                    conf_path,
                    keyring_path,
                    '{tdir}/data'.format(tdir=testdir),
                    '{tdir}/monmap'.format(tdir=testdir),
                    run.Raw('{tdir}/asok.*'.format(tdir=testdir))
                    ],
                wait=False,
                ),
            )

@contextlib.contextmanager
def run_daemon(ctx, config, type_):
    log.info('Starting %s daemons...' % type_)
    testdir = teuthology.get_testdir(ctx)
    daemons = ctx.cluster.only(teuthology.is_type(type_))
    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)

    daemon_signal = 'kill'
    if config.get('coverage') or config.get('valgrind') is not None:
        daemon_signal = 'term'

    num_active = 0
    for remote, roles_for_host in daemons.remotes.iteritems():
        for id_ in teuthology.roles_of_type(roles_for_host, type_):
            name = '%s.%s' % (type_, id_)

            if not (id_.endswith('-s')) and (id_.find('-s-') == -1):
                num_active += 1

            run_cmd = [
                '{tdir}/enable-coredump'.format(tdir=testdir),
                'ceph-coverage',
                coverage_dir,
                '{tdir}/daemon-helper'.format(tdir=testdir),
                daemon_signal,
                ]
            run_cmd_tail = [
                'ceph-%s' % (type_),
                '-f',
                '-i', id_]

            if config.get('valgrind') is not None:
                valgrind_args = None
                if type_ in config['valgrind']:
                    valgrind_args = config['valgrind'][type_]
                if name in config['valgrind']:
                    valgrind_args = config['valgrind'][name]
                run_cmd.extend(teuthology.get_valgrind_args(testdir, name, valgrind_args))

            if type_ in config.get('cpu_profile', []):
                profile_path = '%s/archive/log/%s.%s.prof' % (testdir, type_, id_)
                run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])

            run_cmd.extend(run_cmd_tail)

            ctx.daemons.add_daemon(remote, type_, id_,
                                   args=run_cmd,
                                   logger=log.getChild(name),
                                   stdin=run.PIPE,
                                   wait=False,
                                   )

    if type_ == 'mds':
        firstmon = teuthology.get_first_mon(ctx, config)
        (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()

        mon0_remote.run(args=[
            '{tdir}/enable-coredump'.format(tdir=testdir),
            'ceph-coverage',
            coverage_dir,
            'ceph',
            'mds', 'set_max_mds', str(num_active)])

    try:
        yield
    finally:
        log.info('Shutting down %s daemons...' % type_)
        exc_info = (None, None, None)
        for daemon in ctx.daemons.iter_daemons_of_role(type_):
            try:
                daemon.stop()
            except (run.CommandFailedError,
                    run.CommandCrashedError,
                    run.ConnectionLostError):
                exc_info = sys.exc_info()
                log.exception('Saw exception from %s.%s', daemon.role, daemon.id_)
        if exc_info != (None, None, None):
            raise exc_info[0], exc_info[1], exc_info[2]
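
# A daemon started by run_daemon ends up with a command line of roughly
# this shape (an illustrative osd.0 without valgrind or cpu profiling;
# <testdir> is substituted per run):
#
#   <testdir>/enable-coredump ceph-coverage <testdir>/archive/coverage \
#       <testdir>/daemon-helper kill ceph-osd -f -i 0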

def healthy(ctx, config):
    log.info('Waiting until ceph is healthy...')
    firstmon = teuthology.get_first_mon(ctx, config)
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    teuthology.wait_until_osds_up(
        ctx,
        cluster=ctx.cluster,
        remote=mon0_remote
        )
    teuthology.wait_until_healthy(
        ctx,
        remote=mon0_remote,
        )

@contextlib.contextmanager
def task(ctx, config):
    """
    Set up and tear down a Ceph cluster.

    For example::

        tasks:
        - ceph:
        - interactive:

    You can also specify what branch to run::

        tasks:
        - ceph:
            branch: foo

    Or a tag::

        tasks:
        - ceph:
            tag: v0.42.13

    Or a sha1::

        tasks:
        - ceph:
            sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed

    Or a local source dir::

        tasks:
        - ceph:
            path: /home/sage/ceph

    To capture code coverage data, use::

        tasks:
        - ceph:
            coverage: true

    To use btrfs, ext4, or xfs on the target's scratch disks, use::

        tasks:
        - ceph:
            fs: xfs
            mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
            mount_options: [nobarrier, inode64]

    Note, this will cause the task to check the /scratch_devs file on each
    node for available devices. If no such file is found, /dev/sdb will be
    used.

    To run some daemons under valgrind, include their names
    and the tool/args to use in a valgrind section::

        tasks:
        - ceph:
            valgrind:
              mds.1: --tool=memcheck
              osd.1: [--tool=memcheck, --leak-check=no]

    Those nodes which are running daemons under memcheck or another
    valgrind tool will get checked for bad results.

    To adjust or modify config options, use::

        tasks:
        - ceph:
            conf:
              section:
                key: value

    For example::

        tasks:
        - ceph:
            conf:
              mds.0:
                some option: value
                other key: other value
              client.0:
                debug client: 10
                debug ms: 1

    By default, the cluster log is checked for errors and warnings,
    and the run marked failed if any appear. You can ignore log
    entries by giving a list of egrep compatible regexes, i.e.::

        tasks:
        - ceph:
            log-whitelist: ['foo.*bar', 'bad message']
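
    The task body below also reads a few options not shown above; they
    are set the same way (the values here are illustrative only)::

        tasks:
        - ceph:
            block_journal: true
            cpu_profile: [osd]
            wait-for-healthy: false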
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task ceph only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {})
    teuthology.deep_merge(config, overrides.get('ceph', {}))

    ctx.daemons = CephState()

    # Flavor tells us what gitbuilder to fetch the prebuilt software
    # from. It's a combination of possible keywords, in a specific
    # order, joined by dashes. It is used as a URL path name. If a
    # match is not found, the teuthology run fails. This is ugly,
    # and should be cleaned up at some point.

    dist = 'precise'
    format = 'tarball'
    arch = 'x86_64'
    flavor = 'basic'

    # First element: controlled by user (or not there, by default):
    # used to choose the right distribution, e.g. "oneiric".
    flavor = config.get('flavor', 'basic')

    if config.get('path'):
        # local dir precludes any other flavors
        flavor = 'local'
    else:
        if config.get('valgrind'):
            log.info('Using notcmalloc flavor and running some daemons under valgrind')
            flavor = 'notcmalloc'
        else:
            if config.get('coverage'):
                log.info('Recording coverage for this run.')
                flavor = 'gcov'

    ctx.summary['flavor'] = flavor

    testdir = teuthology.get_testdir(ctx)
    if config.get('coverage'):
        coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
        log.info('Creating coverage directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'install', '-d', '-m0755', '--',
                    coverage_dir,
                    ],
                wait=False,
                )
            )

    with contextutil.nested(
        lambda: ceph_log(ctx=ctx, config=None),
        lambda: ship_utilities(ctx=ctx, config=None),
        lambda: binaries(ctx=ctx, config=dict(
                branch=config.get('branch', 'master'),
                tag=config.get('tag'),
                sha1=config.get('sha1'),
                flavor=flavor,
                dist=config.get('dist', dist),
                format=format,
                arch=arch
                )),
        lambda: valgrind_post(ctx=ctx, config=config),
        lambda: cluster(ctx=ctx, config=dict(
                conf=config.get('conf', {}),
                fs=config.get('fs', None),
                mkfs_options=config.get('mkfs_options', None),
                mount_options=config.get('mount_options', None),
                block_journal=config.get('block_journal', None),
                tmpfs_journal=config.get('tmpfs_journal', None),
                log_whitelist=config.get('log-whitelist', []),
                cpu_profile=set(config.get('cpu_profile', [])),
                )),
        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
        ):
        if config.get('wait-for-healthy', True):
            healthy(ctx=ctx, config=None)
        yield