2011-05-31 20:51:48 +00:00
|
|
|
from cStringIO import StringIO
|
|
|
|
|
|
|
|
import os
|
|
|
|
import logging
|
|
|
|
import configobj
|
2011-07-02 01:15:52 +00:00
|
|
|
import getpass
|
|
|
|
import socket
|
2012-07-11 16:22:50 +00:00
|
|
|
import tarfile
|
2011-05-31 20:51:48 +00:00
|
|
|
import time
|
2011-06-02 16:09:08 +00:00
|
|
|
import urllib2
|
|
|
|
import urlparse
|
2011-07-07 18:43:35 +00:00
|
|
|
import yaml
|
2012-01-08 23:14:18 +00:00
|
|
|
import json
|
2011-05-31 20:51:48 +00:00
|
|
|
|
2012-07-11 16:22:50 +00:00
|
|
|
from teuthology import safepath
|
2011-09-13 21:53:02 +00:00
|
|
|
from .orchestra import run
|
2011-06-16 01:06:57 +00:00
|
|
|
|
2011-05-31 20:51:48 +00:00
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
import datetime

# Timestamp captured once at module import; combined with the username to
# build a unique per-run test directory name (see get_testdir).
stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
|
|
|
|
|
|
|
|
def get_testdir(ctx):
    """
    Return the per-run test directory for this context.

    An explicit 'test_path' in the teuthology config wins outright.
    Otherwise the directory lives under 'base_test_dir' (default
    /tmp/cephtest) and is named after the run (ctx.name) when one is
    set, or after the user plus the module-load timestamp.
    """
    cfg = ctx.teuthology_config
    if 'test_path' in cfg:
        return cfg['test_path']

    basedir = cfg.get('base_test_dir', '/tmp/cephtest')

    if getattr(ctx, 'name', None):
        log.debug('with name basedir: {b}'.format(b=basedir))
        return '{basedir}/{rundir}'.format(
            basedir=basedir,
            rundir=ctx.name)

    log.debug('basedir: {b}'.format(b=basedir))
    return '{basedir}/{user}-{stamp}'.format(
        basedir=basedir,
        user=get_user(),
        stamp=stamp)
|
|
|
|
|
|
|
|
def get_testdir_base(ctx):
    """
    Return the base directory under which per-run test dirs are created.

    An explicit 'test_path' in the teuthology config overrides the
    'base_test_dir' setting (default /tmp/cephtest).
    """
    cfg = ctx.teuthology_config
    try:
        return cfg['test_path']
    except KeyError:
        return cfg.get('base_test_dir', '/tmp/cephtest')
|
|
|
|
|
Pull from new gitbuilder.ceph.com locations.
Simplifies the flavor stuff into a tuple of
<package,type,flavor,dist,arch>
where package is ceph, kenrel, etc.
type is tarball, deb
flavor is basic, gcov, notcmalloc
arch is x86_64, i686 (uname -m)
dist is oneiric, etc. (lsb_release -s -c)
2012-03-13 17:02:26 +00:00
|
|
|
def get_ceph_binary_url(package=None,
                        branch=None, tag=None, sha1=None, dist=None,
                        flavor=None, format=None, arch=None):
    """
    Resolve a build on gitbuilder.ceph.com to a (sha1, binary dir URL) pair.

    Exactly one of sha1, tag, or branch selects the build; branch
    defaults to 'master' when none is given.  Raises urllib2.HTTPError
    if the ref cannot be translated to a sha1 by gitbuilder.
    """
    BASE = 'http://gitbuilder.ceph.com/{package}-{format}-{dist}-{arch}-{flavor}/'.format(
        package=package,
        flavor=flavor,
        arch=arch,
        format=format,
        dist=dist
        )

    if sha1 is not None:
        assert branch is None, "cannot set both sha1 and branch"
        assert tag is None, "cannot set both sha1 and tag"
    else:
        # gitbuilder uses remote-style ref names for branches, mangled to
        # have underscores instead of slashes; e.g. origin_master
        if tag is not None:
            ref = tag
            assert branch is None, "cannot set both branch and tag"
        else:
            if branch is None:
                branch = 'master'
            ref = branch

        # Ask gitbuilder which sha1 this ref currently points at.
        sha1_url = urlparse.urljoin(BASE, 'ref/{ref}/sha1'.format(ref=ref))
        log.debug('Translating ref to sha1 using url %s', sha1_url)

        try:
            sha1_fp = urllib2.urlopen(sha1_url)
            sha1 = sha1_fp.read().rstrip('\n')
            sha1_fp.close()
        except urllib2.HTTPError as e:
            log.error('Failed to get url %s', sha1_url)
            raise e

    log.debug('Using %s %s sha1 %s', package, format, sha1)

    bindir_url = urlparse.urljoin(BASE, 'sha1/{sha1}/'.format(sha1=sha1))
    return (sha1, bindir_url)
|
2011-05-31 20:51:48 +00:00
|
|
|
|
|
|
|
def feed_many_stdins(fp, processes):
    """Copy fp, 8 KiB at a time, to the stdin of every given process."""
    for chunk in iter(lambda: fp.read(8192), ''):
        for proc in processes:
            proc.stdin.write(chunk)
|
|
|
|
|
|
|
|
def feed_many_stdins_and_close(fp, processes):
    """Feed fp to every process's stdin, then close all those stdins."""
    feed_many_stdins(fp, processes)
    for p in processes:
        p.stdin.close()
|
|
|
|
|
|
|
|
def get_mons(roles, ips):
    """
    Map each mon.* role to an 'ip:port' address string.

    roles is a list (one entry per host) of role-name lists; ips is the
    parallel list of host IPs.  Ports start at 6789 per host and
    increment for each additional monitor on the same host.  Asserts
    that at least one monitor role exists.
    """
    mons = {}
    next_port = {}
    for host_idx, host_roles in enumerate(roles):
        ip = ips[host_idx]
        for role in host_roles:
            if not role.startswith('mon.'):
                continue
            # First mon on a host gets 6789; subsequent ones count up.
            if ip in next_port:
                next_port[ip] += 1
            else:
                next_port[ip] = 6789
            mons[role] = '{ip}:{port}'.format(
                ip=ip,
                port=next_port[ip],
                )
    assert mons
    return mons
|
|
|
|
|
|
|
|
def generate_caps(type_):
    """
    Yield ceph-authtool argument triples ('--cap', subsystem, capability)
    granting the default capabilities for the given daemon/client type.
    """
    defaults = dict(
        osd=dict(
            mon='allow *',
            osd='allow *',
            ),
        mds=dict(
            mon='allow *',
            osd='allow *',
            mds='allow',
            ),
        client=dict(
            mon='allow rw',
            osd='allow rwx pool data, allow rwx pool rbd, allow rwx pool newpool',
            mds='allow',
            ),
        )
    for subsystem, capability in defaults[type_].items():
        for token in ('--cap', subsystem, capability):
            yield token
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def skeleton_config(ctx, roles, ips):
    """
    Returns a ConfigObj that's prefilled with a skeleton config.

    Use conf[section][key]=value or conf.merge to change it.

    Use conf.write to write it out, override .filename first if you want.
    """
    path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
    # Use a context manager so the template file handle is closed instead
    # of leaked (the original never closed it).
    with open(path, 'r') as t:
        skconf = t.read().format(testdir=get_testdir(ctx))
    conf = configobj.ConfigObj(StringIO(skconf), file_error=True)

    # One section per monitor with its address.
    mons = get_mons(roles=roles, ips=ips)
    for role, addr in mons.iteritems():
        conf.setdefault(role, {})
        conf[role]['mon addr'] = addr

    # set up standby mds's
    for roles_subset in roles:
        for role in roles_subset:
            if role.startswith('mds.'):
                conf.setdefault(role, {})
                # A role like 'mds.a-s-b' means this daemon is a standby
                # for mds b; record that in its section.
                if role.find('-s-') != -1:
                    standby_mds = role[role.find('-s-')+3:]
                    conf[role]['mds standby for name'] = standby_mds
    return conf
|
|
|
|
|
|
|
|
def roles_of_type(roles_for_host, type_):
    """Yield the id part of every role on this host of the given type."""
    prefix = '{type}.'.format(type=type_)
    for role in roles_for_host:
        if role.startswith(prefix):
            yield role[len(prefix):]
|
|
|
|
|
2012-10-22 23:51:54 +00:00
|
|
|
def all_roles(cluster):
    """Yield every role name on every remote in the cluster."""
    for _remote, host_roles in cluster.remotes.iteritems():
        for role in host_roles:
            yield role
|
|
|
|
|
2011-06-16 23:07:59 +00:00
|
|
|
def all_roles_of_type(cluster, type_):
    """Yield the id of every role of the given type across the cluster."""
    prefix = '{type}.'.format(type=type_)
    for _remote, host_roles in cluster.remotes.iteritems():
        for role in host_roles:
            if role.startswith(prefix):
                yield role[len(prefix):]
|
|
|
|
|
2011-06-01 23:04:52 +00:00
|
|
|
def is_type(type_):
    """
    Returns a matcher function for whether role is of type given.
    """
    prefix = '{type}.'.format(type=type_)
    return lambda role: role.startswith(prefix)
|
|
|
|
|
2011-06-03 21:47:44 +00:00
|
|
|
def num_instances_of_type(cluster, type_):
    """Count the roles of the given type across the whole cluster."""
    prefix = '{type}.'.format(type=type_)
    return sum(
        1
        for _remote, host_roles in cluster.remotes.items()
        for role in host_roles
        if role.startswith(prefix)
    )
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def create_simple_monmap(ctx, remote, conf):
    """
    Writes a simple monmap based on current ceph.conf into <tmpdir>/monmap.

    Assumes ceph_conf is up to date.

    Assumes mon sections are named "mon.*", with the dot.
    """
    PREFIX = 'mon.'

    def gen_addresses():
        for section, data in conf.iteritems():
            if section.startswith(PREFIX):
                yield (section[len(PREFIX):], data['mon addr'])

    addresses = list(gen_addresses())
    assert addresses, "There are no monitors in config!"
    log.debug('Ceph mon addresses: %s', addresses)

    testdir = get_testdir(ctx)
    args = [
        '{tdir}/enable-coredump'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
        '{tdir}/archive/coverage'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/monmaptool'.format(tdir=testdir),
        '--create',
        '--clobber',
    ]
    for name, addr in addresses:
        args += ['--add', name, addr]
    args += [
        '--print',
        '{tdir}/monmap'.format(tdir=testdir),
    ]
    remote.run(args=args)
|
|
|
|
|
2011-06-01 23:04:52 +00:00
|
|
|
def write_file(remote, path, data):
    """Write data to path on the remote host (streamed over stdin)."""
    # A tiny remote python helper copies our stdin into the target file.
    copier = 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))'
    remote.run(
        args=['python', '-c', copier, path],
        stdin=data,
        )
|
|
|
|
|
2011-06-20 20:19:08 +00:00
|
|
|
def sudo_write_file(remote, path, data):
    """Like write_file, but the remote copy runs under sudo."""
    copier = 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))'
    remote.run(
        args=['sudo', 'python', '-c', copier, path],
        stdin=data,
        )
|
|
|
|
|
2011-06-01 23:04:52 +00:00
|
|
|
def get_file(remote, path):
    """
    Read a file from remote host into memory.
    """
    proc = remote.run(
        args=['cat', '--', path],
        stdout=StringIO(),
        )
    return proc.stdout.getvalue()
|
|
|
|
|
2012-07-11 16:22:50 +00:00
|
|
|
def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.

    Streams an uncompressed tar of remotedir over the connection and
    unpacks regular files into localdir (which must not already exist).
    Directories are created implicitly; devices, symlinks and hard links
    are logged and skipped.
    """
    os.mkdir(localdir)
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localdir)
    proc = remote.run(
        args=[
            'tar',
            'c',
            '-f', '-',
            '-C', remotedir,
            '--',
            '.',
            ],
        stdout=run.PIPE,
        wait=False,
        )
    # 'r|' = non-seekable stream mode, required when reading from a pipe.
    tar = tarfile.open(mode='r|', fileobj=proc.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            pass
        elif ti.isfile():
            # munge keeps the extraction path confined inside localdir.
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            tar.makefile(ti, targetpath=os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
            continue
    # Propagate any failure of the remote tar command.
    proc.exitstatus.get()
|
|
|
|
|
2012-07-17 17:00:59 +00:00
|
|
|
def pull_directory_tarball(remote, remotedir, localfile):
    """
    Copy a remote directory to a local tarball.

    Streams 'tar cz' output from the remote host straight into localfile.
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localfile)
    # Open in binary mode (the payload is a gzip stream, not text) and use
    # a context manager so the handle is flushed and closed even if the
    # remote tar fails -- the original leaked it and opened in text mode.
    with open(localfile, 'wb') as out:
        proc = remote.run(
            args=[
                'tar',
                'cz',
                '-f', '-',
                '-C', remotedir,
                '--',
                '.',
                ],
            stdout=out,
            wait=False,
            )
        proc.exitstatus.get()
|
|
|
|
|
2013-01-23 02:27:41 +00:00
|
|
|
# returns map of devices to device id links:
# /dev/sdb: /dev/disk/by-id/wwn-0xf00bad
def get_wwn_id_map(remote, devs):
    """
    Build a {device: /dev/disk/by-id/wwn-* symlink} map for devs.

    Returns None if the by-id listing cannot be obtained (e.g. no wwn
    links exist on that host).
    """
    stdout = None
    try:
        r = remote.run(
            args=[
                'ls',
                '-l',
                '/dev/disk/by-id/wwn-*',
                ],
            stdout=StringIO(),
            )
        stdout = r.stdout.getvalue()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; any failure to list wwn links means "no map available".
        return None

    devmap = {}

    # lines will be:
    # lrwxrwxrwx 1 root root 9 Jan 22 14:58 /dev/disk/by-id/wwn-0x50014ee002ddecaf -> ../../sdb
    for line in stdout.splitlines():
        # split() rather than split(' '): ls -l pads columns with runs of
        # spaces, which split(' ') would turn into empty fields and shift
        # the [-3] index off the symlink path.
        comps = line.split()
        if len(comps) < 3:
            # blank or malformed line; skip defensively
            continue
        # comps[-1] is the link target, e.g. ../../sdb -> /dev/sdb
        dev = '/dev/{d}'.format(d=comps[-1].split('/')[-1])

        # comps[-3] is the symlink itself:
        # /dev/disk/by-id/wwn-0x50014ee002ddecaf
        iddev = comps[-3]

        if dev in devs:
            devmap[dev] = iddev

    return devmap
|
|
|
|
|
2011-10-03 21:03:36 +00:00
|
|
|
def get_scratch_devices(remote):
    """
    Read the scratch disk list from remote host.

    Prefers an explicit /scratch_devs file; falls back to listing
    /dev/[sv]d* block devices.  Only devices that exist, are readable,
    and are not mounted are returned.
    """
    devs = []
    try:
        file_data = get_file(remote, "/scratch_devs")
        devs = file_data.split()
    except Exception:
        # Narrowed from a bare except; missing/unreadable /scratch_devs
        # falls back to enumerating likely scratch block devices.
        r = remote.run(
            args=['ls', run.Raw('/dev/[sv]d*')],
            stdout=StringIO()
            )
        # split() instead of split('\n'): the trailing newline of ls
        # output would otherwise yield a bogus empty device name.
        devs = r.stdout.getvalue().split()

    log.debug('devs={d}'.format(d=devs))

    retval = []
    for dev in devs:
        try:
            remote.run(
                args=[
                    # node exists
                    'stat',
                    dev,
                    run.Raw('&&'),
                    # readable
                    'sudo', 'dd', 'if=%s' % dev, 'of=/dev/null', 'count=1',
                    run.Raw('&&'),
                    # not mounted
                    run.Raw('!'),
                    'mount',
                    run.Raw('|'),
                    'grep', '-q', dev,
                    ]
                )
            retval.append(dev)
        except Exception:
            # Shell pipeline failed => device unusable; deliberately skip.
            pass
    return retval
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def wait_until_healthy(ctx, remote):
    """Wait until a Ceph cluster is healthy."""
    testdir = get_testdir(ctx)
    # The command is invariant; build it once outside the polling loop.
    health_cmd = [
        '{tdir}/enable-coredump'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
        '{tdir}/archive/coverage'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
        '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
        'health',
        '--concise',
    ]
    while True:
        r = remote.run(
            args=health_cmd,
            stdout=StringIO(),
            logger=log.getChild('health'),
            )
        out = r.stdout.getvalue()
        log.debug('Ceph health: %s', out.rstrip('\n'))
        # The first word of the output is the overall status.
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(1)
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def wait_until_osds_up(ctx, cluster, remote):
    """Wait until all Ceph OSDs are booted."""
    num_osds = num_instances_of_type(cluster, 'osd')
    testdir = get_testdir(ctx)
    while True:
        r = remote.run(
            args=[
                '{tdir}/enable-coredump'.format(tdir=testdir),
                '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
                '{tdir}/archive/coverage'.format(tdir=testdir),
                '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
                '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
                '--concise',
                'osd', 'dump', '--format=json'
                ],
            stdout=StringIO(),
            logger=log.getChild('health'),
            )
        out = r.stdout.getvalue()
        # The first line of output is not part of the JSON document; drop
        # it before parsing.
        j = json.loads('\n'.join(out.split('\n')[1:]))
        # NOTE(review): this counts every OSD listed in the dump, not only
        # those flagged up -- confirm that 'osds' here only contains
        # booted OSDs, otherwise this loop exits too early.
        up = len(j['osds'])
        log.debug('%d of %d OSDs are up' % (up, num_osds))
        if up == num_osds:
            break
        time.sleep(1)
|
|
|
|
|
2011-06-01 23:04:52 +00:00
|
|
|
def wait_until_fuse_mounted(remote, fuse, mountpoint):
    """
    Poll mountpoint's filesystem type until it reports 'fuseblk',
    i.e. until the cfuse client has actually mounted it.
    """
    while True:
        capture = StringIO()
        remote.run(
            args=[
                'stat',
                '--file-system',
                '--printf=%T\n',
                '--',
                mountpoint,
                ],
            stdout=capture,
            )
        fstype = capture.getvalue().rstrip('\n')
        if fstype == 'fuseblk':
            break
        log.debug('cfuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype))

        # it shouldn't have exited yet; exposes some trivial problems
        assert not fuse.exitstatus.ready()

        time.sleep(5)
    log.info('cfuse is mounted on %s', mountpoint)
|
2011-06-16 01:06:57 +00:00
|
|
|
|
2011-08-10 17:37:04 +00:00
|
|
|
def reconnect(ctx, timeout):
    """
    Connect to all the machines in ctx.cluster.

    Presumably, some of them won't be up. Handle this
    by waiting for them, unless the wait time exceeds
    the specified timeout.

    ctx needs to contain the cluster of machines you
    wish it to try and connect to, as well as a config
    holding the ssh keys for each of them. As long as it
    contains this data, you can construct a context
    that is a subset of your full cluster.
    """
    log.info('Re-opening connections...')
    starttime = time.time()
    need_reconnect = ctx.cluster.remotes.keys()
    while need_reconnect:
        # Iterate over a copy: remotes are removed from need_reconnect on
        # success, and removing from the list being iterated would
        # silently skip the element after each removed one.
        for remote in list(need_reconnect):
            try:
                log.info('trying to connect to %s', remote.name)
                from .orchestra import connection
                remote.ssh = connection.connect(
                    user_at_host=remote.name,
                    host_key=ctx.config['targets'][remote.name],
                    keep_alive=True,
                    )
            except Exception:
                # Still down: keep waiting unless we've run out of time.
                if time.time() - starttime > timeout:
                    raise
            else:
                need_reconnect.remove(remote)

        log.debug('waited {elapsed}'.format(elapsed=str(time.time() - starttime)))
        time.sleep(1)
|
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def write_secret_file(ctx, remote, role, filename):
    """Extract the key for role from its keyring and write it to filename."""
    testdir = get_testdir(ctx)
    cmd = [
        '{tdir}/enable-coredump'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
        '{tdir}/archive/coverage'.format(tdir=testdir),
        '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
        '--name={role}'.format(role=role),
        '--print-key',
        '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
        run.Raw('>'),
        filename,
    ]
    remote.run(args=cmd)
|
2011-06-20 19:12:11 +00:00
|
|
|
|
2011-08-09 20:23:58 +00:00
|
|
|
def get_clients(ctx, roles):
    """Yield (client_id, remote) for each given 'client.<id>' role."""
    PREFIX = 'client.'
    for role in roles:
        assert isinstance(role, basestring)
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        # Each client role is expected to live on exactly one remote.
        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
        yield (id_, remote)
|
2011-07-02 01:15:52 +00:00
|
|
|
|
|
|
|
def get_user():
    """Return 'username@hostname' identifying who is running this job."""
    return '{user}@{host}'.format(
        user=getpass.getuser(),
        host=socket.gethostname(),
        )
|
2011-07-07 18:43:35 +00:00
|
|
|
|
|
|
|
def read_config(ctx):
    """
    Load ~/.teuthology.yaml into ctx.teuthology_config, merging all YAML
    documents found in the file.
    """
    ctx.teuthology_config = {}
    filename = os.path.join(os.environ['HOME'], '.teuthology.yaml')
    with open(filename) as f:
        for doc in yaml.safe_load_all(f):
            ctx.teuthology_config.update(doc)
|
2011-08-31 20:56:42 +00:00
|
|
|
|
2011-11-09 06:06:43 +00:00
|
|
|
def get_mon_names(ctx):
    """Return a list of all mon.* role names in the cluster."""
    return [
        role
        for _remote, host_roles in ctx.cluster.remotes.items()
        for role in host_roles
        if role.startswith('mon.')
    ]
|
|
|
|
|
|
|
|
# return the "first" mon (alphanumerically, for lack of anything better)
def get_first_mon(ctx, config):
    """Return the alphanumerically-first mon role name (config is unused)."""
    mon_names = sorted(get_mon_names(ctx))
    firstmon = mon_names[0]
    assert firstmon
    return firstmon
|
2011-09-08 00:50:12 +00:00
|
|
|
|
|
|
|
def replace_all_with_clients(cluster, config):
    """
    Converts a dict containing a key all to one
    mapping all clients to the value of config['all']
    """
    assert isinstance(config, dict), 'config must be a dict'
    if 'all' not in config:
        return config
    assert len(config) == 1, \
        "config cannot have 'all' and specific clients listed"
    all_value = config['all']
    return dict(
        ('client.{id}'.format(id=client), all_value)
        for client in all_roles_of_type(cluster, 'client')
        )
|
2011-11-17 21:06:36 +00:00
|
|
|
|
|
|
|
def deep_merge(a, b):
    """
    Recursively merge b into a and return the result.

    None on either side yields the other value; lists are concatenated
    (a is extended in place); dicts are merged key-by-key (a is updated
    in place); any other pair resolves to b.
    """
    if a is None:
        return b
    if b is None:
        return a
    if isinstance(a, list):
        assert isinstance(b, list)
        a.extend(b)
        return a
    if isinstance(a, dict):
        assert isinstance(b, dict)
        for (k, v) in b.iteritems():
            a[k] = deep_merge(a[k], v) if k in a else v
        return a
    return b
|
2012-02-22 17:18:17 +00:00
|
|
|
|
2013-01-23 20:37:39 +00:00
|
|
|
def get_valgrind_args(testdir, name, v):
    """
    Build the command-prefix list to run daemon `name` under valgrind.

    v may be None (returns []), a single option string, or a list of
    options.  memcheck/helgrind runs get XML output; any other tool
    gets a plain log file under <testdir>/archive/log/valgrind.
    """
    if v is None:
        return []
    if not isinstance(v, list):
        v = [v]
    val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir)
    # Only the output arguments differ between the two tool families.
    if '--tool=memcheck' in v or '--tool=helgrind' in v:
        output_args = [
            '--xml=yes',
            '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
        ]
    else:
        output_args = [
            '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
        ]
    extra_args = [
        '{tdir}/chdir-coredump'.format(tdir=testdir),
        'valgrind',
        '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
    ] + output_args + v
    log.debug('running %s under valgrind with args %s', name, extra_args)
    return extra_args
|