from cStringIO import StringIO

import configobj
import datetime
import getpass
import json
import logging
import os
import socket
import sys
import tarfile
import time
import urllib2
import urlparse
import yaml

from teuthology import safepath
from teuthology import lockstatus
from .orchestra import run

log = logging.getLogger(__name__)

stamp = datetime.datetime.now().strftime("%y%m%d%H%M")

is_vm = lambda x: x.startswith('vpm') or x.startswith('ubuntu@vpm')

is_arm = lambda x: (x.startswith('tala') or x.startswith('ubuntu@tala') or
                    x.startswith('saya') or x.startswith('ubuntu@saya'))


def get_testdir(ctx):
    if 'test_path' in ctx.teuthology_config:
        return ctx.teuthology_config['test_path']
    test_user = get_test_user(ctx)
    # FIXME this ideally should use os.path.expanduser() in the future, in
    # case $HOME isn't /home/$USER - e.g. on a Mac. However, since we're
    # executing this on the server side, it won't work properly.
    return ctx.teuthology_config.get(
        'test_path', '/home/%s/cephtest' % test_user)


def get_test_user(ctx):
    """
    :returns: str -- the user to run tests as on remote hosts
    """
    return ctx.teuthology_config.get('test_user', 'ubuntu')


def get_archive_dir(ctx):
    test_dir = get_testdir(ctx)
    return os.path.normpath(os.path.join(test_dir, 'archive'))


def get_ceph_binary_url(package=None,
                        branch=None, tag=None, sha1=None, dist=None,
                        flavor=None, format=None, arch=None):
    BASE = 'http://gitbuilder.ceph.com/{package}-{format}-{dist}-{arch}-{flavor}/'.format(
        package=package,
        flavor=flavor,
        arch=arch,
        format=format,
        dist=dist,
        )

    if sha1 is not None:
        assert branch is None, "cannot set both sha1 and branch"
        assert tag is None, "cannot set both sha1 and tag"
    else:
        # gitbuilder uses remote-style ref names for branches, mangled to
        # have underscores instead of slashes; e.g. origin_master
        if tag is not None:
            ref = tag
            assert branch is None, "cannot set both branch and tag"
        else:
            if branch is None:
                branch = 'master'
            ref = branch

        sha1_url = urlparse.urljoin(BASE, 'ref/{ref}/sha1'.format(ref=ref))
        log.debug('Translating ref to sha1 using url %s', sha1_url)

        try:
            sha1_fp = urllib2.urlopen(sha1_url)
            sha1 = sha1_fp.read().rstrip('\n')
            sha1_fp.close()
        except urllib2.HTTPError:
            log.error('Failed to get url %s', sha1_url)
            raise

    log.debug('Using %s %s sha1 %s', package, format, sha1)
    bindir_url = urlparse.urljoin(BASE, 'sha1/{sha1}/'.format(sha1=sha1))
    return (sha1, bindir_url)
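
# Illustrative sketch (not executed): for package='ceph', format='deb',
# dist='oneiric', arch='x86_64', flavor='basic' and branch='master', the
# URLs above resolve roughly as:
#
#   BASE     -> http://gitbuilder.ceph.com/ceph-deb-oneiric-x86_64-basic/
#   sha1_url -> <BASE>ref/master/sha1   (fetched to translate the ref)
#   returns  -> (<sha1>, <BASE>sha1/<sha1>/)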


def feed_many_stdins(fp, processes):
    while True:
        data = fp.read(8192)
        if not data:
            break
        for proc in processes:
            proc.stdin.write(data)


def feed_many_stdins_and_close(fp, processes):
    feed_many_stdins(fp, processes)
    for proc in processes:
        proc.stdin.close()


def get_mons(roles, ips):
    mons = {}
    mon_ports = {}
    mon_id = 0
    for idx, roles_for_host in enumerate(roles):
        for role in roles_for_host:
            if not role.startswith('mon.'):
                continue
            if ips[idx] not in mon_ports:
                mon_ports[ips[idx]] = 6789
            else:
                mon_ports[ips[idx]] += 1
            addr = '{ip}:{port}'.format(
                ip=ips[idx],
                port=mon_ports[ips[idx]],
                )
            mon_id += 1
            mons[role] = addr
    assert mons
    return mons
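
# Illustrative sketch (not executed): two hosts where the first carries two
# monitors get per-IP ports starting at 6789:
#
#   get_mons([['mon.a', 'mon.b'], ['mon.c']], ['10.0.0.1', '10.0.0.2'])
#   => {'mon.a': '10.0.0.1:6789', 'mon.b': '10.0.0.1:6790',
#       'mon.c': '10.0.0.2:6789'}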


def generate_caps(type_):
    defaults = dict(
        osd=dict(
            mon='allow *',
            osd='allow *',
            ),
        mds=dict(
            mon='allow *',
            osd='allow *',
            mds='allow',
            ),
        client=dict(
            mon='allow rw',
            osd='allow rwx',
            mds='allow',
            ),
        )
    for subsystem, capability in defaults[type_].items():
        yield '--cap'
        yield subsystem
        yield capability
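
# Illustrative sketch (not executed): the generator flattens the defaults
# into ceph-authtool-style arguments, e.g. for an osd (dict ordering may
# vary):
#
#   list(generate_caps('osd'))
#   => ['--cap', 'mon', 'allow *', '--cap', 'osd', 'allow *']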


def skeleton_config(ctx, roles, ips):
    """
    Returns a ConfigObj that's prefilled with a skeleton config.

    Use conf[section][key]=value or conf.merge to change it.

    Use conf.write to write it out, override .filename first if you want.
    """
    path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
    with open(path, 'r') as t:
        skconf = t.read().format(testdir=get_testdir(ctx))
    conf = configobj.ConfigObj(StringIO(skconf), file_error=True)
    mons = get_mons(roles=roles, ips=ips)
    for role, addr in mons.iteritems():
        conf.setdefault(role, {})
        conf[role]['mon addr'] = addr
    # set up standby mds's
    for roles_subset in roles:
        for role in roles_subset:
            if role.startswith('mds.'):
                conf.setdefault(role, {})
                if role.find('-s-') != -1:
                    standby_mds = role[role.find('-s-') + 3:]
                    conf[role]['mds standby for name'] = standby_mds
    return conf


def roles_of_type(roles_for_host, type_):
    prefix = '{type}.'.format(type=type_)
    for name in roles_for_host:
        if not name.startswith(prefix):
            continue
        id_ = name[len(prefix):]
        yield id_


def all_roles(cluster):
    for _, roles_for_host in cluster.remotes.iteritems():
        for name in roles_for_host:
            yield name


def all_roles_of_type(cluster, type_):
    prefix = '{type}.'.format(type=type_)
    for _, roles_for_host in cluster.remotes.iteritems():
        for name in roles_for_host:
            if not name.startswith(prefix):
                continue
            id_ = name[len(prefix):]
            yield id_


def is_type(type_):
    """
    Returns a matcher function for whether role is of type given.
    """
    prefix = '{type}.'.format(type=type_)

    def _is_type(role):
        return role.startswith(prefix)
    return _is_type
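
# Illustrative sketch (not executed): the returned closure is handy as a
# filter predicate:
#
#   is_osd = is_type('osd')
#   is_osd('osd.3')     => True
#   is_osd('client.0')  => False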


def num_instances_of_type(cluster, type_):
    remotes_and_roles = cluster.remotes.items()
    roles = [roles for (remote, roles) in remotes_and_roles]
    prefix = '{type}.'.format(type=type_)
    num = sum(
        sum(1 for role in hostroles if role.startswith(prefix))
        for hostroles in roles)
    return num


def create_simple_monmap(ctx, remote, conf):
    """
    Writes a simple monmap based on current ceph.conf into <tmpdir>/monmap.

    Assumes ceph_conf is up to date.

    Assumes mon sections are named "mon.*", with the dot.
    """
    def gen_addresses():
        for section, data in conf.iteritems():
            PREFIX = 'mon.'
            if not section.startswith(PREFIX):
                continue
            name = section[len(PREFIX):]
            addr = data['mon addr']
            yield (name, addr)

    addresses = list(gen_addresses())
    assert addresses, "There are no monitors in config!"
    log.debug('Ceph mon addresses: %s', addresses)

    testdir = get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'monmaptool',
        '--create',
        '--clobber',
        ]
    for (name, addr) in addresses:
        args.extend(('--add', name, addr))
    args.extend([
        '--print',
        '{tdir}/monmap'.format(tdir=testdir),
        ])
    remote.run(
        args=args,
        )
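
# Illustrative sketch (not executed): with one monitor 'a' at 10.0.0.1:6789
# and a testdir of /home/ubuntu/cephtest, the assembled command is roughly:
#
#   adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage \
#       monmaptool --create --clobber --add a 10.0.0.1:6789 \
#       --print /home/ubuntu/cephtest/monmap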


def write_file(remote, path, data):
    remote.run(
        args=[
            'python',
            '-c',
            'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
            path,
            ],
        stdin=data,
        )


def sudo_write_file(remote, path, data, perms=None):
    permargs = []
    if perms:
        permargs = [run.Raw('&&'), 'sudo', 'chmod', perms, path]
    remote.run(
        args=[
            'sudo',
            'python',
            '-c',
            'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
            path,
            ] + permargs,
        stdin=data,
        )


def move_file(remote, from_path, to_path, sudo=False):
    # need to stat the file first, to make sure we
    # maintain the same permissions
    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'stat',
        '-c',
        '\"%a\"',
        to_path,
        ])
    proc = remote.run(
        args=args,
        stdout=StringIO(),
        )
    perms = proc.stdout.getvalue().rstrip().strip('\"')

    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'mv',
        '--',
        from_path,
        to_path,
        ])
    remote.run(
        args=args,
        stdout=StringIO(),
        )

    # reset the file back to the original permissions
    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'chmod',
        perms,
        to_path,
        ])
    remote.run(
        args=args,
        stdout=StringIO(),
        )


def delete_file(remote, path, sudo=False, force=False):
    args = []
    if sudo:
        args.append('sudo')
    args.extend(['rm'])
    if force:
        args.extend(['-f'])
    args.extend([
        '--',
        path,
        ])
    remote.run(
        args=args,
        stdout=StringIO(),
        )


def remove_lines_from_file(remote, path, line_is_valid_test,
                           string_to_test_for):
    # read in the specified file
    in_data = get_file(remote, path, False)
    out_data = ""

    first_line = True
    # use the 'line_is_valid_test' function to remove unwanted lines
    for line in in_data.split('\n'):
        if line_is_valid_test(line, string_to_test_for):
            if not first_line:
                out_data += '\n'
            else:
                first_line = False

            out_data += '{line}'.format(line=line)

        else:
            log.info('removing line: {bad_line}'.format(bad_line=line))

    # get a temp file path on the remote host to write to,
    # we don't want to blow away the remote file and then have the
    # network drop out
    temp_file_path = remote_mktemp(remote)

    # write out the data to a temp file
    write_file(remote, temp_file_path, out_data)

    # then do a 'mv' to the actual file location
    move_file(remote, temp_file_path, path)


def append_lines_to_file(remote, path, lines, sudo=False):
    temp_file_path = remote_mktemp(remote)

    data = get_file(remote, path, sudo)

    # add the additional data and write it back out, using a temp file
    # in case connectivity is lost, and then mv it to the
    # actual desired location
    data += lines
    write_file(remote, temp_file_path, data)

    # then do a 'mv' to the actual file location
    move_file(remote, temp_file_path, path)


def remote_mktemp(remote, sudo=False):
    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'python',
        '-c',
        'import os; import tempfile; (fd,fname) = tempfile.mkstemp(); os.close(fd); print fname.rstrip()',
        ])
    proc = remote.run(
        args=args,
        stdout=StringIO(),
        )
    # strip the trailing newline that `print` adds to the captured output,
    # so callers get a clean path
    data = proc.stdout.getvalue().strip()
    return data


def create_file(remote, path, data="", permissions='644', sudo=False):
    """
    Create a file on the remote host.
    """
    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'touch',
        path,
        run.Raw('&&'),
        'chmod',
        permissions,
        '--',
        path,
        ])
    remote.run(
        args=args,
        stdout=StringIO(),
        )
    # now write out the data if any was passed in
    if data != "":
        append_lines_to_file(remote, path, data, sudo)


def get_file(remote, path, sudo=False):
    """
    Read a file from remote host into memory.
    """
    args = []
    if sudo:
        args.append('sudo')
    args.extend([
        'cat',
        '--',
        path,
        ])
    proc = remote.run(
        args=args,
        stdout=StringIO(),
        )
    data = proc.stdout.getvalue()
    return data


def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    proc = remote.run(
        args=[
            'sudo',
            'tar',
            'c',
            '-f', '-',
            '-C', remotedir,
            '--',
            '.',
            ],
        stdout=run.PIPE,
        wait=False,
        )
    tar = tarfile.open(mode='r|', fileobj=proc.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            pass
        elif ti.isfile():
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            tar.makefile(ti, targetpath=os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
            continue
    proc.exitstatus.get()


def pull_directory_tarball(remote, remotedir, localfile):
    """
    Copy a remote directory to a local tarball.
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localfile)
    out = open(localfile, 'w')
    proc = remote.run(
        args=[
            'sudo',
            'tar',
            'cz',
            '-f', '-',
            '-C', remotedir,
            '--',
            '.',
            ],
        stdout=out,
        wait=False,
        )
    proc.exitstatus.get()
    out.close()


# returns map of devices to device id links:
# /dev/sdb: /dev/disk/by-id/wwn-0xf00bad
def get_wwn_id_map(remote, devs):
    stdout = None
    try:
        r = remote.run(
            args=[
                'ls',
                '-l',
                '/dev/disk/by-id/wwn-*',
                ],
            stdout=StringIO(),
            )
        stdout = r.stdout.getvalue()
    except Exception:
        log.error('Failed to get wwn devices! Using /dev/sd* devices...')
        return dict((d, d) for d in devs)

    devmap = {}

    # lines will be:
    # lrwxrwxrwx 1 root root 9 Jan 22 14:58 /dev/disk/by-id/wwn-0x50014ee002ddecaf -> ../../sdb
    for line in stdout.splitlines():
        comps = line.split(' ')
        # comps[-1] should be:
        # ../../sdb
        rdev = comps[-1]
        # translate to /dev/sdb
        dev = '/dev/{d}'.format(d=rdev.split('/')[-1])

        # comps[-3] should be:
        # /dev/disk/by-id/wwn-0x50014ee002ddecaf
        iddev = comps[-3]

        if dev in devs:
            devmap[dev] = iddev

    return devmap
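
# Illustrative sketch (not executed): given an `ls -l` line such as
#
#   lrwxrwxrwx 1 root root 9 Jan 22 14:58 /dev/disk/by-id/wwn-0x50014ee002ddecaf -> ../../sdb
#
# and devs=['/dev/sdb'], the parser above yields:
#
#   {'/dev/sdb': '/dev/disk/by-id/wwn-0x50014ee002ddecaf'}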


def get_scratch_devices(remote):
    """
    Read the scratch disk list from remote host
    """
    devs = []
    try:
        file_data = get_file(remote, "/scratch_devs")
        devs = file_data.split()
    except Exception:
        r = remote.run(
            args=['ls', run.Raw('/dev/[sv]d?')],
            stdout=StringIO(),
            )
        devs = r.stdout.getvalue().strip().split('\n')

    # Remove root device (vm guests) from the disk list; iterate over a
    # copy so removal doesn't skip entries
    for dev in list(devs):
        if 'vda' in dev:
            devs.remove(dev)
            log.warn("Removing root device: %s from device list" % dev)

    log.debug('devs={d}'.format(d=devs))

    retval = []
    for dev in devs:
        try:
            # FIXME: Split this into multiple calls.
            remote.run(
                args=[
                    # node exists
                    'stat',
                    dev,
                    run.Raw('&&'),
                    # readable
                    'sudo', 'dd', 'if=%s' % dev, 'of=/dev/null', 'count=1',
                    run.Raw('&&'),
                    # not mounted
                    run.Raw('!'),
                    'mount',
                    run.Raw('|'),
                    'grep', '-q', dev,
                    ],
                )
            retval.append(dev)
        except run.CommandFailedError:
            log.debug("get_scratch_devices: %s is in use" % dev)
    return retval


def wait_until_healthy(ctx, remote):
    """Wait until a Ceph cluster is healthy."""
    testdir = get_testdir(ctx)
    while True:
        r = remote.run(
            args=[
                'adjust-ulimits',
                'ceph-coverage',
                '{tdir}/archive/coverage'.format(tdir=testdir),
                'ceph',
                'health',
                ],
            stdout=StringIO(),
            logger=log.getChild('health'),
            )
        out = r.stdout.getvalue()
        log.debug('Ceph health: %s', out.rstrip('\n'))
        if out.split(None, 1)[0] == 'HEALTH_OK':
            break
        time.sleep(1)


def wait_until_osds_up(ctx, cluster, remote):
    """Wait until all Ceph OSDs are booted."""
    num_osds = num_instances_of_type(cluster, 'osd')
    testdir = get_testdir(ctx)
    while True:
        r = remote.run(
            args=[
                'adjust-ulimits',
                'ceph-coverage',
                '{tdir}/archive/coverage'.format(tdir=testdir),
                'ceph',
                'osd', 'dump', '--format=json',
                ],
            stdout=StringIO(),
            logger=log.getChild('health'),
            )
        out = r.stdout.getvalue()
        j = json.loads('\n'.join(out.split('\n')[1:]))
        up = len(j['osds'])
        log.debug('%d of %d OSDs are up' % (up, num_osds))
        if up == num_osds:
            break
        time.sleep(1)


def wait_until_fuse_mounted(remote, fuse, mountpoint):
    while True:
        proc = remote.run(
            args=[
                'stat',
                '--file-system',
                '--printf=%T\n',
                '--',
                mountpoint,
                ],
            stdout=StringIO(),
            )
        fstype = proc.stdout.getvalue().rstrip('\n')
        if fstype == 'fuseblk':
            break
        log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(
            fstype=fstype))

        # it shouldn't have exited yet; exposes some trivial problems
        assert not fuse.exitstatus.ready()

        time.sleep(5)
    log.info('ceph-fuse is mounted on %s', mountpoint)


def reconnect(ctx, timeout, remotes=None):
    """
    Connect to all the machines in ctx.cluster.

    Presumably, some of them won't be up. Handle this
    by waiting for them, unless the wait time exceeds
    the specified timeout.

    ctx needs to contain the cluster of machines you
    wish it to try and connect to, as well as a config
    holding the ssh keys for each of them. As long as it
    contains this data, you can construct a context
    that is a subset of your full cluster.
    """
    log.info('Re-opening connections...')
    starttime = time.time()

    if remotes:
        need_reconnect = remotes
    else:
        need_reconnect = ctx.cluster.remotes.keys()

    for r in need_reconnect:
        r.ssh.close()

    while need_reconnect:
        # iterate over a copy, since successfully reconnected remotes are
        # removed from the list as we go
        for remote in list(need_reconnect):
            try:
                log.info('trying to connect to %s', remote.name)
                key = ctx.config['targets'][remote.name]
                from .orchestra import connection
                remote.ssh = connection.connect(
                    user_at_host=remote.name,
                    host_key=key,
                    keep_alive=True,
                    )
            except Exception:
                if time.time() - starttime > timeout:
                    raise
            else:
                need_reconnect.remove(remote)

        log.debug('waited {elapsed}'.format(
            elapsed=str(time.time() - starttime)))
        time.sleep(1)


def write_secret_file(ctx, remote, role, keyring, filename):
    testdir = get_testdir(ctx)
    remote.run(
        args=[
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'ceph-authtool',
            '--name={role}'.format(role=role),
            '--print-key',
            keyring,
            run.Raw('>'),
            filename,
            ],
        )


def get_clients(ctx, roles):
    for role in roles:
        assert isinstance(role, basestring)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
        yield (id_, remote)
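
# Illustrative sketch (not executed): for roles ['client.0', 'client.1'],
# this yields ('0', <remote for client.0>) and ('1', <remote for client.1>),
# pairing each client id with the host it lives on.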


def get_user():
    return getpass.getuser() + '@' + socket.gethostname()


def read_config(ctx):
    ctx.teuthology_config = {}
    filename = os.path.join(os.environ['HOME'], '.teuthology.yaml')

    if not os.path.exists(filename):
        log.debug("%s not found", filename)
        return

    with file(filename) as f:
        g = yaml.safe_load_all(f)
        for new in g:
            ctx.teuthology_config.update(new)


def get_mon_names(ctx):
    mons = []
    for remote, roles in ctx.cluster.remotes.items():
        for role in roles:
            if not role.startswith('mon.'):
                continue
            mons.append(role)
    return mons


# return the "first" mon (alphanumerically, for lack of anything better)
def get_first_mon(ctx, config):
    firstmon = sorted(get_mon_names(ctx))[0]
    assert firstmon
    return firstmon


def replace_all_with_clients(cluster, config):
    """
    Converts a dict containing a key all to one
    mapping all clients to the value of config['all']
    """
    assert isinstance(config, dict), 'config must be a dict'
    if 'all' not in config:
        return config
    norm_config = {}
    assert len(config) == 1, \
        "config cannot have 'all' and specific clients listed"
    for client in all_roles_of_type(cluster, 'client'):
        norm_config['client.{id}'.format(id=client)] = config['all']
    return norm_config
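
# Illustrative sketch (not executed): on a cluster with clients 0 and 1,
#
#   replace_all_with_clients(cluster, {'all': {'rate': 5}})
#   => {'client.0': {'rate': 5}, 'client.1': {'rate': 5}}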


def deep_merge(a, b):
    if a is None:
        return b
    if b is None:
        return a
    if isinstance(a, list):
        assert isinstance(b, list)
        a.extend(b)
        return a
    if isinstance(a, dict):
        assert isinstance(b, dict)
        for (k, v) in b.iteritems():
            if k in a:
                a[k] = deep_merge(a[k], v)
            else:
                a[k] = v
        return a
    return b
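
# Illustrative sketch (not executed): dicts merge recursively, lists
# concatenate, and scalars from b win:
#
#   deep_merge({'x': 1, 'sub': {'a': 1}}, {'x': 9, 'sub': {'b': 2}})
#   => {'x': 9, 'sub': {'a': 1, 'b': 2}}
#
# Note that `a` is modified in place.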


def get_valgrind_args(testdir, name, preamble, v):
    """
    Build a command line for running valgrind.

    testdir - test results directory
    name - name of daemon (for naming the log file)
    preamble - stuff we should run before valgrind
    v - valgrind arguments
    """
    if v is None:
        return []
    if not isinstance(v, list):
        v = [v]
    val_path = '/var/log/ceph/valgrind'
    if '--tool=memcheck' in v or '--tool=helgrind' in v:
        extra_args = [
            'valgrind',
            '--num-callers=50',
            '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
            '--xml=yes',
            '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
            ]
    else:
        extra_args = [
            'valgrind',
            '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
            '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
            ]
    args = [
        'cd', testdir,
        run.Raw('&&'),
        ] + preamble + extra_args + v
    log.debug('running %s under valgrind with args %s', name, args)
    return args
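
# Illustrative sketch (not executed): for name='osd.0', an empty preamble
# and v=['--tool=memcheck'], the assembled command is roughly:
#
#   cd <testdir> && valgrind --num-callers=50 \
#       --suppressions=<testdir>/valgrind.supp --xml=yes \
#       --xml-file=/var/log/ceph/valgrind/osd.0.log --tool=memcheck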


def stop_daemons_of_type(ctx, type_):
    log.info('Shutting down %s daemons...' % type_)
    exc_info = (None, None, None)
    for daemon in ctx.daemons.iter_daemons_of_role(type_):
        try:
            daemon.stop()
        except (run.CommandFailedError,
                run.CommandCrashedError,
                run.ConnectionLostError):
            exc_info = sys.exc_info()
            log.exception('Saw exception from %s.%s', daemon.role, daemon.id_)
    if exc_info != (None, None, None):
        raise exc_info[0], exc_info[1], exc_info[2]


def get_system_type(remote):
    """
    Return this system type (deb or rpm)
    """
    r = remote.run(
        args=[
            'sudo', 'lsb_release', '-is',
            ],
        stdout=StringIO(),
        )
    system_value = r.stdout.getvalue().strip()
    log.debug("System to be installed: %s" % system_value)
    if system_value in ['Ubuntu', 'Debian']:
        return "deb"
    if system_value in ['CentOS', 'Fedora', 'RedHatEnterpriseServer']:
        return "rpm"
    return system_value


def get_distro(ctx):
    try:
        os_type = ctx.config.get('os_type', ctx.os_type)
    except AttributeError:
        os_type = 'ubuntu'
    try:
        return ctx.config['downburst'].get('distro', os_type)
    except KeyError:
        return os_type
    except AttributeError:
        return ctx.os_type