# ceph/qa/tasks/s3readwrite.py
"""
Run rgw s3 readwrite tests
"""
from cStringIO import StringIO
import base64
import contextlib
import logging
import os
import random
import string
import yaml
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.orchestra.connection import split_user
log = logging.getLogger(__name__)


@contextlib.contextmanager
def download(ctx, config):
    """
    Download the s3-tests from the git builder.
    Remove the downloaded s3-tests checkout upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Downloading s3-tests...')
    testdir = teuthology.get_testdir(ctx)
    for (client, cconf) in config.items():
        branch = cconf.get('force-branch', None)
        if not branch:
            branch = cconf.get('branch', 'master')
        sha1 = cconf.get('sha1')
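        # Clone the requested branch; if an explicit sha1 is given, the
        # checkout is hard-reset to it below so the test version is exact.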
        ctx.cluster.only(client).run(
            args=[
                'git', 'clone',
                '-b', branch,
                teuth_config.ceph_git_base_url + 's3-tests.git',
                '{tdir}/s3-tests'.format(tdir=testdir),
            ],
        )
        if sha1 is not None:
            ctx.cluster.only(client).run(
                args=[
                    'cd', '{tdir}/s3-tests'.format(tdir=testdir),
                    run.Raw('&&'),
                    'git', 'reset', '--hard', sha1,
                ],
            )
    try:
        yield
    finally:
        log.info('Removing s3-tests...')
        testdir = teuthology.get_testdir(ctx)
        for client in config:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-rf',
                    '{tdir}/s3-tests'.format(tdir=testdir),
                ],
            )


def _config_user(s3tests_conf, section, user):
    """
    Configure users for this section by stashing away keys, ids, and
    email addresses.
    """
    s3tests_conf[section].setdefault('user_id', user)
    s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
    s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
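    # Generate credentials in roughly AWS form: a 20-character uppercase
    # access key and a 40-byte random secret, base64-encoded.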
    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
    s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))


@contextlib.contextmanager
def create_users(ctx, config):
    """
    Create a default s3 user.
    """
    assert isinstance(config, dict)
    log.info('Creating rgw users...')
    testdir = teuthology.get_testdir(ctx)
    users = {'s3': 'foo'}
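    # Map each config section ('s3') to a base user name; the actual uid
    # becomes '<user>.<client>' when _config_user() runs below.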
    cached_client_user_names = dict()
    for client in config['clients']:
        cached_client_user_names[client] = dict()
        s3tests_conf = config['s3tests_conf'][client]
        s3tests_conf.setdefault('readwrite', {})
        s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
        s3tests_conf['readwrite'].setdefault('readers', 10)
        s3tests_conf['readwrite'].setdefault('writers', 3)
        s3tests_conf['readwrite'].setdefault('duration', 300)
        s3tests_conf['readwrite'].setdefault('files', {})
        rwconf = s3tests_conf['readwrite']
        rwconf['files'].setdefault('num', 10)
        rwconf['files'].setdefault('size', 2000)
        rwconf['files'].setdefault('stddev', 500)
        for section, user in users.iteritems():
            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
            log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
                                                                client=client))

            # stash the 'delete_user' flag along with the user name for easier cleanup
            delete_this_user = True
            if 'delete_user' in s3tests_conf['s3']:
                delete_this_user = s3tests_conf['s3']['delete_user']
                log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client))
            cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)

            # skip actual user creation if the create_user flag is set to False for this client
            if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False:
                log.debug('create_user set to False, skipping user creation for {client}'.format(client=client))
                continue
            else:
                ctx.cluster.only(client).run(
                    args=[
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client,
                        'user', 'create',
                        '--uid', s3tests_conf[section]['user_id'],
                        '--display-name', s3tests_conf[section]['display_name'],
                        '--access-key', s3tests_conf[section]['access_key'],
                        '--secret', s3tests_conf[section]['secret_key'],
                        '--email', s3tests_conf[section]['email'],
                    ],
                )
    try:
        yield
    finally:
        for client in config['clients']:
            for section, user in users.iteritems():
                #uid = '{user}.{client}'.format(user=user, client=client)
                real_uid, delete_this_user = cached_client_user_names[client][section+user]
                if delete_this_user:
                    ctx.cluster.only(client).run(
                        args=[
                            'adjust-ulimits',
                            'ceph-coverage',
                            '{tdir}/archive/coverage'.format(tdir=testdir),
                            'radosgw-admin',
                            '-n', client,
                            'user', 'rm',
                            '--uid', real_uid,
                            '--purge-data',
                        ],
                    )
                else:
                    log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client))


@contextlib.contextmanager
def configure(ctx, config):
    """
    Configure the s3-tests. This includes the running of the
    bootstrap code and the updating of local conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring s3-readwrite-tests...')
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
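            # Resolve the configured rgw_server role to its SSH target and
            # take the bare hostname from the 'user@host' string.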
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['s3']['host'] = host
        else:
            s3tests_conf['s3']['host'] = 'localhost'

        def_conf = s3tests_conf['DEFAULT']
        s3tests_conf['s3'].setdefault('port', def_conf['port'])
        s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
                run.Raw('&&'),
                './bootstrap',
            ],
        )
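
        # Serialize the merged s3/readwrite settings and stash them in the
        # archive dir, where run_tests() picks them up for the test runner.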
        conf_fp = StringIO()
        conf = dict(
            s3=s3tests_conf['s3'],
            readwrite=s3tests_conf['readwrite'],
        )
        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
            data=conf_fp.getvalue(),
        )
    yield


@contextlib.contextmanager
def run_tests(ctx, config):
    """
    Run the s3readwrite tests after everything is set up.

    :param ctx: Context passed to task
    :param config: specific configuration information
    """
    assert isinstance(config, dict)
    testdir = teuthology.get_testdir(ctx)
Revert "Move output in task/s3readwrite" This reverts commit 26a33c3a5aa2aedb52eb5ce140c76503f099b253. This is tryign to create the archive dir on the remote host: 2014-12-29T12:15:30.213 INFO:teuthology.orchestra.run.plana31:Running: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' 2014-12-29T12:15:30.231 ERROR:teuthology.contextutil:Saw exception from nested tasks Traceback (most recent call last): File "/home/teuthworker/src/teuthology_master/teuthology/contextutil.py", line 28, in nested vars.append(enter()) File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ return self.gen.next() File "/var/lib/teuthworker/src/ceph-qa-suite_next/tasks/s3readwrite.py", line 241, in run_tests ctx.cluster.only(client).run(args=['mkdir', '-p', archive_dir]) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/cluster.py", line 64, in run return [remote.run(**kwargs) for remote in remotes] File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/remote.py", line 128, in run r = self._runner(client=self.ssh, name=self.shortname, **kwargs) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 368, in run r.wait() File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 106, in wait exitstatus=status, node=self.hostname) CommandFailedError: Command failed on plana31 with status 1: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' ...but it should only be on the local host.
2014-12-29 20:39:26 +00:00
    for client, client_config in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
        args = [
            '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
        ]
        if client_config is not None and 'extra_args' in client_config:
            args.extend(client_config['extra_args'])
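
        # The runner consumes the YAML config written by configure(),
        # fed to it here on stdin.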
Revert "Move output in task/s3readwrite" This reverts commit 26a33c3a5aa2aedb52eb5ce140c76503f099b253. This is tryign to create the archive dir on the remote host: 2014-12-29T12:15:30.213 INFO:teuthology.orchestra.run.plana31:Running: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' 2014-12-29T12:15:30.231 ERROR:teuthology.contextutil:Saw exception from nested tasks Traceback (most recent call last): File "/home/teuthworker/src/teuthology_master/teuthology/contextutil.py", line 28, in nested vars.append(enter()) File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ return self.gen.next() File "/var/lib/teuthworker/src/ceph-qa-suite_next/tasks/s3readwrite.py", line 241, in run_tests ctx.cluster.only(client).run(args=['mkdir', '-p', archive_dir]) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/cluster.py", line 64, in run return [remote.run(**kwargs) for remote in remotes] File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/remote.py", line 128, in run r = self._runner(client=self.ssh, name=self.shortname, **kwargs) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 368, in run r.wait() File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 106, in wait exitstatus=status, node=self.hostname) CommandFailedError: Command failed on plana31 with status 1: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' ...but it should only be on the local host.
2014-12-29 20:39:26 +00:00
        ctx.cluster.only(client).run(
            args=args,
            stdin=conf,
        )
    yield


@contextlib.contextmanager
def task(ctx, config):
    """
    Run the s3tests-test-readwrite suite against rgw.

    To run all tests on all clients::

        tasks:
        - ceph:
        - rgw:
        - s3readwrite:

    To restrict testing to particular clients::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite: [client.0]

    To run against a server on client.1::

        tasks:
        - ceph:
        - rgw: [client.1]
        - s3readwrite:
            client.0:
              rgw_server: client.1

    To pass extra test arguments::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              readwrite:
                bucket: mybucket
                readers: 10
                writers: 3
                duration: 600
                files:
                  num: 10
                  size: 2000
                  stddev: 500
            client.1:
              ...

    To override s3 configuration::

        tasks:
        - ceph:
        - rgw: [client.0]
        - s3readwrite:
            client.0:
              s3:
                user_id: myuserid
                display_name: myname
                email: my@email
                access_key: myaccesskey
                secret_key: mysecretkey
    """
    assert hasattr(ctx, 'rgw'), 's3readwrite must run after the rgw task'
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task s3readwrite only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
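    # A bare list of clients becomes a dict with None per-client configs;
    # these are filled in with defaults below.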
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))

    log.debug('in s3readwrite, config is %s', config)

    s3tests_conf = {}
    for client in clients:
        if config[client] is None:
            config[client] = {}
        config[client].setdefault('s3', {})
        config[client].setdefault('readwrite', {})

        endpoint = ctx.rgw.role_endpoints.get(client)
        assert endpoint, 's3readwrite: no rgw endpoint for {}'.format(client)
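
        # The DEFAULT section carries the endpoint's port and TLS flag;
        # configure() copies them into the 's3' section unless overridden.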
        s3tests_conf[client] = ({
            'DEFAULT': {
                'port': endpoint.port,
                'is_secure': endpoint.cert is not None,
            },
            'readwrite': config[client]['readwrite'],
            's3': config[client]['s3'],
        })
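
    # Chain the setup/teardown stages; contextutil.nested unwinds them in
    # reverse order on exit (users are deleted, then the s3-tests checkout
    # is removed).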
    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: create_users(ctx=ctx, config=dict(
            clients=clients,
            s3tests_conf=s3tests_conf,
        )),
        lambda: configure(ctx=ctx, config=dict(
            clients=config,
            s3tests_conf=s3tests_conf,
        )),
Revert "Move output in task/s3readwrite" This reverts commit 26a33c3a5aa2aedb52eb5ce140c76503f099b253. This is tryign to create the archive dir on the remote host: 2014-12-29T12:15:30.213 INFO:teuthology.orchestra.run.plana31:Running: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' 2014-12-29T12:15:30.231 ERROR:teuthology.contextutil:Saw exception from nested tasks Traceback (most recent call last): File "/home/teuthworker/src/teuthology_master/teuthology/contextutil.py", line 28, in nested vars.append(enter()) File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ return self.gen.next() File "/var/lib/teuthworker/src/ceph-qa-suite_next/tasks/s3readwrite.py", line 241, in run_tests ctx.cluster.only(client).run(args=['mkdir', '-p', archive_dir]) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/cluster.py", line 64, in run return [remote.run(**kwargs) for remote in remotes] File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/remote.py", line 128, in run r = self._runner(client=self.ssh, name=self.shortname, **kwargs) File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 368, in run r.wait() File "/home/teuthworker/src/teuthology_master/teuthology/orchestra/run.py", line 106, in wait exitstatus=status, node=self.hostname) CommandFailedError: Command failed on plana31 with status 1: 'mkdir -p /var/lib/teuthworker/archive/sage-2014-12-29_11:40:52-rgw-next---basic-multi/683052' ...but it should only be on the local host.
2014-12-29 20:39:26 +00:00
        lambda: run_tests(ctx=ctx, config=config),
    ):
        pass
    yield