Merge remote-tracking branch 'gh/next'

Sage Weil 2013-08-13 11:03:00 -07:00
commit 0f3d9eddd9
5 changed files with 309 additions and 133 deletions


@@ -12,6 +12,8 @@ import boto.exception
import boto.s3.connection
import boto.s3.acl
import teuthology.task_util.rgw as rgw_utils
import time
from teuthology import misc as teuthology
@@ -41,8 +43,11 @@ def task(ctx, config):
config = dict.fromkeys(config)
clients = config.keys()
# just use the first client...
client = clients[0];
client = rgw_utils.get_master_client(ctx, clients)
if not client:
# oh, well, just use the first client... multiregion stuff might not work correctly
client = clients[0];
##
user1='foo'
@@ -215,6 +220,8 @@ def task(ctx, config):
assert not err
assert len(out) == 0
rgw_utils.radosgw_agent_sync_all(ctx)
# connect to rgw
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
(remote_user, remote_host) = remote.name.split('@')
@@ -367,9 +374,13 @@ def task(ctx, config):
for obj in out:
# TESTCASE 'log-show','log','show','after activity','returns expected info'
if obj[:4] == 'meta' or obj[:4] == 'data':
continue
(err, log) = rgwadmin(ctx, client, ['log', 'show', '--object', obj])
assert not err
assert len(log) > 0
assert log['bucket'].find(bucket_name) == 0
assert log['bucket'] != bucket_name or log['bucket_id'] == bucket_id
assert log['bucket_owner'] == user1 or log['bucket'] == bucket_name + '5'
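For reference, a 'log show' entry that satisfies the assertions above has roughly this shape. This is a hypothetical sketch: only the keys the test checks ('bucket', 'bucket_id', 'bucket_owner') are grounded in the assertions; bucket_name and bucket_id values are illustrative.
# Hypothetical 'radosgw-admin log show' entry; keys from the assertions
# above, values illustrative.
bucket_name = 'myfoo'         # assumed test bucket name
bucket_id = 'default.1234.1'  # assumed
user1 = 'foo'
log = {
    'bucket': bucket_name,
    'bucket_id': bucket_id,
    'bucket_owner': user1,
}
assert log['bucket'].find(bucket_name) == 0
assert log['bucket'] != bucket_name or log['bucket_id'] == bucket_id
assert log['bucket_owner'] == user1 or log['bucket'] == bucket_name + '5'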
@@ -563,12 +574,19 @@ def task(ctx, config):
assert err
# TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
(err, out) = rgwadmin(ctx, client, ['zone', 'get'])
assert len(out) > 0
assert len(out['placement_pools']) == 1
#
default_rule = out['placement_pools'][0]
assert default_rule['key'] == 'default-placement'
(err, out) = rgwadmin(ctx, client, ['zone', 'get'])
orig_placement_pools = len(out['placement_pools'])
# removed this test: it is not correct to assume that the zone has default
# placement, since that depends on how it was set up beforehand
#
# assert len(out) > 0
# assert len(out['placement_pools']) == 1
# default_rule = out['placement_pools'][0]
# assert default_rule['key'] == 'default-placement'
rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
@@ -579,4 +597,4 @@ def task(ctx, config):
(err, out) = rgwadmin(ctx, client, ['zone', 'get'])
assert len(out) > 0
assert len(out['placement_pools']) == 2
assert len(out['placement_pools']) == orig_placement_pools + 1
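For context, a 'zone get' payload consistent with these assertions looks roughly like the following. This is hypothetical: the 'new-placement' entry mirrors the rule added above, while the pre-existing 'default-placement' entry and pool names are illustrative only.
# Hypothetical 'radosgw-admin zone get' output after adding the rule.
orig_placement_pools = 1  # whatever 'zone get' reported before the add
out = {
    'placement_pools': [
        {'key': 'default-placement',
         'val': {'data_pool': '.rgw.buckets',
                 'index_pool': '.rgw.buckets.index'}},
        {'key': 'new-placement',
         'val': {'data_pool': '.rgw.buckets.2',
                 'index_pool': '.rgw.buckets.index.2'}},
    ],
}
assert len(out['placement_pools']) == orig_placement_pools + 1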


@@ -1,5 +1,6 @@
import contextlib
import logging
import argparse
from ..orchestra import run
from teuthology import misc as teuthology
@@ -7,70 +8,108 @@ import teuthology.task_util.rgw as rgw_utils
log = logging.getLogger(__name__)
def run_radosgw_agent(ctx, client, config):
def run_radosgw_agent(ctx, config):
"""
Run a single radosgw-agent. See task() for config format.
"""
src_client = config['src']
dest_client = config['dest']
return_list = list()
for (client, cconf) in config.items():
# don't process entries that are not clients
if not client.startswith('client.'):
log.debug('key {data} does not start with \'client.\', moving on'.format(
data=client))
continue
src_zone = rgw_utils.zone_for_client(ctx, src_client)
dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
src_client = cconf['src']
dest_client = cconf['dest']
log.info("source is %s", src_zone)
log.info("dest is %s", dest_zone)
src_zone = rgw_utils.zone_for_client(ctx, src_client)
dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
testdir = teuthology.get_testdir(ctx)
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'cd', testdir, run.Raw('&&'),
'git', 'clone', 'https://github.com/ceph/radosgw-agent.git',
'radosgw-agent.{client}'.format(client=client),
run.Raw('&&'),
'cd', 'radosgw-agent.{client}'.format(client=client),
run.Raw('&&'),
'./bootstrap',
]
)
log.info("source is %s", src_zone)
log.info("dest is %s", dest_zone)
src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
src_zone)
dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
dest_zone)
src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
src_zone)
dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
dest_zone)
port = config.get('port', 8000)
daemon_name = '{host}.syncdaemon'.format(host=remote.name)
testdir = teuthology.get_testdir(ctx)
(remote,) = ctx.cluster.only(client).remotes.keys()
# figure out which branch to pull from
branch = cconf.get('force-branch', None)
if not branch:
branch = cconf.get('branch', 'master')
sha1 = cconf.get('sha1')
remote.run(
args=[
'cd', testdir, run.Raw('&&'),
'git', 'clone',
'-b', branch,
'https://github.com/ceph/radosgw-agent.git',
'radosgw-agent.{client}'.format(client=client),
]
)
if sha1 is not None:
remote.run(
args=[
'cd', testdir, run.Raw('&&'),
'cd', 'radosgw-agent.{client}'.format(client=client),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
]
)
remote.run(
args=[
'cd', testdir, run.Raw('&&'),
'cd', 'radosgw-agent.{client}'.format(client=client),
run.Raw('&&'),
'./bootstrap',
]
)
return remote.run(
args=[
'{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
'{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
client=client),
'-v',
'--src-access-key', src_access,
'--src-secret-key', src_secret,
'--src-host', src_host,
'--src-port', str(src_port),
'--src-zone', src_zone,
'--dest-access-key', dest_access,
'--dest-secret-key', dest_secret,
'--dest-host', dest_host,
'--dest-port', str(dest_port),
'--dest-zone', dest_zone,
'--daemon-id', daemon_name,
'--test-server-host', '0.0.0.0', '--test-server-port', str(port),
'--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
tdir=testdir,
client=client),
],
wait=False,
stdin=run.PIPE,
logger=log.getChild(daemon_name)
)
src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
src_zone)
dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
dest_zone)
src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
src_zone)
dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
dest_zone)
sync_scope = cconf.get('sync-scope', None)
port = cconf.get('port', 8000)
daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
in_args=[
'{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
'{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
client=client),
'-v',
'--src-access-key', src_access,
'--src-secret-key', src_secret,
'--src-host', src_host,
'--src-port', str(src_port),
'--src-zone', src_zone,
'--dest-access-key', dest_access,
'--dest-secret-key', dest_secret,
'--dest-host', dest_host,
'--dest-port', str(dest_port),
'--dest-zone', dest_zone,
'--daemon-id', daemon_name,
'--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
tdir=testdir,
client=client),
]
# the test server and full/incremental flags are mutually exclusive
if sync_scope is None:
in_args.append('--test-server-host')
in_args.append('0.0.0.0')
in_args.append('--test-server-port')
in_args.append(str(port))
else:
in_args.append('--sync-scope')
in_args.append(sync_scope)
return_list.append((client, remote.run(
args=in_args,
wait=False,
stdin=run.PIPE,
logger=log.getChild(daemon_name),
)))
return return_list
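Since run_radosgw_agent() now returns a list of (client, proc) pairs instead of a single process, a caller tears the agents down by walking that list. A minimal sketch, assuming teuthology's orchestra conventions (daemon-helper kills its child when stdin closes, and run.wait() collects remote processes):
from teuthology.orchestra import run

def stop_agents(agent_procs):
    # Closing stdin tells daemon-helper to kill the wrapped agent...
    for client, proc in agent_procs:
        proc.stdin.close()
    # ...then wait for every remote process to exit.
    run.wait(proc for client, proc in agent_procs)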
@contextlib.contextmanager
@@ -83,7 +122,16 @@ def task(ctx, config):
to 0.0.0.0. Port defaults to 8000. This must be run on clients
that have the correct zone root pools and rgw zone set in
ceph.conf, or the task cannot read the region information from the
cluster. An example::
cluster.
By default, this task will start an HTTP server that will trigger full
or incremental syncs based on requests made to it.
Alternatively, a single full sync can be triggered by
specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
by specifying 'sync-scope: incremental' (the loop will sleep
'--incremental-sync-delay' seconds between each sync, default is 20 seconds).
An example::
tasks:
- ceph:
@@ -97,8 +145,10 @@ def task(ctx, config):
- rgw: # region configuration omitted for brevity
- radosgw-agent:
client.0:
branch: wip-next-feature-branch
src: client.0
dest: client.1
sync-scope: full
# port: 8000 (default)
client.1:
src: client.1
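When no sync-scope is given, the agent's built-in test server drives syncs over HTTP. A minimal sketch of kicking off an incremental metadata sync, mirroring rgw_utils.radosgw_agent_sync() later in this commit (host and port are illustrative):
import requests

# POST to the agent's test server; the /metadata/incremental endpoint
# matches the one used by rgw_utils.radosgw_agent_sync().
resp = requests.post(
    'http://{addr}:{port}/metadata/incremental'.format(
        addr='localhost', port=8000))
resp.raise_for_status()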
@@ -108,8 +158,19 @@ def task(ctx, config):
assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
log.debug("config is %s", config)
procs = [(client, run_radosgw_agent(ctx, client, c_config)) for
client, c_config in config.iteritems()]
overrides = ctx.config.get('overrides', {})
# merge each client section, but only if it exists in config since there isn't
# a sensible default action for this task
for client in config.iterkeys():
if config[client]:
log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
ctx.radosgw_agent = argparse.Namespace()
procs = run_radosgw_agent(ctx, config)
ctx.radosgw_agent.procs = procs
try:
yield


@@ -271,11 +271,28 @@ def extract_zone_info(ctx, client, client_config):
client=client)
region = ceph_config['rgw region']
zone = ceph_config['rgw zone']
zone_info = {}
for key in ['control_pool', 'gc_pool', 'log_pool', 'intent_log_pool',
'usage_log_pool', 'user_keys_pool', 'user_email_pool',
'user_swift_pool', 'user_uid_pool', 'domain_root']:
zone_info[key] = '.' + region + '.' + zone + '.' + key
zone_info = dict(
domain_root=ceph_config['rgw zone root pool'],
)
for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool', 'rgw intent log pool',
'rgw usage log pool', 'rgw user keys pool', 'rgw user email pool',
'rgw user swift pool', 'rgw user uid pool']:
new_key = key.split(' ',1)[1]
new_key = new_key.replace(' ', '_')
if key in ceph_config:
value = ceph_config[key]
log.debug('{key} specified in ceph_config ({val})'.format(key=key, val=value))
zone_info[new_key] = value
else:
zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key
# these keys are meant for the zones argument in the region info.
# We insert them into zone_info with a different format and then remove them
# in the fill_in_endpoints() method
for key in ['rgw log meta', 'rgw log data']:
if key in ceph_config:
zone_info[key] = ceph_config[key]
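A worked example of the defaulting rule above, with illustrative region/zone names:
# With 'rgw region: foo' and 'rgw zone: foo-1', an unset 'rgw gc pool'
# falls back to a name derived from the conf key.
region, zone = 'foo', 'foo-1'
key = 'rgw gc pool'
new_key = key.split(' ', 1)[1].replace(' ', '_')  # 'gc_pool'
assert '.' + region + '.' + zone + '.' + new_key == '.foo.foo-1.gc_pool'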
@@ -316,6 +333,13 @@ def fill_in_endpoints(region_info, role_zones, role_endpoints):
region, zone, zone_info, _ = role_zones[role]
host, port = role_endpoints[role]
endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
# check if the region specified under client actually exists
# in region_info (it should, if properly configured).
# If not, throw a reasonable error
if region not in region_info:
raise Exception('Region: {region} was specified but no corresponding' \
' entry was found under \'regions\''.format(region=region))
region_conf = region_info[region]
region_conf.setdefault('endpoints', [])
region_conf['endpoints'].append(endpoint)
@@ -373,6 +397,7 @@ def configure_users(ctx, config):
@contextlib.contextmanager
def configure_regions_and_zones(ctx, config, regions, role_endpoints):
if not regions:
log.debug('In rgw.configure_regions_and_zones() and regions is None. Bailing')
yield
return
@@ -400,7 +425,30 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints):
for region, r_config in regions.iteritems()])
fill_in_endpoints(region_info, role_zones, role_endpoints)
# clear out the old defaults
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
# removing these objects from .rgw.root and the per-zone root pools
# may or may not matter
rados(ctx, mon,
cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
rados(ctx, mon,
cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
for client in config.iterkeys():
for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
'rm', 'region_info.default'])
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
'rm', 'zone_info.default'])
rgwadmin(ctx, client,
cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
stdin=StringIO(json.dumps(dict(zone_info.items() + user_info.items()))),
check_status=True)
for region, info in region_info.iteritems():
region_json = json.dumps(info)
log.debug('region info is: %s', region_json)
@@ -414,37 +462,8 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints):
'region', 'default',
'--rgw-region', region],
check_status=True)
for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
# add the user_info (if it exists) to the zone_info
if user_info:
new_dict = dict(zone_info.items() + user_info.items())
else:
new_dict = zone_info
rgwadmin(ctx, client,
cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
stdin=StringIO(json.dumps(new_dict)),
check_status=True)
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
# removing these objects from .rgw.root and the per-zone root pools
# may or may not matter
rados(ctx, mon,
cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
rados(ctx, mon,
cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
for client in config.iterkeys():
rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update'])
for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
'rm', 'region_info.default'])
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
'rm', 'zone_info.default'])
yield
@contextlib.contextmanager
@@ -505,22 +524,22 @@ def task(ctx, config):
client.0:
rgw region: foo
rgw zone: foo-1
rgw region root pool: .rgw.root.foo
rgw zone root pool: .rgw.root.foo
rgw region root pool: .rgw.rroot.foo
rgw zone root pool: .rgw.zroot.foo
rgw log meta: true
rgw log data: true
client.1:
rgw region: bar
rgw zone: bar-master
rgw region root pool: .rgw.root.bar
rgw zone root pool: .rgw.root.bar
rgw region root pool: .rgw.rroot.bar
rgw zone root pool: .rgw.zroot.bar
rgw log meta: true
rgw log data: true
client.2:
rgw region: bar
rgw zone: bar-secondary
rgw region root pool: .rgw.root.bar
rgw zone root pool: .rgw.root.bar-secondary
rgw region root pool: .rgw.rroot.bar
rgw zone root pool: .rgw.zroot.bar-secondary
- rgw:
regions:
foo:


@@ -70,7 +70,9 @@ def create_users(ctx, config):
log.info('Creating rgw users...')
testdir = teuthology.get_testdir(ctx)
users = {'s3': 'foo'}
cached_client_user_names = dict()
for client in config['clients']:
cached_client_user_names[client] = dict()
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('readwrite', {})
s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
@@ -84,27 +86,21 @@ def create_users(ctx, config):
rwconf['files'].setdefault('stddev', 500)
for section, user in users.iteritems():
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
'{tdir}/adjust-ulimits'.format(tdir=testdir),
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
'--access-key', s3tests_conf[section]['access_key'],
'--secret', s3tests_conf[section]['secret_key'],
'--email', s3tests_conf[section]['email'],
],
)
try:
yield
finally:
for client in config['clients']:
for user in users.itervalues():
uid = '{user}.{client}'.format(user=user, client=client)
log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
client=client))
# stash the 'delete_user' flag along with user name for easier cleanup
delete_this_user = True
if 'delete_user' in s3tests_conf['s3']:
delete_this_user = s3tests_conf['s3']['delete_user']
log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user,client=client))
cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)
# skip actual user creation if the create_user flag is set to false for this client
if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False:
log.debug('create_user set to False, skipping user creation for {client}'.format(client=client))
continue
else:
ctx.cluster.only(client).run(
args=[
'{tdir}/adjust-ulimits'.format(tdir=testdir),
@@ -112,11 +108,36 @@ def create_users(ctx, config):
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'rm',
'--uid', uid,
'--purge-data',
],
)
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
'--access-key', s3tests_conf[section]['access_key'],
'--secret', s3tests_conf[section]['secret_key'],
'--email', s3tests_conf[section]['email'],
],
)
try:
yield
finally:
for client in config['clients']:
for section, user in users.iteritems():
#uid = '{user}.{client}'.format(user=user, client=client)
real_uid, delete_this_user = cached_client_user_names[client][section+user]
if delete_this_user:
ctx.cluster.only(client).run(
args=[
'{tdir}/adjust-ulimits'.format(tdir=testdir),
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'rm',
'--uid', real_uid,
'--purge-data',
],
)
else:
log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid,client=client))
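A hypothetical s3tests configuration fragment exercising the new flags; the shape is inferred from the s3tests_conf['s3'] lookups above, not taken from documented config:
# 'create_user' skips the radosgw-admin 'user create' step;
# 'delete_user' controls whether the user is purged at teardown.
s3tests_conf = {
    's3': {
        'create_user': False,
        'delete_user': False,
    },
}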
@contextlib.contextmanager
def configure(ctx, config):


@@ -1,6 +1,7 @@
from cStringIO import StringIO
import logging
import json
import requests
from urlparse import urlparse
from teuthology import misc as teuthology
@@ -17,8 +18,10 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False):
'radosgw-admin',
'--log-to-stderr',
'--format', 'json',
'-n', client,
]
pre.extend(cmd)
log.info('radosgw-admin: cmd=%s' % pre)
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
proc = remote.run(
args=pre,
@@ -53,6 +56,35 @@ def get_zone_host_and_port(ctx, client, zone):
return host, port
assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
def get_master_zone(ctx, client):
_, region_map = rgwadmin(ctx, client, check_status=True,
cmd=['-n', client, 'region-map', 'get'])
regions = region_map['regions']
for region in regions:
is_master = (region['val']['is_master'] == "true")
log.info('region={r} is_master={ism}'.format(r=region, ism=is_master))
if not is_master:
continue
master_zone = region['val']['master_zone']
log.info('master_zone=%s' % master_zone)
for zone_info in region['val']['zones']:
if zone_info['name'] == master_zone:
return master_zone
log.info('couldn\'t find master zone')
return None
def get_master_client(ctx, clients):
master_zone = get_master_zone(ctx, clients[0]) # any client will do, as long as the system is configured correctly
if not master_zone:
return None
for client in clients:
zone = zone_for_client(ctx, client)
if zone == master_zone:
return client
return None
def get_zone_system_keys(ctx, client, zone):
_, zone_info = rgwadmin(ctx, client, check_status=True,
cmd=['-n', client,
@@ -65,3 +97,28 @@ def zone_for_client(ctx, client):
ceph_config.update(ctx.ceph.conf.get('client', {}))
ceph_config.update(ctx.ceph.conf.get(client, {}))
return ceph_config.get('rgw zone')
def radosgw_agent_sync(ctx, agent_host, agent_port):
log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
return requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = agent_host, port = agent_port))
def radosgw_agent_sync_all(ctx):
if ctx.radosgw_agent.procs:
for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
dest_zone = zone_for_client(ctx, agent_client)
port = c_config.get('port', 8000)
dest_host, dest_port = get_zone_host_and_port(ctx, agent_client, dest_zone)
radosgw_agent_sync(ctx, dest_host, port)