from cStringIO import StringIO
from urlparse import urlparse
import json
import logging

import requests

from ..orchestra.connection import split_user
from teuthology import misc as teuthology

log = logging.getLogger(__name__)


# simple test to indicate if multi-region testing should occur
def multi_region_enabled(ctx):
    # this is populated by the radosgw-agent task, so it seems reasonable to
    # use it as an indicator that we're testing multi-region sync
    return 'radosgw_agent' in ctx


def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False):
    """
    Run radosgw-admin as the given client and return a tuple of the exit
    status and the JSON-decoded output (or the raw output if it is not
    valid JSON).
    """
    log.info('rgwadmin: {client} : {cmd}'.format(client=client, cmd=cmd))
    testdir = teuthology.get_testdir(ctx)
    pre = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'radosgw-admin',
        '--log-to-stderr',
        '--format', 'json',
        '-n', client,
        ]
    pre.extend(cmd)
    log.info('rgwadmin: cmd=%s' % pre)
    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
    proc = remote.run(
        args=pre,
        check_status=check_status,
        stdout=StringIO(),
        stderr=StringIO(),
        stdin=stdin,
        )
    r = proc.exitstatus
    out = proc.stdout.getvalue()
    j = None
    if not r and out != '':
        try:
            j = json.loads(out)
            log.info(' json result: %s' % j)
        except ValueError:
            j = out
            log.info(' raw result: %s' % j)
    return (r, j)
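
# A hypothetical example of calling the helper above (the client name, the
# 'user info' subcommand, and the uid are only for illustration):
#
#   (err, out) = rgwadmin(ctx, 'client.0', check_status=True,
#                         cmd=['user', 'info', '--uid', 'foo'])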


def get_zone_host_and_port(ctx, client, zone):
    _, region_map = rgwadmin(ctx, client, check_status=True,
                             cmd=['-n', client, 'region-map', 'get'])
    regions = region_map['regions']
    for region in regions:
        for zone_info in region['val']['zones']:
            if zone_info['name'] == zone:
                endpoint = urlparse(zone_info['endpoints'][0])
                host, port = endpoint.hostname, endpoint.port
                if port is None:
                    port = 80
                return host, port
    assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
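
# For reference, an illustrative sketch (not taken from any particular Ceph
# version) of the 'region-map get' JSON that get_zone_host_and_port above and
# get_master_zone below walk through; only the fields actually read by this
# module are shown, and the zone name and endpoint are made up:
#
#   {"regions": [
#       {"val": {"is_master": "true",
#                "master_zone": "default",
#                "zones": [
#                    {"name": "default",
#                     "endpoints": ["http://gateway.example.com:7280/"]}]}}]}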


def get_master_zone(ctx, client):
    _, region_map = rgwadmin(ctx, client, check_status=True,
                             cmd=['-n', client, 'region-map', 'get'])
    regions = region_map['regions']
    for region in regions:
        is_master = (region['val']['is_master'] == "true")
        log.info('region={r} is_master={ism}'.format(r=region, ism=is_master))
        if not is_master:
            continue
        master_zone = region['val']['master_zone']
        log.info('master_zone=%s' % master_zone)
        for zone_info in region['val']['zones']:
            if zone_info['name'] == master_zone:
                return master_zone
    log.info("couldn't find master zone")
    return None


def get_master_client(ctx, clients):
    # any client will do here, as long as the system is configured correctly
    master_zone = get_master_zone(ctx, clients[0])
    if not master_zone:
        return None

    for client in clients:
        zone = zone_for_client(ctx, client)
        if zone == master_zone:
            return client

    return None


def get_zone_system_keys(ctx, client, zone):
    _, zone_info = rgwadmin(ctx, client, check_status=True,
                            cmd=['-n', client,
                                 'zone', 'get', '--rgw-zone', zone])
    system_key = zone_info['system_key']
    return system_key['access_key'], system_key['secret_key']


def zone_for_client(ctx, client):
    """
    Return the 'rgw zone' setting configured for the given client, if any.
    """
    # copy so we don't mutate the shared conf dicts stored on ctx
    ceph_config = dict(ctx.ceph.conf.get('global', {}))
    ceph_config.update(ctx.ceph.conf.get('client', {}))
    ceph_config.update(ctx.ceph.conf.get(client, {}))
    return ceph_config.get('rgw zone')
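
# zone_for_client above assumes the zone is named by an 'rgw zone' option in
# the teuthology-generated ceph conf; a hypothetical fragment (section and
# zone names are made up):
#
#   [client.0]
#       rgw zone = r1z1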


def radosgw_agent_sync(ctx, agent_host, agent_port):
    log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
    return requests.post('http://{addr}:{port}/metadata/incremental'.format(
        addr=agent_host, port=agent_port))


def radosgw_agent_sync_all(ctx):
    """
    Trigger a metadata sync on every radosgw-agent configured for the run.
    """
    if ctx.radosgw_agent.procs:
        for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
            dest_zone = zone_for_client(ctx, agent_client)
            sync_host, sync_port = get_sync_agent(ctx, agent_client)
            log.debug('doing a sync via {host1} for zone {zone}'.format(
                host1=sync_host, zone=dest_zone))
            radosgw_agent_sync(ctx, sync_host, sync_port)


def host_for_role(ctx, role):
    for target, roles in zip(ctx.config['targets'].iterkeys(),
                             ctx.config['roles']):
        if role in roles:
            _, host = split_user(target)
            return host


def get_sync_agent(ctx, source):
    """
    Find the radosgw-agent task whose 'src' is the given client and return
    the (host, port) it can be reached at, or (None, None) if there is none.
    """
    for task in ctx.config['tasks']:
        if 'radosgw-agent' not in task:
            continue
        for client, conf in task['radosgw-agent'].iteritems():
            if conf['src'] == source:
                return host_for_role(ctx, source), conf.get('port', 8000)
    return None, None
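
# A hypothetical sketch of the job fragment that get_sync_agent matches
# against; only 'src' and 'port' are read here, and the client name and port
# are made up:
#
#   tasks:
#   - radosgw-agent:
#       client.0:
#         src: client.0
#         port: 8000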