2013-07-19 22:16:16 +00:00
|
|
|
from cStringIO import StringIO
|
|
|
|
import logging
|
|
|
|
import json
|
2013-08-03 03:28:46 +00:00
|
|
|
import requests
|
2016-11-15 16:24:25 +00:00
|
|
|
from requests.packages.urllib3.util import Retry
|
2013-08-02 00:18:00 +00:00
|
|
|
from urlparse import urlparse
|
2013-07-19 22:16:16 +00:00
|
|
|
|
2014-08-07 14:24:59 +00:00
|
|
|
from teuthology.orchestra.connection import split_user
|
2013-07-19 22:16:16 +00:00
|
|
|
from teuthology import misc as teuthology
|
|
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
2013-08-24 02:16:29 +00:00
|
|
|
def multi_region_enabled(ctx):
    """Report whether this run is exercising multi-region sync.

    The radosgw-agent task registers itself under ctx, so its presence
    is a reasonable indicator that multi-region sync is being tested.
    """
    return 'radosgw_agent' in ctx
|
|
|
|
|
2016-03-29 15:19:07 +00:00
|
|
|
def rgwadmin(ctx, client, cmd, stdin=None, check_status=False,
             format='json'):
    """Run radosgw-admin on the remote that hosts the given client role.

    :param ctx: test run context
    :param client: client role, e.g. 'client.0'
    :param cmd: list of radosgw-admin arguments to append to the base command
    :param stdin: file-like object fed to the command's stdin; defaults to
                  a fresh empty buffer
    :param check_status: if True, raise when the command exits non-zero
    :param format: value passed to --format (default 'json')
    :returns: tuple (exitstatus, result) where result is the parsed JSON
              output, the raw output string if it is not valid JSON, or
              None when the command failed or produced no output
    """
    # BUGFIX: the default used to be a single StringIO() instance created
    # at import time, shared (and consumable) across every call; build a
    # fresh empty buffer per call instead.
    if stdin is None:
        stdin = StringIO()
    log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
    testdir = teuthology.get_testdir(ctx)
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    client_with_id = daemon_type + '.' + client_id
    pre = [
        'adjust-ulimits',
        'ceph-coverage',  # placeholder-free literal; no .format needed
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'radosgw-admin',
        '--log-to-stderr',
        '--format', format,
        '-n', client_with_id,
        '--cluster', cluster_name,
        ]
    pre.extend(cmd)
    log.info('rgwadmin: cmd=%s' % pre)
    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
    proc = remote.run(
        args=pre,
        check_status=check_status,
        stdout=StringIO(),
        stderr=StringIO(),
        stdin=stdin,
        )
    r = proc.exitstatus
    out = proc.stdout.getvalue()
    j = None
    if not r and out != '':
        try:
            j = json.loads(out)
            log.info(' json result: %s' % j)
        except ValueError:
            # not JSON (e.g. plain-text output); hand back the raw string
            j = out
            log.info(' raw result: %s' % j)
    return (r, j)
|
2013-08-02 00:18:00 +00:00
|
|
|
|
2015-03-22 16:43:02 +00:00
|
|
|
def get_user_summary(out, user):
    """Extract the usage-summary entry for *user*.

    Walks every entry of out['summary']; when several entries match,
    the last one wins. Raises AssertionError if no entry matches.
    """
    match = None
    for entry in out['summary']:
        if entry.get('user') == user:
            match = entry

    if not match:
        raise AssertionError('No summary info found for user: %s' % user)

    return match
|
|
|
|
|
|
|
|
def get_user_successful_ops(out, user):
    """Return the user's total successful_ops count, or 0 when the
    usage summary is empty."""
    if not out['summary']:
        return 0
    return get_user_summary(out, user)['total']['successful_ops']
|
|
|
|
|
2013-08-02 00:18:00 +00:00
|
|
|
def get_zone_host_and_port(ctx, client, zone):
    """Return (host, port) of *zone*'s first endpoint in the current period.

    Runs 'radosgw-admin period get' and searches every zonegroup for a
    zone whose name matches. A URL without an explicit port defaults to
    port 80. Asserts when the zone cannot be found.
    """
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    # computed for parity with sibling helpers; not used below
    client_with_id = daemon_type + '.' + client_id
    _, period = rgwadmin(ctx, client, check_status=True,
                         cmd=['period', 'get'])
    for zonegroup in period['period_map']['zonegroups']:
        for zone_info in zonegroup['zones']:
            if zone_info['name'] != zone:
                continue
            parsed = urlparse(zone_info['endpoints'][0])
            host = parsed.hostname
            port = parsed.port
            if port is None:
                port = 80
            return host, port
    assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
|
|
|
|
|
2013-08-03 03:28:46 +00:00
|
|
|
def get_master_zone(ctx, client):
    """Return the name of the master zone of the master zonegroup,
    or None when no master zone can be located in the current period."""
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    # computed for parity with sibling helpers; not used below
    client_with_id = daemon_type + '.' + client_id
    _, period = rgwadmin(ctx, client, check_status=True,
                         cmd=['period', 'get'])
    for zonegroup in period['period_map']['zonegroups']:
        is_master = (zonegroup['is_master'] == "true")
        log.info('zonegroup={z} is_master={ism}'.format(z=zonegroup, ism=is_master))
        if is_master:
            master_zone = zonegroup['master_zone']
            log.info('master_zone=%s' % master_zone)
            # only report the master zone if it actually appears in the
            # zonegroup's zone list
            if any(z['name'] == master_zone for z in zonegroup['zones']):
                return master_zone
    log.info('couldn\'t find master zone')
    return None
|
|
|
|
|
|
|
|
def get_master_client(ctx, clients):
    """Return the first client whose zone is the master zone, or None.

    Any client can be used to look up the master zone, as long as the
    system is configured correctly.
    """
    master_zone = get_master_zone(ctx, clients[0])
    if not master_zone:
        return None

    # stop at the first matching client, just like the explicit loop would
    return next(
        (client for client in clients
         if zone_for_client(ctx, client) == master_zone),
        None)
|
|
|
|
|
2013-08-02 00:18:00 +00:00
|
|
|
def get_zone_system_keys(ctx, client, zone):
    """Fetch (access_key, secret_key) of *zone*'s system user via
    'radosgw-admin zone get'."""
    _, zone_info = rgwadmin(ctx, client, check_status=True,
                            cmd=['zone', 'get', '--rgw-zone', zone])
    keys = zone_info['system_key']
    return keys['access_key'], keys['secret_key']
|
2013-08-02 01:56:08 +00:00
|
|
|
|
|
|
|
def zone_for_client(ctx, client):
    """Return the 'rgw zone' setting for *client*, or None if unset.

    Settings are merged with increasing precedence: [global], then
    [client], then the client's own section.
    """
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    # BUGFIX: copy before merging -- calling .update() directly on the
    # dict returned by conf.get('global', {}) mutated the cluster's
    # stored [global] config for every later reader.
    ceph_config = dict(ctx.ceph[cluster_name].conf.get('global', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
    return ceph_config.get('rgw zone')
|
2013-08-03 03:28:46 +00:00
|
|
|
|
2013-10-10 10:27:27 +00:00
|
|
|
def region_for_client(ctx, client):
    """Return the 'rgw region' setting for *client*, or None if unset.

    Settings are merged with increasing precedence: [global], then
    [client], then the client's own section.
    """
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    # BUGFIX: copy before merging -- calling .update() directly on the
    # dict returned by conf.get('global', {}) mutated the cluster's
    # stored [global] config for every later reader.
    ceph_config = dict(ctx.ceph[cluster_name].conf.get('global', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
    return ceph_config.get('rgw region')
|
|
|
|
|
2014-03-10 07:22:39 +00:00
|
|
|
def radosgw_data_log_window(ctx, client):
    """Return the 'rgw data log window' setting for *client*,
    defaulting to 30 when unset.

    Settings are merged with increasing precedence: [global], then
    [client], then the client's own section.
    """
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    # BUGFIX: copy before merging -- calling .update() directly on the
    # dict returned by conf.get('global', {}) mutated the cluster's
    # stored [global] config for every later reader.
    ceph_config = dict(ctx.ceph[cluster_name].conf.get('global', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
    return ceph_config.get('rgw data log window', 30)
|
2013-10-10 10:27:27 +00:00
|
|
|
|
|
|
|
def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
    """POST a data-sync request to the radosgw-agent REST endpoint and
    return the requests.Response.

    A retrying adapter (with backoff) is mounted on the session to
    tolerate slow startup of radosgw-agent.
    """
    log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
    method = "full" if full else "incremental"
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(
        max_retries=Retry(total=5, backoff_factor=1))
    session.mount('http://{addr}:{port}/'.format(addr = agent_host, port = agent_port),
                  adapter)
    return session.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method))
|
2013-08-03 03:28:46 +00:00
|
|
|
|
2013-10-10 10:27:27 +00:00
|
|
|
def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False):
    """POST a metadata-sync request to the radosgw-agent REST endpoint
    and return the requests.Response.

    A retrying adapter (with backoff) is mounted on the session to
    tolerate slow startup of radosgw-agent.
    """
    log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
    method = "full" if full else "incremental"
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(
        max_retries=Retry(total=5, backoff_factor=1))
    session.mount('http://{addr}:{port}/'.format(addr = agent_host, port = agent_port),
                  adapter)
    return session.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method))
|
2013-08-03 03:28:46 +00:00
|
|
|
|
2013-10-10 10:27:27 +00:00
|
|
|
def radosgw_agent_sync_all(ctx, full=False, data=False):
    """Trigger a metadata sync (and a data sync when *data* is True)
    through every configured radosgw-agent."""
    if not ctx.radosgw_agent.procs:
        return
    for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
        zone_for_client(ctx, agent_client)
        sync_host, sync_port = get_sync_agent(ctx, agent_client)
        log.debug('doing a sync via {host1}'.format(host1=sync_host))
        radosgw_agent_sync_metadata(ctx, sync_host, sync_port, full)
        if data:
            radosgw_agent_sync_data(ctx, sync_host, sync_port, full)
|
2013-08-13 16:23:56 +00:00
|
|
|
|
|
|
|
def host_for_role(ctx, role):
    """Return the hostname of the target that carries *role*, or None
    (implicitly) when no target lists that role."""
    targets = ctx.config['targets'].iterkeys()
    for target, roles in zip(targets, ctx.config['roles']):
        if role not in roles:
            continue
        _, host = split_user(target)
        return host
|
|
|
|
|
|
|
|
def get_sync_agent(ctx, source):
    """Locate the radosgw-agent whose 'src' matches *source*.

    :returns: (host, port) of the agent (port defaults to 8000), or
              (None, None) when no matching agent task is configured.
    """
    agent_configs = (task['radosgw-agent'] for task in ctx.config['tasks']
                     if 'radosgw-agent' in task)
    for agents in agent_configs:
        for client, conf in agents.iteritems():
            if conf['src'] == source:
                return host_for_role(ctx, source), conf.get('port', 8000)
    return None, None
|