2014-02-15 01:08:28 +00:00
|
|
|
"""
|
|
|
|
rgw routines
|
|
|
|
"""
|
2013-08-17 17:14:37 +00:00
|
|
|
import argparse
|
2011-06-22 23:36:58 +00:00
|
|
|
import contextlib
|
|
|
|
import logging
|
|
|
|
|
2014-08-07 14:24:59 +00:00
|
|
|
from teuthology.orchestra import run
|
2011-06-22 23:36:58 +00:00
|
|
|
from teuthology import misc as teuthology
|
|
|
|
from teuthology import contextutil
|
2018-11-28 09:10:01 +00:00
|
|
|
from teuthology.exceptions import ConfigError
|
2021-03-03 02:38:36 +00:00
|
|
|
from tasks.ceph_manager import get_valgrind_args
|
2020-02-15 10:05:01 +00:00
|
|
|
from tasks.util import get_remote_for_role
|
|
|
|
from tasks.util.rgw import rgwadmin, wait_for_radosgw
|
|
|
|
from tasks.util.rados import (create_ec_pool,
|
|
|
|
create_replicated_pool,
|
|
|
|
create_cache_pool)
|
2011-06-22 23:36:58 +00:00
|
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
2018-02-21 16:19:48 +00:00
|
|
|
class RGWEndpoint:
    """Address and TLS/DNS settings for a single radosgw instance."""

    def __init__(self, hostname=None, port=None, cert=None, dns_name=None, website_dns_name=None):
        # network location of the gateway
        self.hostname = hostname
        self.port = port
        # ssl certificate object (or None for plain http)
        self.cert = cert
        # hostnames the gateway should answer to for bucket/website requests
        self.dns_name = dns_name
        self.website_dns_name = website_dns_name

    def url(self):
        """Return the base URL, picking the scheme from the certificate."""
        if self.cert:
            scheme = 'https'
        else:
            scheme = 'http'
        return '{proto}://{hostname}:{port}/'.format(proto=scheme, hostname=self.hostname, port=self.port)
|
2018-02-21 16:19:48 +00:00
|
|
|
|
2011-09-03 00:58:19 +00:00
|
|
|
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.

    Spawns one radosgw daemon per client role (optionally wired up to
    keystone, vault, barbican, or pykmip), waits for each daemon to accept
    connections, then yields.  On teardown the daemons are stopped and the
    ops-log sockets (and any vault token file) are removed.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id  # e.g. 'client.0'
        client_with_cluster = cluster_name + '.' + client_with_id  # e.g. 'ceph.client.0'

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        # wrappers placed in front of the radosgw binary: root privileges,
        # raised ulimits, coverage collection, and daemon-helper so the
        # daemon can be terminated cleanly
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        endpoint = ctx.rgw.role_endpoints[client]
        # build the --rgw-frontends option string, e.g.
        # 'beast port=80' or 'beast ssl_port=443 ssl_certificate=...'
        frontends = ctx.rgw.frontend
        frontend_prefix = client_config.get('frontend_prefix', None)
        if frontend_prefix:
            frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)

        if endpoint.cert:
            # add the ssl certificate path
            frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate)
            frontends += ' ssl_port={}'.format(endpoint.port)
        else:
            frontends += ' port={}'.format(endpoint.port)

        rgw_cmd.extend([
            '--rgw-frontends', frontends,
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
                                                                  client_with_cluster=client_with_cluster),
        ])

        # optional keystone integration: register a swift endpoint for this
        # gateway and point radosgw at keystone's public endpoint
        keystone_role = client_config.get('use-keystone-role', None)
        if keystone_role is not None:
            if not ctx.keystone:
                raise ConfigError('rgw must run after the keystone task')
            url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname,
                                                                     port=endpoint.port)
            ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)

            keystone_host, keystone_port = \
                ctx.keystone.public_endpoints[keystone_role]
            rgw_cmd.extend([
                '--rgw_keystone_url',
                'http://{khost}:{kport}'.format(khost=keystone_host,
                                               kport=keystone_port),
            ])

        if client_config.get('dns-name') is not None:
            rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
        if client_config.get('dns-s3website-name') is not None:
            rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])

        # mutually-exclusive key-management backends for server-side
        # encryption; barbican wins over vault, vault over pykmip
        vault_role = client_config.get('use-vault-role', None)
        barbican_role = client_config.get('use-barbican-role', None)
        pykmip_role = client_config.get('use-pykmip-role', None)

        # also referenced in the teardown path below
        token_path = '/etc/ceph/vault-root-token'
        if barbican_role is not None:
            if not hasattr(ctx, 'barbican'):
                raise ConfigError('rgw must run after the barbican task')

            barbican_host, barbican_port = \
                ctx.barbican.endpoints[barbican_role]
            log.info("Use barbican url=%s:%s", barbican_host, barbican_port)

            rgw_cmd.extend([
                '--rgw_barbican_url',
                'http://{bhost}:{bport}'.format(bhost=barbican_host,
                                               bport=barbican_port),
            ])
        elif vault_role is not None:
            if not ctx.vault.root_token:
                raise ConfigError('vault: no "root_token" specified')
            # create token on file
            ctx.rgw.vault_role = vault_role
            ctx.cluster.only(client).run(args=['sudo', 'echo', '-n', ctx.vault.root_token, run.Raw('|'), 'sudo', 'tee', token_path])
            log.info("Token file content")
            ctx.cluster.only(client).run(args=['cat', token_path])
            log.info("Restrict access to token file")
            # the token must only be readable by the rgw daemon (user 'ceph')
            ctx.cluster.only(client).run(args=['sudo', 'chmod', '600', token_path])
            ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', token_path])

            vault_addr = "{}:{}".format(*ctx.vault.endpoints[vault_role])
            rgw_cmd.extend([
                '--rgw_crypt_vault_addr', vault_addr,
                '--rgw_crypt_vault_token_file', token_path,
                '--rgw_crypt_sse_s3_vault_addr', vault_addr,
                '--rgw_crypt_sse_s3_vault_token_file', token_path,
            ])
        elif pykmip_role is not None:
            if not hasattr(ctx, 'pykmip'):
                raise ConfigError('rgw must run after the pykmip task')
            ctx.rgw.pykmip_role = pykmip_role
            rgw_cmd.extend([
                '--rgw_crypt_kmip_addr', "{}:{}".format(*ctx.pykmip.endpoints[pykmip_role]),
            ])

            # copy the kmip client/server/root certs created by the ssl task
            # into /etc/ceph so the daemon can read them
            clientcert = ctx.ssl_certificates.get('kmip-client')
            servercert = ctx.ssl_certificates.get('kmip-server')
            clientca = ctx.ssl_certificates.get('kmiproot')

            clientkey = clientcert.key
            clientcert = clientcert.certificate
            serverkey = servercert.key
            servercert = servercert.certificate
            rootkey = clientca.key
            rootcert = clientca.certificate

            cert_path = '/etc/ceph/'
            ctx.cluster.only(client).run(args=['sudo', 'cp', clientcert, cert_path])
            ctx.cluster.only(client).run(args=['sudo', 'cp', clientkey, cert_path])
            ctx.cluster.only(client).run(args=['sudo', 'cp', servercert, cert_path])
            ctx.cluster.only(client).run(args=['sudo', 'cp', serverkey, cert_path])
            ctx.cluster.only(client).run(args=['sudo', 'cp', rootkey, cert_path])
            ctx.cluster.only(client).run(args=['sudo', 'cp', rootcert, cert_path])

            # rebind the variables to the copied locations
            clientcert = cert_path + 'kmip-client.crt'
            clientkey = cert_path + 'kmip-client.key'
            servercert = cert_path + 'kmip-server.crt'
            serverkey = cert_path + 'kmip-server.key'
            rootkey = cert_path + 'kmiproot.key'
            rootcert = cert_path + 'kmiproot.crt'

            ctx.cluster.only(client).run(args=['sudo', 'chmod', '600', clientcert, clientkey, servercert, serverkey, rootkey, rootcert])
            ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', clientcert, clientkey, servercert, serverkey, rootkey, rootcert])

        # run in the foreground, teeing stdout/stderr into a log file
        rgw_cmd.extend([
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind'),
                # see https://github.com/ceph/teuthology/pull/1600
                exit_on_first_error=False
            )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            fsid=ctx.ceph[cluster_name].fsid,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in clients:
        endpoint = ctx.rgw.role_endpoints[client]
        url = endpoint.url()
        log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        wait_for_radosgw(url, remote)

    try:
        yield
    finally:
        # teardown: stop each daemon and remove its ops-log socket and the
        # vault token file (rm -f, so it is harmless when absent)
        for client in clients:
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                ],
            )
            ctx.cluster.only(client).run(args=['sudo', 'rm', '-f', token_path])
|
2011-09-03 00:58:19 +00:00
|
|
|
|
2018-02-13 18:26:09 +00:00
|
|
|
def assign_endpoints(ctx, config, default_cert):
    """Build an RGWEndpoint for every role in *config*, keyed by role name."""
    endpoints = {}
    for role, client_config in config.items():
        client_config = client_config or {}
        remote = get_remote_for_role(ctx, role)

        cert_name = client_config.get('ssl certificate', default_cert)
        ssl_certificate = None
        if cert_name:
            # the ssl task must already have created this certificate
            if not hasattr(ctx, 'ssl_certificates'):
                raise ConfigError('rgw: no ssl task found for option "ssl certificate"')
            ssl_certificate = ctx.ssl_certificates.get(cert_name, None)
            if not ssl_certificate:
                raise ConfigError('rgw: missing ssl certificate "{}"'.format(cert_name))

        default_port = 443 if ssl_certificate else 80
        port = client_config.get('port', default_port)

        # an empty or dot-terminated dns-name has the remote hostname appended
        dns_name = client_config.get('dns-name', '')
        if not dns_name or dns_name.endswith('.'):
            dns_name += remote.hostname

        website_dns_name = client_config.get('dns-s3website-name')
        if website_dns_name is not None:
            if not website_dns_name or website_dns_name.endswith('.'):
                website_dns_name += remote.hostname

        endpoints[role] = RGWEndpoint(remote.hostname, port, ssl_certificate,
                                      dns_name, website_dns_name)

    return endpoints
|
|
|
|
|
2014-03-25 01:19:41 +00:00
|
|
|
@contextlib.contextmanager
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        cluster_name, _, _ = teuthology.split_role(client)
        (remote,) = ctx.cluster.only(client).remotes.keys()

        data_pool = 'default.rgw.buckets.data'
        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool,
                                   ctx.rgw.data_pool_pg_size, cluster_name, 'rgw')

        # the bucket index pool is always replicated
        create_replicated_pool(remote, 'default.rgw.buckets.index',
                               ctx.rgw.index_pool_pg_size, cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
|
|
|
|
|
2013-07-11 00:45:35 +00:00
|
|
|
@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
    """ set a compression type in the default zone placement """
    log.info('Configuring compression type = %s', compression)
    modify_cmd = ['zone', 'placement', 'modify', '--rgw-zone', 'default',
                  '--placement-id', 'default-placement',
                  '--compression', compression]
    for client in clients:
        # the 'default' zone and zonegroup only exist after
        # RGWRados::init_complete() has run; a harmless 'user list'
        # forces that initialization before we touch the placement
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
        rgwadmin(ctx, client, cmd=modify_cmd, check_status=True)
    yield
|
2011-06-22 23:36:58 +00:00
|
|
|
|
2022-12-12 17:32:21 +00:00
|
|
|
@contextlib.contextmanager
def disable_inline_data(ctx, clients):
    """Turn off inline data in the default zone placement."""
    placement_args = ['zone', 'placement', 'modify', '--rgw-zone', 'default',
                      '--placement-id', 'default-placement',
                      '--placement-inline-data', 'false']
    for client in clients:
        # 'user list' triggers RGWRados::init_complete(), which creates the
        # 'default' zone and zonegroup that the placement modify relies on
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
        rgwadmin(ctx, client, cmd=placement_args, check_status=True)
    yield
|
|
|
|
|
2020-12-01 04:25:01 +00:00
|
|
|
@contextlib.contextmanager
def configure_datacache(ctx, clients, datacache_path):
    """
    Create and open up the rgw datacache directory on each client.

    :param datacache_path: absolute path for the cache directory, or None
                           to skip the setup entirely
    """
    log.info('Preparing directory for rgw datacache at %s', datacache_path)
    # the None check is loop-invariant, so decide once up front
    # (was: `if(datacache_path != None)` per client — use `is not None`)
    if datacache_path is not None:
        for client in clients:
            ctx.cluster.only(client).run(args=['mkdir', '-p', datacache_path])
            # the rgw daemon (user 'ceph') must be able to write here
            ctx.cluster.only(client).run(args=['sudo', 'chmod', 'a+rwx', datacache_path])
    else:
        log.info('path for datacache was not provided')
    yield
|
|
|
|
|
2019-01-10 01:16:15 +00:00
|
|
|
@contextlib.contextmanager
def configure_storage_classes(ctx, clients, storage_classes):
    """ add the given comma-separated storage classes to the default zone placement """

    # accept e.g. 'LUKEWARM, FROZEN' — strip whitespace around each name
    sc = [s.strip() for s in storage_classes.split(',')]

    for client in clients:
        # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
        # issue a 'radosgw-admin user list' command to trigger this
        rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)

        for storage_class in sc:
            log.info('Configuring storage class type = %s', storage_class)
            # register the storage class in the zonegroup placement target...
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'placement', 'add',
                          '--rgw-zone', 'default',
                          '--placement-id', 'default-placement',
                          '--storage-class', storage_class],
                     check_status=True)
            # ...and map it to a dedicated data pool in the zone
            rgwadmin(ctx, client,
                     cmd=['zone', 'placement', 'add',
                          '--rgw-zone', 'default',
                          '--placement-id', 'default-placement',
                          '--storage-class', storage_class,
                          '--data-pool', 'default.rgw.buckets.data.' + storage_class.lower()],
                     check_status=True)
    yield
|
|
|
|
|
2011-06-22 23:36:58 +00:00
|
|
|
@contextlib.contextmanager
|
|
|
|
def task(ctx, config):
|
|
|
|
"""
|
|
|
|
For example, to run rgw on all clients::
|
|
|
|
|
|
|
|
tasks:
|
|
|
|
- ceph:
|
|
|
|
- rgw:
|
|
|
|
|
|
|
|
To only run on certain clients::
|
|
|
|
|
|
|
|
tasks:
|
|
|
|
- ceph:
|
|
|
|
- rgw: [client.0, client.3]
|
2012-02-22 00:08:21 +00:00
|
|
|
|
|
|
|
or
|
|
|
|
|
|
|
|
tasks:
|
|
|
|
- ceph:
|
|
|
|
- rgw:
|
|
|
|
client.0:
|
|
|
|
client.3:
|
|
|
|
|
|
|
|
To run radosgw through valgrind:
|
|
|
|
|
|
|
|
tasks:
|
|
|
|
- ceph:
|
|
|
|
- rgw:
|
|
|
|
client.0:
|
|
|
|
valgrind: [--tool=memcheck]
|
|
|
|
client.3:
|
|
|
|
valgrind: [--tool=memcheck]
|
2018-04-25 23:00:11 +00:00
|
|
|
|
|
|
|
To configure data or index pool pg_size:
|
|
|
|
|
|
|
|
overrides:
|
|
|
|
rgw:
|
|
|
|
data_pool_pg_size: 256
|
|
|
|
index_pool_pg_size: 128
|
2011-06-22 23:36:58 +00:00
|
|
|
"""
|
|
|
|
if config is None:
|
2012-02-21 23:47:32 +00:00
|
|
|
config = dict(('client.{id}'.format(id=id_), None)
|
2014-04-15 15:50:07 +00:00
|
|
|
for id_ in teuthology.all_roles_of_type(
|
|
|
|
ctx.cluster, 'client'))
|
2012-02-21 23:47:32 +00:00
|
|
|
elif isinstance(config, list):
|
|
|
|
config = dict((name, None) for name in config)
|
2011-06-22 23:36:58 +00:00
|
|
|
|
2017-06-26 08:42:08 +00:00
|
|
|
clients = config.keys() # http://tracker.ceph.com/issues/20417
|
|
|
|
|
2014-03-26 01:04:35 +00:00
|
|
|
overrides = ctx.config.get('overrides', {})
|
|
|
|
teuthology.deep_merge(config, overrides.get('rgw', {}))
|
|
|
|
|
2013-08-17 17:14:37 +00:00
|
|
|
ctx.rgw = argparse.Namespace()
|
2022-05-06 17:32:26 +00:00
|
|
|
ctx.rgw_cloudtier = None
|
2015-11-23 16:04:12 +00:00
|
|
|
|
2017-04-18 19:34:59 +00:00
|
|
|
ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
|
|
|
|
ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
|
|
|
|
ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
|
2021-05-11 13:42:53 +00:00
|
|
|
ctx.rgw.frontend = config.pop('frontend', 'beast')
|
2017-04-18 19:34:59 +00:00
|
|
|
ctx.rgw.compression_type = config.pop('compression type', None)
|
2022-12-12 17:32:21 +00:00
|
|
|
ctx.rgw.inline_data = config.pop('inline data', True)
|
2019-01-10 01:16:15 +00:00
|
|
|
ctx.rgw.storage_classes = config.pop('storage classes', None)
|
2018-02-13 18:26:09 +00:00
|
|
|
default_cert = config.pop('ssl certificate', None)
|
2018-04-25 23:00:11 +00:00
|
|
|
ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
|
|
|
|
ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
|
2020-12-01 04:25:01 +00:00
|
|
|
ctx.rgw.datacache = bool(config.pop('datacache', False))
|
|
|
|
ctx.rgw.datacache_path = config.pop('datacache_path', None)
|
2016-12-16 18:23:09 +00:00
|
|
|
ctx.rgw.config = config
|
2016-01-19 14:56:18 +00:00
|
|
|
|
2017-06-26 08:42:08 +00:00
|
|
|
log.debug("config is {}".format(config))
|
|
|
|
log.debug("client list is {}".format(clients))
|
2018-02-21 16:19:48 +00:00
|
|
|
|
2018-02-13 18:26:09 +00:00
|
|
|
ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)
|
2018-02-21 16:19:48 +00:00
|
|
|
|
2017-05-19 20:05:36 +00:00
|
|
|
subtasks = [
|
2017-06-26 08:42:08 +00:00
|
|
|
lambda: create_pools(ctx=ctx, clients=clients),
|
2017-05-19 20:05:36 +00:00
|
|
|
]
|
2017-05-05 23:59:05 +00:00
|
|
|
if ctx.rgw.compression_type:
|
|
|
|
subtasks.extend([
|
2017-06-26 08:42:08 +00:00
|
|
|
lambda: configure_compression(ctx=ctx, clients=clients,
|
2017-05-05 23:59:05 +00:00
|
|
|
compression=ctx.rgw.compression_type),
|
|
|
|
])
|
2022-12-12 17:32:21 +00:00
|
|
|
if not ctx.rgw.inline_data:
|
|
|
|
subtasks.extend([
|
|
|
|
lambda: disable_inline_data(ctx=ctx, clients=clients),
|
|
|
|
])
|
2020-12-01 04:25:01 +00:00
|
|
|
if ctx.rgw.datacache:
|
|
|
|
subtasks.extend([
|
|
|
|
lambda: configure_datacache(ctx=ctx, clients=clients,
|
rgw: D3N Cache changes for Upstream
Upstreaming / rebase of #24500
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: change io_ctx pool per storage class
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: Changing free() to delete()
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: Addressing review comments
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: Fixing seg fault
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
Moving CacheRequest out of librados
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: cache initialization fix
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: fix missing spawn.hpp compile errors
resolves compilation errors similar to:
```
[ 15%] Building CXX object src/tools/immutable_object_cache/CMakeFiles/ceph_immutable_object_cache_lib.dir/CacheController.cc.o
In file included from /home/jenkins-build/build/workspace/ceph-pull-requests/src/rgw/rgw_common.h:31,
from /home/jenkins-build/build/workspace/ceph-pull-requests/src/rgw/rgw_rados.h:17,
from /home/jenkins-build/build/workspace/ceph-pull-requests/src/librados/IoCtxImpl.h:30,
from /home/jenkins-build/build/workspace/ceph-pull-requests/src/librados/RadosClient.h:35,
from /home/jenkins-build/build/workspace/ceph-pull-requests/src/neorados/RADOSImpl.h:27,
from /home/jenkins-build/build/workspace/ceph-pull-requests/src/neorados/RADOS.cc:37:
/home/jenkins-build/build/workspace/ceph-pull-requests/src/common/async/yield_context.h:31:10: fatal error: spawn/spawn.hpp: No such file or directory
31 | #include <spawn/spawn.hpp>
| ^~~~~~~~~~~~~~~~~
compilation terminated.
src/neorados/CMakeFiles/neorados_api_obj.dir/build.make:62: recipe for target 'src/neorados/CMakeFiles/neorados_api_obj.dir/RADOS.cc.o' failed
make[3]: *** [src/neorados/CMakeFiles/neorados_api_obj.dir/RADOS.cc.o] Error 1
```
Signed-off-by: Mark Kogan <mkogan@redhat.com>
Resolving merge conflict
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
Removing all logs and unnecessary comments
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
Cache Read and Write working
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
Initial Commit L1 Cache
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
post re-rebase merge, update fixes
Signed-off-by: Mark Kogan <mkogan@redhat.com>
fixup! post re-rebase merge, update fixes
rgw: continuation of rgwcache branch rebase
Signed-off-by: Mark Kogan <mkogan@redhat.com>
RGW: DataCache: post merge fixes
Signed-off-by: Mark Kogan <mkogan@redhat.com>
fixes of segmentation fault caused by oid
Signed-off-by: E. Ugur Kaynar <ukaynar@bu.edu>
rgw: fixes for segmentation faults and configuration processing
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: data cache first commit
Signed-off-by: Mania Abdi <mania.abdi287@gmail.com>
rgw: cleanup addressing PR comments
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: cleanup addressing PR comments, continuation.
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: pr cleanup addressing second review round
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: Addressing review comments, removing all D3N code from librados
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: for compilation err from removal of mydout() helper
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rge: addressing review comments
rgw: move d3n datacache into separate files
rgw: 1st part of datacache rebranding to d3n
fix forward declaration compile err (only with clang):
../src/rgw/rgw_cache.h:396:4: error: member access into incomplete type 'struct get_obj_data'
d->data_lock.lock();
^
../src/rgw/rgw_cache.h:365:8: note: forward declaration of 'get_obj_data'
struct get_obj_data;
^
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: addressing review comments, datacache rebranding to d3n cache
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: Cleaning up unused D3N cache code
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: cont. cleaning up of rgw_obj_data()
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: Removing redundant code, fix for multipart S3 objects
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: fix: incorrect content len on multipart get
in s3tests_boto3.functional.test_s3:test_multipart_copy_versioned
when d3n cache is disabled
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: fix segfault reading from cache
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: fix segfault in multisite
sync on secondary site
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: fix segfault in multisite teuthology tests, cont.
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: Adding drain to wait for all AIO reads to complete
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
rgw: fix for using read() by liabio or posix io per config
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: improve persistent data cache directory handling
on start create the persistent datacache directory if necessary
and add an option to evict it's content if already exists
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: fix possible segfault during eviction
Signed-off-by: Mark Kogan <mkogan@redhat.com>
Co-authored-by: Mania Abdi <mania.abdi287@gmail.com>
Co-authored-by: E. Ugur Kaynar <ukaynar@bu.edu>
Co-authored-by: Aishwarya Mathuria <amathuri@redhat.com>
Co-authored-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: addressing latest review comments
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: add debug logs for cache in/out flow
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: move the L2 cache functionality to separate PR
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: addressing review comments
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: address java_s3tests teuthology issues
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: do not handle compressed objects fro now
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: l2 cleanup and log fixups + post dpp
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: thread dpp thru get_obj_iterate_cb() and related
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: bypass reading versioned objects from cache
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: cleanup and fix s3tests
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: allow to enable cache only on beast
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: delete the content of the cache directory on rgw start
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: re-enable d3n cache with civetweb frontend
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: rebase post zipper 10
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: address teuthoogy valgrind leaks detected
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: mitigating valgrind leaks
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: remove rgw_d3n_l1_libaio_read option
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: wip segfault fix
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: mitigate libaio SIGEV_THREAD cb race
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: change libaio signaling mechanism
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: wip cont. libaio cb thread race
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: wip libaio cb thread race
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: libaio cleanups and edge case handling fixes
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: narrow the libaio locking scope
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: add libaio req ordering mechanism
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: fix lock regression
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: addressing reviwe comments and cleasnup
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: libaio locks cleanup
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: refactor libaio abstraction to share the ioc implementation
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: addressing latest review comments and cleanup
Signed-off-by: Mark Kogan <mkogan@redhat.com>
rgw: d3n: address review comments, cont.
Signed-off-by: Mark Kogan <mkogan@redhat.com>
Co-authored-by: Mania Abdi <mania.abdi287@gmail.com>
Co-authored-by: E. Ugur Kaynar <ukaynar@bu.edu>
Co-authored-by: Aishwarya Mathuria <amathuri@redhat.com>
Co-authored-by: Ali Maredia <amaredia@redhat.com>
Co-authored-by: Feng Hualong <hualong.feng@intel.com>
2020-09-27 17:25:11 +00:00
|
|
|
datacache_path=ctx.rgw.datacache_path),
|
2020-12-01 04:25:01 +00:00
|
|
|
])
|
2019-01-10 01:16:15 +00:00
|
|
|
if ctx.rgw.storage_classes:
|
|
|
|
subtasks.extend([
|
|
|
|
lambda: configure_storage_classes(ctx=ctx, clients=clients,
|
|
|
|
storage_classes=ctx.rgw.storage_classes),
|
|
|
|
])
|
2017-05-05 23:59:05 +00:00
|
|
|
subtasks.extend([
|
2017-06-26 08:42:08 +00:00
|
|
|
lambda: start_rgw(ctx=ctx, config=config, clients=clients),
|
2017-04-26 01:38:52 +00:00
|
|
|
])
|
2014-04-16 14:35:04 +00:00
|
|
|
|
|
|
|
with contextutil.nested(*subtasks):
|
|
|
|
yield
|