mirror of
https://github.com/ceph/ceph
synced 2025-02-24 19:47:44 +00:00
QA: Add D4N teuthology suite
Signed-off-by: Samarah <samarah.uriarte@ibm.com>
This commit is contained in:
parent
7abe77962a
commit
cd1d249678
0
qa/suites/rgw/d4n/+
Normal file
0
qa/suites/rgw/d4n/+
Normal file
1
qa/suites/rgw/d4n/.qa
Symbolic link
1
qa/suites/rgw/d4n/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
2
qa/suites/rgw/d4n/cluster.yaml
Normal file
2
qa/suites/rgw/d4n/cluster.yaml
Normal file
@ -0,0 +1,2 @@
|
||||
# Single test node hosting a monitor, three OSDs, a manager and one client.
roles:
- [mon.a, osd.0, osd.1, osd.2, mgr.0, client.0]
|
10
qa/suites/rgw/d4n/overrides.yaml
Normal file
10
qa/suites/rgw/d4n/overrides.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
# Overrides for the D4N suite: run RGW requests through the D4N filter
# with verbose RGW logging, served by the beast frontend.
overrides:
  ceph:
    conf:
      client:
        setuser: ceph
        setgroup: ceph
        debug rgw: 20
        # Enable the D4N cache filter under test.
        rgw filter: d4n
  rgw:
    frontend: beast
|
1
qa/suites/rgw/d4n/supported-random-distro$
Symbolic link
1
qa/suites/rgw/d4n/supported-random-distro$
Symbolic link
@ -0,0 +1 @@
|
||||
.qa/distros/supported-random-distro$
|
1
qa/suites/rgw/d4n/tasks/.qa
Symbolic link
1
qa/suites/rgw/d4n/tasks/.qa
Symbolic link
@ -0,0 +1 @@
|
||||
../.qa/
|
19
qa/suites/rgw/d4n/tasks/rgw_d4ntests.yaml
Normal file
19
qa/suites/rgw/d4n/tasks/rgw_d4ntests.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
# Task sequence: install (plus s3cmd/redis), bring up the cluster and RGW,
# prepare a tox venv, open up /var/lib/ceph for the tests, run the d4ntests
# setup/teardown task, then execute the workunit that drives the checks.
tasks:
- install:
    # extra packages added for the rgw-d4ntests task
    extra_system_packages:
      deb: ['s3cmd', 'redis']
      rpm: ['s3cmd', 'redis']
- ceph:
- rgw: [client.0]
- tox: [client.0]
- exec:
    client.0:
      - sudo chmod 0777 /var/lib/ceph
      - sudo chmod 0777 /var/lib/ceph/radosgw
- d4ntests:
    client.0:
- workunit:
    clients:
      client.0:
        - rgw/run-d4n.sh
|
152
qa/tasks/d4ntests.py
Normal file
152
qa/tasks/d4ntests.py
Normal file
@ -0,0 +1,152 @@
|
||||
import logging
|
||||
|
||||
from teuthology import misc as teuthology
|
||||
from teuthology.task import Task
|
||||
from teuthology.orchestra import run
|
||||
from teuthology.packaging import remove_package
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def get_toxvenv_dir(ctx):
    """Return the path of the virtualenv created by the 'tox' task."""
    return ctx.tox.venv_path
||||
|
||||
def toxvenv_sh(ctx, remote, args, **kwargs):
    """Run *args* on *remote* inside the tox task's virtualenv."""
    activate = get_toxvenv_dir(ctx) + '/bin/activate'
    command = ['source', activate, run.Raw('&&')]
    command.extend(args)
    return remote.sh(command, **kwargs)
||||
|
||||
# Test S3 user attributes fed to 'radosgw-admin user create' below.
# NOTE(review): the access/secret keys must match the ACCESS_KEY/SECRET_KEY
# constants in qa/workunits/rgw/test_rgw_d4n.py — keep them in sync.
display_name='Foo'
email='foo@foo.com'
access_key='test3'
secret_key='test3'
||||
|
||||
class D4NTests(Task):
    """
    Teuthology task wrapping the RGW D4N filter tests.

    On begin(): creates the 's3main' S3 user and starts a Redis server on
    every configured client.  On end(): shuts Redis down, removes the extra
    packages installed for this run and deletes the test user.
    """

    def __init__(self, ctx, config):
        super(D4NTests, self).__init__(ctx, config)
        self.log = log
        log.info('D4N Tests: __INIT__ ')

        clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(self.ctx.cluster, 'client')]
        # Only keep the clients explicitly mentioned in this task's config.
        self.all_clients = []
        for client in clients:
            if client in self.config:
                self.all_clients.extend([client])
        # BUG FIX: the original tested "is None", which could never be true
        # because the list is created just above, and then assigned a bare
        # string — which the 'for client in self.all_clients' loops below
        # would have iterated character by character.  Fall back to a list.
        if not self.all_clients:
            self.all_clients = ['client.0']

        # Map of S3 user id -> tenant-ish tag; create_user()/delete_user()
        # iterate over the keys.
        self.user = {'s3main': 'tester'}

    def setup(self):
        super(D4NTests, self).setup()
        log.info('D4N Tests: SETUP')

    def begin(self):
        """Create the S3 test user and start Redis before the tests run."""
        super(D4NTests, self).begin()
        log.info('D4N Tests: BEGIN')

        for (host, roles) in self.ctx.cluster.remotes.items():
            log.debug('D4N Tests: Cluster config is: {cfg}'.format(cfg=roles))
            log.debug('D4N Tests: Host is: {host}'.format(host=host))

        self.create_user()
        self.redis_startup()

    def end(self):
        """Tear down: stop Redis, remove packages, delete the test user."""
        super(D4NTests, self).end()
        log.info('D4N Tests: END')

        self.redis_shutdown()

        for client in self.all_clients:
            self.remove_packages(client)
            self.delete_user(client)

    def create_user(self):
        """Create each configured S3 user via radosgw-admin on every client."""
        log.info("D4N Tests: Creating S3 user...")
        testdir = teuthology.get_testdir(self.ctx)

        for client in self.all_clients:
            for s3_user_id in self.user:
                log.debug(
                    'D4N Tests: Creating user {s3_user_id}'.format(s3_user_id=s3_user_id))
                cluster_name, daemon_type, client_id = teuthology.split_role(
                    client)
                client_with_id = daemon_type + '.' + client_id
                self.ctx.cluster.only(client).run(
                    args=[
                        'sudo',
                        'adjust-ulimits',
                        'ceph-coverage',
                        '{tdir}/archive/coverage'.format(tdir=testdir),
                        'radosgw-admin',
                        '-n', client_with_id,
                        'user', 'create',
                        '--uid', s3_user_id,
                        '--display-name', display_name,
                        '--access-key', access_key,
                        '--secret', secret_key,
                        '--email', email,
                        '--cluster', cluster_name,
                    ],
                )

    def redis_startup(self):
        """Best-effort: daemonize a Redis server on every client."""
        try:
            for client in self.all_clients:
                self.ctx.cluster.only(client).run(
                    args=[
                        'sudo',
                        'redis-server',
                        '--daemonize',
                        'yes'
                    ],
                )
        except Exception as err:
            # Deliberately non-fatal; the workunit will fail loudly later
            # if Redis is actually unavailable.
            log.debug('D4N Tests: Error starting up a Redis server')
            log.debug(err)

    def redis_shutdown(self):
        """Best-effort: stop the Redis server on every client."""
        try:
            for client in self.all_clients:
                self.ctx.cluster.only(client).run(
                    args=[
                        'sudo',
                        'redis-cli',
                        'shutdown',
                    ],
                )
        except Exception as err:
            log.debug('D4N Tests: Error shutting down a Redis server')
            log.debug(err)

    def remove_packages(self, client):
        """Uninstall the packages this suite added on *client*."""
        (remote,) = self.ctx.cluster.only(client).remotes.keys()
        remove_package('s3cmd', remote)
        remove_package('redis', remote)

    def delete_user(self, client):
        """Delete each configured S3 user (and its data) on *client*."""
        log.info("D4N Tests: Deleting S3 user...")
        testdir = teuthology.get_testdir(self.ctx)

        for s3_user_id in self.user:
            # Consistency fix: derive the cluster name and client id the same
            # way create_user() does instead of hard-coding '--cluster ceph'.
            cluster_name, daemon_type, client_id = teuthology.split_role(
                client)
            client_with_id = daemon_type + '.' + client_id
            self.ctx.cluster.only(client).run(
                args=[
                    'sudo',
                    'adjust-ulimits',
                    'ceph-coverage',
                    '{tdir}/archive/coverage'.format(tdir=testdir),
                    'radosgw-admin',
                    '-n', client_with_id,
                    'user', 'rm',
                    '--uid', s3_user_id,
                    '--purge-data',
                    '--cluster', cluster_name,
                ],
            )
||||
|
||||
# teuthology discovers tasks via this module-level 'task' name.
task = D4NTests
|
@ -18,6 +18,7 @@ basepython = python3
|
||||
deps =
|
||||
mypy
|
||||
types-boto
|
||||
types-redis
|
||||
types-requests
|
||||
types-jwt
|
||||
types-paramiko
|
||||
|
16
qa/workunits/rgw/run-d4n.sh
Executable file
16
qa/workunits/rgw/run-d4n.sh
Executable file
@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env bash
# Workunit entry point: build a throwaway venv next to this script and run
# the D4N filter integration test against the local radosgw + Redis.
set -ex
# FIX: quote path expansions so the script survives directories with spaces;
# use $(...) instead of legacy backticks.
mydir=$(dirname "$0")

python3 -m venv "$mydir"
source "$mydir/bin/activate"
pip install pip --upgrade
pip install redis
pip install configobj
pip install boto3

# run test
"$mydir/bin/python3" "$mydir/test_rgw_d4n.py"

deactivate
echo OK.
|
243
qa/workunits/rgw/test_rgw_d4n.py
Normal file
243
qa/workunits/rgw/test_rgw_d4n.py
Normal file
@ -0,0 +1,243 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import logging as log
|
||||
from configobj import ConfigObj
|
||||
import boto3
|
||||
import redis
|
||||
import subprocess
|
||||
import json
|
||||
|
||||
log.basicConfig(level=log.DEBUG)
|
||||
|
||||
""" Constants """
|
||||
ACCESS_KEY = 'test3'
|
||||
SECRET_KEY = 'test3'
|
||||
|
||||
def exec_cmd(cmd):
    """Run *cmd* through the shell.

    Returns the command's stdout as bytes on success, or ``False`` on any
    failure (non-zero exit or exception) — callers check the return value.
    """
    log.debug("exec_cmd(%s)", cmd)
    try:
        # Idiom fix: subprocess.run replaces the manual Popen/communicate
        # pair; behavior (captured stdout/stderr, shell execution) is the same.
        proc = subprocess.run(cmd, shell=True, capture_output=True)
        if proc.returncode != 0:
            raise Exception("error: %s \nreturncode: %s" % (proc.stderr, proc.returncode))
        log.info('command succeeded')
        if proc.stdout is not None:
            log.info(proc.stdout)
        return proc.stdout
    except Exception as e:
        log.error('command failed')
        log.error(e)
        return False
|
||||
|
||||
def get_radosgw_endpoint():
    """Locate the local radosgw endpoint by scanning listening TCP sockets.

    Returns a ``(endpoint, proto)`` pair, e.g. ``('http://127.0.0.1:80', 'http')``.
    """
    # 'rados|valgr' matches radosgw whether run directly or under valgrind.
    out = exec_cmd('sudo netstat -nltp | egrep "rados|valgr"')  # short for radosgw/valgrind
    tokens = out.decode('utf8').split(" ")
    # First token containing ':' is the local address column; take its port.
    port = [tok for tok in tokens if ':' in tok][0].split(':')[1]
    log.info('radosgw port: %s' % port)

    hostname = '127.0.0.1'
    proto = "https" if port == '443' else "http"

    endpoint = "%s://%s:%s" % (proto, hostname, port)

    log.info("radosgw endpoint is: %s", endpoint)
    return endpoint, proto
|
||||
|
||||
def create_s3cmd_config(path, proto):
    """
    Creates a minimal config file for s3cmd
    """
    log.info("Creating s3cmd config...")

    log.info("proto for s3cmd config is %s", proto)
    use_https_config = "True" if proto == "https" else "False"

    s3cmd_config = ConfigObj(
        indent_type='',
        infile={
            'default':
                {
                'host_bucket': 'no.way.in.hell',
                'use_https': use_https_config,
                },
            }
        )

    # ConfigObj.write wants a binary file object.
    with open(path, 'wb') as f:
        s3cmd_config.write(f)
    log.info("s3cmd config written")
|
||||
|
||||
def get_cmd_output(cmd_out):
    """Decode raw command output bytes and strip surrounding newlines."""
    return cmd_out.decode('utf8').strip('\n')
|
||||
|
||||
def test_directory_methods(r, client, obj):
    """Check the D4N directory entries in Redis around S3 put/get/delete.

    :param r: redis client created with decode_responses=True (string values)
    :param client: boto3 S3 client (unused here; kept for a uniform signature
                   with test_cache_methods)
    :param obj: boto3 Object handle for bucket 'bkt', key 'test.txt'
    """
    test_txt = b'test'

    # setValue call
    response_put = obj.put(Body=test_txt)

    assert(response_put.get('ResponseMetadata').get('HTTPStatusCode') == 200)

    # The put should have populated a directory hash entry in Redis.
    data = r.hgetall('rgw-object:test.txt:directory')

    # 'hosts' is the Redis endpoint itself — presumably the directory records
    # where the cached object lives; confirm against the D4N directory code.
    assert(data.get('key') == 'rgw-object:test.txt:directory')
    assert(data.get('size') == '4')
    assert(data.get('bucket_name') == 'bkt')
    assert(data.get('obj_name') == 'test.txt')
    assert(data.get('hosts') == '127.0.0.1:6379')

    # getValue call
    response_get = obj.get()

    assert(response_get.get('ResponseMetadata').get('HTTPStatusCode') == 200)

    # A read must leave the directory entry intact and unchanged.
    data = r.hgetall('rgw-object:test.txt:directory')

    assert(data.get('key') == 'rgw-object:test.txt:directory')
    assert(data.get('size') == '4')
    assert(data.get('bucket_name') == 'bkt')
    assert(data.get('obj_name') == 'test.txt')
    assert(data.get('hosts') == '127.0.0.1:6379')

    # delValue call
    response_del = obj.delete()

    # Deleting the object must also remove its directory entry.
    assert(response_del.get('ResponseMetadata').get('HTTPStatusCode') == 204)
    assert(r.exists('rgw-object:test.txt:directory') == False)

    # Start the next test from an empty Redis database.
    r.flushall()
|
||||
|
||||
def test_cache_methods(r, client, obj):
    """Check the D4N cache entries in Redis around S3 put/get/copy/delete.

    :param r: redis client WITHOUT decode_responses — cached attrs are raw bytes
    :param client: boto3 S3 client (used for copy_object)
    :param obj: boto3 Object handle for bucket 'bkt', key 'test.txt'
    """
    test_txt = b'test'

    # setObject call
    response_put = obj.put(Body=test_txt)

    assert(response_put.get('ResponseMetadata').get('HTTPStatusCode') == 200)

    # Compare the cached xattrs against what radosgw-admin reports for the
    # same object; the trailing NUL padding mirrors RGW's on-disk encoding.
    data = r.hgetall('rgw-object:test.txt:cache')
    output = subprocess.check_output(['radosgw-admin', 'object', 'stat', '--bucket=bkt', '--object=test.txt'])
    attrs = json.loads(output.decode('latin-1'))

    assert((data.get(b'user.rgw.tail_tag')) == attrs.get('attrs').get('user.rgw.tail_tag').encode("latin-1") + b'\x00')
    assert((data.get(b'user.rgw.pg_ver')) == attrs.get('attrs').get('user.rgw.pg_ver').encode("latin-1") + b'\x00\x00\x00\x00\x00\x00\x00')
    assert((data.get(b'user.rgw.idtag')) == attrs.get('tag').encode("latin-1") + b'\x00')
    assert((data.get(b'user.rgw.etag')) == attrs.get('etag').encode("latin-1"))
    assert((data.get(b'user.rgw.x-amz-content-sha256')) == attrs.get('attrs').get('user.rgw.x-amz-content-sha256').encode("latin-1") + b'\x00')
    assert((data.get(b'user.rgw.source_zone')) == attrs.get('attrs').get('user.rgw.source_zone').encode("latin-1") + b'\x00\x00\x00\x00')
    assert((data.get(b'user.rgw.x-amz-date')) == attrs.get('attrs').get('user.rgw.x-amz-date').encode("latin-1") + b'\x00')

    # tmp1..tmp4 appear to be the fixed byte framing of RGW's encoded object
    # manifest, with the run-specific bucket_id/prefix spliced in between.
    # NOTE(review): these blobs are tied to a specific RGW encoding version —
    # confirm against cls_rgw if they start failing after an encoding change.
    tmp1 = '\x08\x06L\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x06\x84\x00\x00\x00\n\nj\x00\x00\x00\x03\x00\x00\x00bkt+\x00\x00\x00'
    tmp2 = '+\x00\x00\x00'
    tmp3 = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\b\x00\x00\x00test.txt\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00!\x00\x00\x00'
    tmp4 = '\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01 \x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
        '\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00default-placement\x11\x00\x00\x00default-placement\x00\x00\x00\x00\x02\x02\x18' \
        '\x00\x00\x00\x04\x00\x00\x00none\x01\x01\t\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    assert(data.get(b'user.rgw.manifest') == tmp1.encode("latin-1") + attrs.get('manifest').get('tail_placement').get('bucket').get('bucket_id').encode("utf-8")
        + tmp2.encode("latin-1") + attrs.get('manifest').get('tail_placement').get('bucket').get('bucket_id').encode("utf-8")
        + tmp3.encode("latin-1") + attrs.get('manifest').get('prefix').encode("utf-8")
        + tmp4.encode("latin-1"))

    # tmp5 looks like the encoded ACL for owner 's3main'/'Foo' (matching the
    # user created by the d4ntests task) — verify against RGW ACL encoding.
    tmp5 = '\x02\x02\x81\x00\x00\x00\x03\x02\x11\x00\x00\x00\x06\x00\x00\x00s3main\x03\x00\x00\x00Foo\x04\x03d\x00\x00\x00\x01\x01\x00\x00\x00\x06\x00\x00' \
        '\x00s3main\x0f\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00s3main\x05\x035\x00\x00\x00\x02\x02\x04\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00s3main' \
        '\x00\x00\x00\x00\x00\x00\x00\x00\x02\x02\x04\x00\x00\x00\x0f\x00\x00\x00\x03\x00\x00\x00Foo\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
        '\x00\x00\x00'
    assert((data.get(b'user.rgw.acl')) == tmp5.encode("latin-1"))

    # getObject call
    response_get = obj.get()

    assert(response_get.get('ResponseMetadata').get('HTTPStatusCode') == 200)

    # Copy to new object with 'COPY' directive; metadata value should not change
    obj.metadata.update({'test':'value'})
    m = obj.metadata
    m['test'] = 'value_replace'

    # copyObject call
    client.copy_object(Bucket='bkt', Key='test_copy.txt', CopySource='bkt/test.txt', Metadata = m, MetadataDirective='COPY')

    # With COPY the new metadata must NOT appear in the copy's cache entry.
    assert(r.hexists('rgw-object:test_copy.txt:cache', b'user.rgw.x-amz-meta-test') == 0)

    # Update object with 'REPLACE' directive; metadata value should change
    client.copy_object(Bucket='bkt', Key='test.txt', CopySource='bkt/test.txt', Metadata = m, MetadataDirective='REPLACE')

    data = r.hget('rgw-object:test.txt:cache', b'user.rgw.x-amz-meta-test')

    # Cached value carries RGW's trailing NUL terminator.
    assert(data == b'value_replace\x00')

    # Ensure cache entry exists in cache before deletion
    assert(r.exists('rgw-object:test.txt:cache') == True)

    # delObject call
    response_del = obj.delete()

    # Deleting the object must evict its cache entry as well.
    assert(response_del.get('ResponseMetadata').get('HTTPStatusCode') == 204)
    assert(r.exists('rgw-object:test.txt:cache') == False)

    # Leave Redis clean for any subsequent runs.
    r.flushall()
|
||||
|
||||
def main():
    """
    execute the d4n test
    """

    # Setup for test
    log.info("D4NFilterTest setup.")

    # Working directory is used below as the location for the s3cmd config.
    out = exec_cmd('pwd')
    pwd = get_cmd_output(out)
    log.debug("pwd is: %s", pwd)

    endpoint, proto = get_radosgw_endpoint()

    # Credentials match the 's3main' user created by the d4ntests task.
    client = boto3.client(service_name='s3',
                          aws_access_key_id=ACCESS_KEY,
                          aws_secret_access_key=SECRET_KEY,
                          endpoint_url=endpoint,
                          use_ssl=False,
                          verify=False)

    s3 = boto3.resource('s3',
                        aws_access_key_id=ACCESS_KEY,
                        aws_secret_access_key=SECRET_KEY,
                        endpoint_url=endpoint,
                        use_ssl=False,
                        verify=False)

    bucket = s3.Bucket('bkt')
    bucket.create()
    obj = s3.Object(bucket_name='bkt', key='test.txt')

    # Check for Redis instance
    try:
        connection = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
        connection.ping()
    except:
        # Fail fast with context rather than timing out mid-test.
        log.debug("ERROR: Redis instance not running.")
        raise

    # Create s3cmd config
    s3cmd_config_path = pwd + '/s3cfg'
    create_s3cmd_config(s3cmd_config_path, proto)

    # Directory entries are stored as strings, so decode responses here.
    r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

    test_directory_methods(r, client, obj)

    # Responses should not be decoded
    r = redis.Redis(host='localhost', port=6379, db=0)

    test_cache_methods(r, client, obj)

    log.info("D4NFilterTest successfully completed.")
|
||||
|
||||
# Script entry point: this workunit is executed directly by run-d4n.sh.
main()
log.info("Completed D4N tests")
|
@ -186,7 +186,6 @@ add_library(common-common-objs OBJECT
|
||||
# retrieved OpenSSL location. This is especially important when cross
|
||||
# compiling (e.g. targeting Windows).
|
||||
target_include_directories(common-common-objs PRIVATE ${OPENSSL_INCLUDE_DIR})
|
||||
|
||||
# for options.cc
|
||||
target_compile_definitions(common-common-objs PRIVATE
|
||||
"CMAKE_INSTALL_LIBDIR=\"${CMAKE_INSTALL_LIBDIR}\""
|
||||
|
Loading…
Reference in New Issue
Block a user