Merge pull request #52931 from yuvalif/wip-yuval-lua-teuthology

rgw/test/lua: add lua integration tests suite

reviewed-by: TRYTOBE8TME, cbodley
Yuval Lifshitz 2023-11-24 11:38:51 +02:00 committed by GitHub
commit 213923165c
21 changed files with 918 additions and 15 deletions


@ -283,7 +283,8 @@ BuildRequires: librabbitmq-devel
BuildRequires: librdkafka-devel
%endif
%if 0%{with lua_packages}
BuildRequires: %{luarocks_package_name}
Requires: lua-devel
Requires: %{luarocks_package_name}
%endif
%if 0%{with make_check}
BuildRequires: hostname

debian/control

@ -72,7 +72,6 @@ Build-Depends: automake,
libre2-dev,
libutf8proc-dev (>= 2.2.0),
librdkafka-dev,
luarocks,
libthrift-dev (>= 0.13.0),
libyaml-cpp-dev (>= 0.6),
libzstd-dev <pkg.ceph.check>,
@ -919,6 +918,8 @@ Section: libs
Depends: librados2 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends},
liblua5.3-dev,
luarocks,
Description: RADOS Gateway client library
RADOS is a distributed object store used by the Ceph distributed
storage system. This package provides a REST gateway to the

qa/suites/rgw/lua/% (new empty file)

qa/suites/rgw/lua/.qa (symbolic link)

@ -0,0 +1 @@
../.qa/


@ -0,0 +1 @@
.qa/rgw_frontend/beast.yaml


@ -0,0 +1 @@
.qa/objectstore/bluestore-bitmap.yaml


@ -0,0 +1 @@
.qa/clusters/fixed-2.yaml


@ -0,0 +1,9 @@
overrides:
  ceph:
    conf:
      client:
        setuser: ceph
        setgroup: ceph
        debug rgw: 20
  rgw:
    storage classes: LUKEWARM, FROZEN


@ -0,0 +1 @@
.qa/distros/supported-random-distro$/


@ -0,0 +1,13 @@
tasks:
- install:
- ceph:
- openssl_keys:
- rgw: [client.0]
- tox: [client.0]
overrides:
  ceph:
    conf:
      global:
        osd_min_pg_log_entries: 10
        osd_max_pg_log_entries: 10


@ -0,0 +1,5 @@
tasks:
- tox: [client.0]
- lua-tests:
    client.0:
      rgw_server: client.0

qa/tasks/lua_tests.py

@ -0,0 +1,254 @@
"""
Run a set of lua tests on rgw.
"""
from io import BytesIO
from configobj import ConfigObj
import base64
import contextlib
import logging
import os
import random
import string
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download(ctx, config):
assert isinstance(config, dict)
log.info('Downloading lua-tests...')
testdir = teuthology.get_testdir(ctx)
branch = ctx.config.get('suite_branch')
repo = ctx.config.get('suite_repo')
log.info('Using branch %s from %s for lua tests', branch, repo)
for (client, client_config) in config.items():
ctx.cluster.only(client).run(
args=['git', 'clone', '-b', branch, repo, '{tdir}/ceph'.format(tdir=testdir)],
)
sha1 = client_config.get('sha1')
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/ceph'.format(tdir=testdir),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing lua-tests...')
testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/ceph'.format(tdir=testdir),
],
)
def _config_user(luatests_conf, section, user):
"""
Configure users for this section by stashing away keys, ids, and
email addresses.
"""
luatests_conf[section].setdefault('user_id', user)
luatests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
luatests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
luatests_conf[section].setdefault('access_key',
''.join(random.choice(string.ascii_uppercase) for i in range(20)))
luatests_conf[section].setdefault('secret_key',
base64.b64encode(os.urandom(40)).decode())
@contextlib.contextmanager
def create_users(ctx, config):
"""
Create the main s3 user.
"""
assert isinstance(config, dict)
log.info('Creating rgw user...')
testdir = teuthology.get_testdir(ctx)
users = {'s3 main': 'foo'}
for client in config['clients']:
luatests_conf = config['luatests_conf'][client]
for section, user in users.items():
_config_user(luatests_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=luatests_conf[section]['user_id'], host=client))
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'create',
'--uid', luatests_conf[section]['user_id'],
'--display-name', luatests_conf[section]['display_name'],
'--access-key', luatests_conf[section]['access_key'],
'--secret', luatests_conf[section]['secret_key'],
'--cluster', cluster_name,
],
)
try:
yield
finally:
for client in config['clients']:
for user in users.values():
uid = '{user}.{client}'.format(user=user, client=client)
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'rm',
'--uid', uid,
'--purge-data',
'--cluster', cluster_name,
],
)
@contextlib.contextmanager
def configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring lua-tests...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
luatests_conf = config['luatests_conf'][client]
conf_fp = BytesIO()
luatests_conf.write(conf_fp)
remote.write_file(
path='{tdir}/ceph/src/test/rgw/lua/lua-tests.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
try:
yield
finally:
log.info('Removing lua-tests.conf file...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=['rm', '-f',
'{tdir}/ceph/src/test/rgw/lua/lua-tests.{client}.conf'.format(tdir=testdir,client=client),
],
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
@contextlib.contextmanager
def run_tests(ctx, config):
"""
Run the lua tests after everything is set up.
:param ctx: Context passed to task
:param config: specific configuration information
"""
assert isinstance(config, dict)
log.info('Running lua-tests...')
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
# test marks to use by default
attr = ['basic_test', 'request_test', 'example_test']
if 'extra_attr' in client_config:
attr = client_config.get('extra_attr')
args = ['cd', '{tdir}/ceph/src/test/rgw/lua/'.format(tdir=testdir), run.Raw('&&'),
'LUATESTS_CONF=./lua-tests.{client}.conf'.format(client=client),
'tox', '--', '-v', '-m', ' or '.join(attr)]
toxvenv_sh(ctx, remote, args, label="lua tests against rgw")
yield
@contextlib.contextmanager
def task(ctx,config):
"""
If you want to run the tests against changes pushed to your own repository, provide the 'suite_branch' and 'suite_repo'
parameters in your teuthology-suite command. An example command is::
teuthology-suite --ceph-repo https://github.com/ceph/ceph-ci.git -s rgw:lua --ceph your_ceph_branch_name -m smithi --suite-repo https://github.com/your_name/ceph.git --suite-branch your_branch_name
"""
assert hasattr(ctx, 'rgw'), 'lua-tests must run after the rgw task'
assert hasattr(ctx, 'tox'), 'lua-tests must run after the tox task'
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients=config.keys()
log.debug('config is %s', config)
luatests_conf = {}
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 'luatests: no rgw endpoint for {}'.format(client)
luatests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port':endpoint.port,
'host':endpoint.dns_name,
},
's3 main':{}
}
)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: create_users(ctx=ctx, config=dict(
clients=clients,
luatests_conf=luatests_conf,
)),
lambda: configure(ctx=ctx, config=dict(
clients=config,
luatests_conf=luatests_conf,
)),
lambda: run_tests(ctx=ctx, config=config),
):
pass
yield


@ -173,6 +173,31 @@ int create_directory_p(const DoutPrefixProvider *dpp, const fs::path& p) {
return 0;
}
void get_luarocks_config(const bp::filesystem::path& process,
const std::string& luarocks_path,
const bp::environment& env, std::string& output) {
bp::ipstream is;
auto cmd = process.string();
cmd.append(" config");
output.append("Lua CMD: ");
output.append(cmd);
try {
bp::child c(cmd, env, bp::std_in.close(), (bp::std_err & bp::std_out) > is, bp::start_dir(luarocks_path));
std::string line;
do {
if (!line.empty()) {
output.append("\n\t").append(line);
}
} while (c.running() && std::getline(is, line));
c.wait();
output.append("\n\t").append("exit code: ").append(std::to_string(c.exit_code()));
} catch (const std::runtime_error& err) {
output.append("\n\t").append(err.what());
}
}
int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
optional_yield y, const std::string& luarocks_path,
packages_t& failed_packages, std::string& install_dir) {
@ -201,6 +226,7 @@ int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
luarocks_path << ". error: " << rc << dendl;
return rc;
}
// create a temporary sub-directory to install all luarocks packages
std::string tmp_path_template = luarocks_path;// fs::temp_directory_path();
@ -214,28 +240,46 @@ int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
}
install_dir.assign(tmp_luarocks_path);
// get a handle to the current environment
auto env = boost::this_process::environment();
bp::environment _env = env;
_env["HOME"] = luarocks_path;
if (dpp->get_cct()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
std::string output;
get_luarocks_config(p, luarocks_path, _env, output);
ldpp_dout(dpp, 20) << output << dendl;
}
// the lua rocks install dir will be created by luarocks the first time it is called
for (const auto& package : packages) {
bp::ipstream is;
const auto cmd = p.string() + " install --lua-version " + CEPH_LUA_VERSION + " --tree " + install_dir + " --deps-mode one " + package;
bp::child c(cmd, bp::std_in.close(), (bp::std_err & bp::std_out) > is);
auto cmd = p.string();
cmd.append(" install --no-doc --lua-version ").
append(CEPH_LUA_VERSION).
append(" --tree ").
append(install_dir).
append(" --deps-mode one ").
append(package);
bp::child c(cmd, _env, bp::std_in.close(), (bp::std_err & bp::std_out) > is, bp::start_dir(luarocks_path));
// once package reload is supported, code should yield when reading output
std::string lines = std::string("Lua CMD: ") + cmd;
std::string line;
do {
if (!line.empty()) {
lines.append("\n\t");
lines.append(line);
}
} while (c.running() && std::getline(is, line));
if (dpp->get_cct()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
// TODO: yield when reading output
std::string lines = std::string("Lua CMD: ");
lines.append(cmd);
std::string line;
do {
if (!line.empty()) {
lines.append("\n\t").append(line);
}
} while (c.running() && std::getline(is, line));
ldpp_dout(dpp, 20) << lines << dendl;
}
c.wait();
if (c.exit_code()) {
failed_packages.insert(package);
}
ldpp_dout(dpp, 20) << lines << dendl;
}
return 0;


@ -0,0 +1,9 @@
===================
Lua Scripting Tests
===================
* Start the cluster using the `vstart.sh` script
* `luarocks` (the Lua package manager) must be installed on the machine
* Run the tests from within the `src/test/rgw/lua` directory:
  `LUATESTS_CONF=luatests.conf.SAMPLE tox`
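* To run only a subset of the suite, pytest markers (`basic_test`, `request_test` and `example_test`, as defined in `pytest.ini`) can be passed through tox, mirroring how the teuthology task invokes it; a command along these lines should work:
  `LUATESTS_CONF=luatests.conf.SAMPLE tox -- -m "basic_test or request_test"`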


@ -0,0 +1,60 @@
import configparser
import os
import pytest
def setup():
cfg = configparser.RawConfigParser()
try:
path = os.environ['LUATESTS_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable LUATESTS_CONF to a config file.',
)
cfg.read(path)
if not cfg.defaults():
raise RuntimeError('Your config file is missing the DEFAULT section!')
if not cfg.has_section("s3 main"):
raise RuntimeError('Your config file is missing the "s3 main" section!')
defaults = cfg.defaults()
# vars from the DEFAULT section
global default_host
default_host = defaults.get("host")
global default_port
default_port = int(defaults.get("port"))
# vars from the main section
global main_access_key
main_access_key = cfg.get('s3 main',"access_key")
global main_secret_key
main_secret_key = cfg.get('s3 main',"secret_key")
def get_config_host():
global default_host
return default_host
def get_config_port():
global default_port
return default_port
def get_access_key():
global main_access_key
return main_access_key
def get_secret_key():
global main_secret_key
return main_secret_key
@pytest.fixture(autouse=True, scope="package")
def configfile():
setup()


@ -0,0 +1,10 @@
[DEFAULT]
port = 8000
host = localhost
[s3 main]
access_key = 0555b35654ad1656d804
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
display_name = M. Tester
user_id = testid
email = tester@ceph.com


@ -0,0 +1,5 @@
[pytest]
markers =
basic_test
request_test
example_test


@ -0,0 +1,2 @@
boto3 >=1.0.0
pytest


@ -0,0 +1,476 @@
import logging
import json
import tempfile
import random
import socket
import time
import threading
import subprocess
import os
import stat
import string
import pytest
import boto3
from . import(
configfile,
get_config_host,
get_config_port,
get_access_key,
get_secret_key
)
# configure logging for the tests module
log = logging.getLogger(__name__)
num_buckets = 0
run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
test_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/../'
def bash(cmd, **kwargs):
log.debug('running command: %s', ' '.join(cmd))
kwargs['stdout'] = subprocess.PIPE
process = subprocess.Popen(cmd, **kwargs)
s = process.communicate()[0].decode('utf-8')
return (s, process.returncode)
def admin(args, **kwargs):
""" radosgw-admin command """
cmd = [test_path + 'test-rgw-call.sh', 'call_rgw_admin', 'noname'] + args
return bash(cmd, **kwargs)
def delete_all_objects(conn, bucket_name):
objects = []
for key in conn.list_objects(Bucket=bucket_name)['Contents']:
objects.append({'Key': key['Key']})
# delete objects from the bucket
response = conn.delete_objects(Bucket=bucket_name,
Delete={'Objects': objects})
def gen_bucket_name():
global num_buckets
num_buckets += 1
return run_prefix + '-' + str(num_buckets)
def get_ip():
return 'localhost'
def get_ip_http():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# address should not be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def connection():
hostname = get_config_host()
port_no = get_config_port()
access_key = get_access_key()
secret_key = get_secret_key()
if port_no == 443 or port_no == 8443:
scheme = 'https://'
else:
scheme = 'http://'
client = boto3.client('s3',
endpoint_url=scheme+hostname+':'+str(port_no),
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
return client
def another_user(tenant=None):
access_key = str(time.time())
secret_key = str(time.time())
uid = 'superman' + str(time.time())
if tenant:
_, result = admin(['user', 'create', '--uid', uid, '--tenant', tenant, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"'])
else:
_, result = admin(['user', 'create', '--uid', uid, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"'])
assert result == 0
hostname = get_config_host()
port_no = get_config_port()
if port_no == 443 or port_no == 8443:
scheme = 'https://'
else:
scheme = 'http://'
client = boto3.client('s3',
endpoint_url=scheme+hostname+':'+str(port_no),
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
return client
def put_script(script, context, tenant=None):
fp = tempfile.NamedTemporaryFile(mode='w+')
fp.write(script)
fp.flush()
if tenant:
result = admin(['script', 'put', '--infile', fp.name, '--context', context, '--tenant', tenant])
else:
result = admin(['script', 'put', '--infile', fp.name, '--context', context])
fp.close()
return result
class UnixSocket:
def __init__(self, socket_path):
self.socket_path = socket_path
self.stop = False
self.started = False
self.events = []
self.t = threading.Thread(target=self.listen_on_socket)
self.t.start()
while not self.started:
print("UnixSocket: waiting for unix socket server to start")
time.sleep(1)
def shutdown(self):
self.stop = True
self.t.join()
def get_and_reset_events(self):
tmp = self.events
self.events = []
return tmp
def listen_on_socket(self):
self.started = True
# remove the socket file if it already exists
try:
os.unlink(self.socket_path)
except OSError:
if os.path.exists(self.socket_path):
raise
# create and bind the Unix socket server
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(self.socket_path)
# give permissions for anyone to write to it
os.chmod(self.socket_path, stat.S_IWOTH|stat.S_IWGRP|stat.S_IWUSR)
# listen for incoming connections
server.listen(1)
# accept timeout is 30s at the beginning
server.settimeout(30)
print("UnixSocket '%s' is listening for incoming connections..." % self.socket_path)
while not self.stop:
# accept connections
try:
connection, _ = server.accept()
except Exception as e:
print("UnixSocket: accept "+str(e))
continue
# after we start accept/recv timeouts are 5s
server.settimeout(5)
connection.settimeout(5)
try:
print("UnixSocket: new connection accepted")
# receive data from the client
while True:
# recv timeout is 5s
data = connection.recv(1024)
if not data:
break
event = json.loads(data.decode())
self.events.append(event)
finally:
# close the connection
connection.close()
print("UnixSocket: connection closed")
# remove the socket file
os.unlink(self.socket_path)
#####################
# lua scripting tests
#####################
@pytest.mark.basic_test
def test_script_management():
contexts = ['prerequest', 'postrequest', 'background', 'getdata', 'putdata']
scripts = {}
for context in contexts:
script = 'print("hello from ' + context + '")'
result = put_script(script, context)
assert result[1] == 0
scripts[context] = script
for context in contexts:
result = admin(['script', 'get', '--context', context])
assert result[1] == 0
assert result[0].strip() == scripts[context]
for context in contexts:
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0
for context in contexts:
result = admin(['script', 'get', '--context', context])
assert result[1] == 0
assert result[0].strip() == 'no script exists for context: ' + context
@pytest.mark.basic_test
def test_script_management_with_tenant():
tenant = 'mytenant'
conn2 = another_user(tenant)
contexts = ['prerequest', 'postrequest', 'getdata', 'putdata']
scripts = {}
for context in contexts:
for t in ['', tenant]:
script = 'print("hello from ' + context + ' and ' + tenant + '")'
result = put_script(script, context, t)
assert result[1] == 0
scripts[context+t] = script
for context in contexts:
result = admin(['script', 'get', '--context', context])
assert result[1] == 0
assert result[0].strip() == scripts[context]
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0
result = admin(['script', 'get', '--context', context])
assert result[1] == 0
assert result[0].strip() == 'no script exists for context: ' + context
result = admin(['script', 'get', '--context', context, '--tenant', tenant])
assert result[1] == 0
assert result[0].strip() == scripts[context+tenant]
result = admin(['script', 'rm', '--context', context, '--tenant', tenant])
assert result[1] == 0
result = admin(['script', 'get', '--context', context, '--tenant', tenant])
assert result[1] == 0
assert result[0].strip() == 'no script exists for context: ' + context + ' in tenant: ' + tenant
@pytest.mark.request_test
def test_put_obj():
script = '''
RGWDebugLog("op was: "..Request.RGWOp)
if Request.RGWOp == "put_obj" then
local object = Request.Object
local message = Request.Bucket.Name .. "," .. object.Name ..
"," .. object.Id .. "," .. object.Size .. "," .. object.MTime
RGWDebugLog("set: x-amz-meta-test to: " .. message)
Request.HTTP.Metadata["x-amz-meta-test"] = message
end
'''
context = "prerequest"
result = put_script(script, context)
assert result[1] == 0
conn = connection()
bucket_name = gen_bucket_name()
conn.create_bucket(Bucket=bucket_name)
key = "hello"
conn.put_object(Body="1234567890".encode("ascii"), Bucket=bucket_name, Key=key)
result = conn.get_object(Bucket=bucket_name, Key=key)
message = result['ResponseMetadata']['HTTPHeaders']['x-amz-meta-test']
assert message == bucket_name+","+key+","+key+",0,1970-01-01 00:00:00"
# cleanup
conn.delete_object(Bucket=bucket_name, Key=key)
conn.delete_bucket(Bucket=bucket_name)
contexts = ['prerequest', 'postrequest', 'getdata', 'putdata']
for context in contexts:
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0
@pytest.mark.example_test
def test_copyfrom():
script = '''
function print_object(object)
RGWDebugLog(" Name: " .. object.Name)
RGWDebugLog(" Instance: " .. object.Instance)
RGWDebugLog(" Id: " .. object.Id)
RGWDebugLog(" Size: " .. object.Size)
RGWDebugLog(" MTime: " .. object.MTime)
end
if Request.CopyFrom and Request.Object and Request.CopyFrom.Object then
RGWDebugLog("copy from object:")
print_object(Request.CopyFrom.Object)
RGWDebugLog("to object:")
print_object(Request.Object)
end
RGWDebugLog("op was: "..Request.RGWOp)
'''
contexts = ['prerequest', 'postrequest', 'getdata', 'putdata']
for context in contexts:
footer = '\nRGWDebugLog("context was: '+context+'\\n\\n")'
result = put_script(script+footer, context)
assert result[1] == 0
conn = connection()
bucket_name = gen_bucket_name()
# create bucket
bucket = conn.create_bucket(Bucket=bucket_name)
# create objects in the bucket
number_of_objects = 5
for i in range(number_of_objects):
content = str(os.urandom(1024*1024)).encode("ascii")
key = str(i)
conn.put_object(Body=content, Bucket=bucket_name, Key=key)
for i in range(number_of_objects):
key = str(i)
conn.copy_object(Bucket=bucket_name,
Key='copyof'+key,
CopySource=bucket_name+'/'+key)
# cleanup
delete_all_objects(conn, bucket_name)
conn.delete_bucket(Bucket=bucket_name)
contexts = ['prerequest', 'postrequest', 'getdata', 'putdata']
for context in contexts:
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0
@pytest.mark.example_test
def test_entropy():
script = '''
function object_entropy()
local byte_hist = {}
local byte_hist_size = 256
for i = 1,byte_hist_size do
byte_hist[i] = 0
end
local total = 0
for i, c in pairs(Data) do
local byte = c:byte() + 1
byte_hist[byte] = byte_hist[byte] + 1
total = total + 1
end
entropy = 0
for _, count in ipairs(byte_hist) do
if count ~= 0 then
local p = 1.0 * count / total
entropy = entropy - (p * math.log(p)/math.log(byte_hist_size))
end
end
return entropy
end
local full_name = Request.Bucket.Name.."-"..Request.Object.Name
RGWDebugLog("entropy of chunk of: " .. full_name .. " at offset: " .. tostring(Offset) .. " is: " .. tostring(object_entropy()))
RGWDebugLog("payload size of chunk of: " .. full_name .. " is: " .. #Data)
'''
result = put_script(script, "putdata")
assert result[1] == 0
conn = connection()
bucket_name = gen_bucket_name()
# create bucket
bucket = conn.create_bucket(Bucket=bucket_name)
# create objects in the bucket (async)
number_of_objects = 5
for i in range(number_of_objects):
content = str(os.urandom(1024*1024*16)).encode("ascii")
key = str(i)
conn.put_object(Body=content, Bucket=bucket_name, Key=key)
# cleanup
delete_all_objects(conn, bucket_name)
conn.delete_bucket(Bucket=bucket_name)
contexts = ['prerequest', 'postrequest', 'background', 'getdata', 'putdata']
for context in contexts:
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0
@pytest.mark.example_test
def test_access_log():
bucket_name = gen_bucket_name()
socket_path = '/tmp/'+bucket_name
script = '''
if Request.RGWOp == "get_obj" then
local json = require("cjson")
local socket = require("socket")
local unix = require("socket.unix")
local s = unix()
E = {{}}
msg = {{bucket = (Request.Bucket or (Request.CopyFrom or E).Bucket).Name,
object = Request.Object.Name,
time = Request.Time,
operation = Request.RGWOp,
http_status = Request.Response.HTTPStatusCode,
error_code = Request.Response.HTTPStatus,
object_size = Request.Object.Size,
trans_id = Request.TransactionId}}
assert(s:connect("{}"))
s:send(json.encode(msg).."\\n")
s:close()
end
'''.format(socket_path)
result = admin(['script-package', 'add', '--package=lua-cjson', '--allow-compilation'])
assert result[1] == 0
result = admin(['script-package', 'add', '--package=luasocket', '--allow-compilation'])
assert result[1] == 0
result = admin(['script-package', 'reload'])
assert result[1] == 0
result = put_script(script, "postrequest")
assert result[1] == 0
socket_server = UnixSocket(socket_path)
try:
conn = connection()
# create bucket
bucket = conn.create_bucket(Bucket=bucket_name)
# create objects in the bucket (async)
number_of_objects = 5
keys = []
for i in range(number_of_objects):
content = str(os.urandom(1024*1024)).encode("ascii")
key = str(i)
conn.put_object(Body=content, Bucket=bucket_name, Key=key)
keys.append(key)
for key in conn.list_objects(Bucket=bucket_name)['Contents']:
conn.get_object(Bucket=bucket_name, Key=key['Key'])
time.sleep(5)
event_keys = []
for event in socket_server.get_and_reset_events():
assert event['bucket'] == bucket_name
event_keys.append(event['object'])
assert keys == event_keys
finally:
socket_server.shutdown()
delete_all_objects(conn, bucket_name)
conn.delete_bucket(Bucket=bucket_name)
contexts = ['prerequest', 'postrequest', 'background', 'getdata', 'putdata']
for context in contexts:
result = admin(['script', 'rm', '--context', context])
assert result[1] == 0

src/test/rgw/lua/tox.ini

@ -0,0 +1,9 @@
[tox]
envlist = py
skipsdist = True
[testenv]
deps = -rrequirements.txt
passenv =
LUATESTS_CONF
commands = pytest {posargs}