Merge pull request #30218 from cbodley/wip-qa-rgw-barbican

qa/rgw: add integration test for sse-kms with barbican

Reviewed-by: Ali Maredia <amaredia@redhat.com>

Merged as commit 8c801f073d.
qa/suites/rgw/crypt/%  (new file, empty)
qa/suites/rgw/crypt/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/0-cluster/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/0-cluster/fixed-1.yaml  (new symbolic link)
@@ -0,0 +1 @@
../../../../clusters/fixed-1.yaml
qa/suites/rgw/crypt/1-ceph-install/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/1-ceph-install/install.yaml  (new file, 7 lines)
@@ -0,0 +1,7 @@
overrides:
  ceph:
    wait-for-scrub: false

tasks:
- install:
- ceph:
qa/suites/rgw/crypt/2-kms/%  (new file, empty)
qa/suites/rgw/crypt/2-kms/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/2-kms/barbican.yaml  (new file, 80 lines)
@@ -0,0 +1,80 @@
overrides:
  rgw:
    client.0:
      use-keystone-role: client.0
      use-barbican-role: client.0

tasks:
- tox: [ client.0 ]
- keystone:
    client.0:
      sha1: 12.0.0.0b2
      force-branch: master
      tenants:
        - name: admin
          description: Admin Tenant
        - name: rgwcrypt
          description: Encryption Tenant
        - name: barbican
          description: Barbican
        - name: s3
          description: S3 project
      users:
        - name: admin
          password: ADMIN
          project: admin
        - name: rgwcrypt-user
          password: rgwcrypt-pass
          project: rgwcrypt
        - name: barbican-user
          password: barbican-pass
          project: barbican
        - name: s3-user
          password: s3-pass
          project: s3
      roles: [ name: admin, name: Member, name: creator ]
      role-mappings:
        - name: admin
          user: admin
          project: admin
        - name: Member
          user: rgwcrypt-user
          project: rgwcrypt
        - name: admin
          user: barbican-user
          project: barbican
        - name: creator
          user: s3-user
          project: s3
      services:
        - name: keystone
          type: identity
          description: Keystone Identity Service
        - name: swift
          type: object-store
          description: Swift Service
- barbican:
    client.0:
      sha1: 5.0.1
      force-branch: master
      use-keystone-role: client.0
      keystone_authtoken:
        auth_plugin: password
        username: barbican-user
        password: barbican-pass
        user_domain_name: Default
      rgw_user:
        tenantName: rgwcrypt
        username: rgwcrypt-user
        password: rgwcrypt-pass
      secrets:
        - name: my-key-1
          base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
          tenantName: s3
          username: s3-user
          password: s3-pass
        - name: my-key-2
          base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
          tenantName: s3
          username: s3-user
          password: s3-pass
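Note on the two secrets above: the base64 fields are the raw key material that the barbican task (qa/tasks/barbican.py, added below) uploads to Barbican, and each payload decodes to 32 bytes, matching the bit_length of 256 declared when the secret is created. A quick standalone check, not part of the suite, could look like this:

    import base64

    payloads = {
        'my-key-1': 'a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=',
        'my-key-2': 'a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=',
    }
    for name, b64 in payloads.items():
        raw = base64.b64decode(b64)
        # both payloads are expected to decode to 32 bytes (256 bits)
        print(name, len(raw), 'bytes')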
qa/suites/rgw/crypt/3-rgw/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/3-rgw/rgw.yaml  (new file, 11 lines)
@@ -0,0 +1,11 @@
overrides:
  ceph:
    conf:
      client:
        rgw crypt require ssl: false
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        debug rgw: 20

tasks:
- rgw:
    client.0:
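The rgw crypt s3 kms encryption keys option configures statically defined test keys for SSE-KMS: a space-separated list of <key id>=<base64 key> pairs that RGW can serve without an external KMS, alongside the Barbican-backed keys set up in 2-kms. A hedged sketch, not part of this change, of generating one more such entry using the hypothetical id testkey-3:

    import base64
    import os

    key_id = 'testkey-3'  # hypothetical key id, for illustration only
    secret = base64.b64encode(os.urandom(32)).decode()
    print('rgw crypt s3 kms encryption keys = {}={}'.format(key_id, secret))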
qa/suites/rgw/crypt/4-tests/+  (new file, empty)
qa/suites/rgw/crypt/4-tests/.qa  (new symbolic link)
@@ -0,0 +1 @@
../.qa
qa/suites/rgw/crypt/4-tests/s3tests.yaml  (new file, 5 lines)
@@ -0,0 +1,5 @@
tasks:
- s3tests:
    client.0:
      force-branch: ceph-master
      kms_key: my-key-1
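The s3tests task (see the qa/tasks/s3tests.py hunk at the end of this change) looks up kms_key in ctx.barbican.keys and writes the Barbican secret id into the s3tests configuration as kms_keyid. An SSE-KMS request issued by such a test is roughly the following boto3 call; this is an illustrative sketch only, and the endpoint, credentials, bucket and key id are placeholders:

    import boto3

    s3 = boto3.client('s3',
                      endpoint_url='http://rgw.example.com:8000',
                      aws_access_key_id='ACCESS',
                      aws_secret_access_key='SECRET')
    s3.put_object(Bucket='bucket1', Key='obj1', Body=b'data',
                  ServerSideEncryption='aws:kms',
                  SSEKMSKeyId='<barbican secret id>')  # kms_keyid from the config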
qa/tasks/barbican.py  (new file, 517 lines)
@@ -0,0 +1,517 @@
"""
Deploy and configure Barbican for Teuthology
"""
import argparse
import contextlib
import logging
import string
import httplib
from urlparse import urlparse
import json

from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology import safepath
from teuthology.orchestra import run
from teuthology.exceptions import ConfigError

log = logging.getLogger(__name__)


@contextlib.contextmanager
def download(ctx, config):
    """
    Download the Barbican from github.
    Remove downloaded file upon exit.

    The context passed in should be identical to the context
    passed in to the main task.
    """
    assert isinstance(config, dict)
    log.info('Downloading barbican...')
    testdir = teuthology.get_testdir(ctx)
    for (client, cconf) in config.items():
        branch = cconf.get('force-branch', 'master')
        log.info("Using branch '%s' for barbican", branch)

        sha1 = cconf.get('sha1')
        log.info('sha1=%s', sha1)

        ctx.cluster.only(client).run(
            args=[
                'bash', '-l'
                ],
            )
        ctx.cluster.only(client).run(
            args=[
                'git', 'clone',
                '-b', branch,
                'https://github.com/openstack/barbican.git',
                '{tdir}/barbican'.format(tdir=testdir),
                ],
            )
        if sha1 is not None:
            ctx.cluster.only(client).run(
                args=[
                    'cd', '{tdir}/barbican'.format(tdir=testdir),
                    run.Raw('&&'),
                    'git', 'reset', '--hard', sha1,
                    ],
                )
    try:
        yield
    finally:
        log.info('Removing barbican...')
        testdir = teuthology.get_testdir(ctx)
        for client in config:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-rf',
                    '{tdir}/barbican'.format(tdir=testdir),
                    ],
                )


def get_barbican_dir(ctx):
    return '{tdir}/barbican'.format(tdir=teuthology.get_testdir(ctx))


def run_in_barbican_dir(ctx, client, args):
    ctx.cluster.only(client).run(
        args=['cd', get_barbican_dir(ctx), run.Raw('&&'), ] + args,
    )


def run_in_barbican_venv(ctx, client, args):
    run_in_barbican_dir(ctx, client,
                        ['.',
                         '.barbicanenv/bin/activate',
                         run.Raw('&&')
                        ] + args)


@contextlib.contextmanager
def setup_venv(ctx, config):
    """
    Setup the virtualenv for Barbican using pip.
    """
    assert isinstance(config, dict)
    log.info('Setting up virtualenv for barbican...')
    for (client, _) in config.items():
        run_in_barbican_dir(ctx, client, ['virtualenv', '.barbicanenv'])
        run_in_barbican_venv(ctx, client, ['pip', 'install', 'pytz', '-e', get_barbican_dir(ctx)])
    yield


def assign_ports(ctx, config, initial_port):
    """
    Assign port numbers starting from @initial_port
    """
    port = initial_port
    role_endpoints = {}
    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
        for role in roles_for_host:
            if role in config:
                role_endpoints[role] = (remote.name.split('@')[1], port)
                port += 1

    return role_endpoints


def set_authtoken_params(ctx, cclient, cconfig):
    section_config_list = cconfig['keystone_authtoken'].items()
    for config in section_config_list:
        (name, val) = config
        run_in_barbican_dir(ctx, cclient,
                            ['sed', '-i',
                             '/[[]filter:authtoken]/{p;s##'+'{} = {}'.format(name, val)+'#;}',
                             'etc/barbican/barbican-api-paste.ini'])

    keystone_role = cconfig.get('use-keystone-role', None)
    public_host, public_port = ctx.keystone.public_endpoints[keystone_role]
    url = 'http://{host}:{port}/v3'.format(host=public_host,
                                           port=public_port)
    run_in_barbican_dir(ctx, cclient,
                        ['sed', '-i',
                         '/[[]filter:authtoken]/{p;s##'+'auth_uri = {}'.format(url)+'#;}',
                         'etc/barbican/barbican-api-paste.ini'])
    admin_host, admin_port = ctx.keystone.admin_endpoints[keystone_role]
    admin_url = 'http://{host}:{port}/v3'.format(host=admin_host,
                                                 port=admin_port)
    run_in_barbican_dir(ctx, cclient,
                        ['sed', '-i',
                         '/[[]filter:authtoken]/{p;s##'+'auth_url = {}'.format(admin_url)+'#;}',
                         'etc/barbican/barbican-api-paste.ini'])


def fix_barbican_api_paste(ctx, cclient):
    run_in_barbican_dir(ctx, cclient,
                        ['sed', '-i', '-n',
                         '/\\[pipeline:barbican_api]/ {p;n; /^pipeline =/ '+
                         '{ s/.*/pipeline = unauthenticated-context apiapp/;p;d } } ; p',
                         './etc/barbican/barbican-api-paste.ini'])


def fix_barbican_api(ctx, cclient):
    run_in_barbican_dir(ctx, cclient,
                        ['sed', '-i',
                         '/prop_dir =/ s#etc/barbican#{}/etc/barbican#'.format(get_barbican_dir(ctx)),
                         'bin/barbican-api'])


def copy_policy_json(ctx, cclient):
    run_in_barbican_dir(ctx, cclient,
                        ['cp',
                         get_barbican_dir(ctx)+'/etc/barbican/policy.json',
                         get_barbican_dir(ctx)])


def create_barbican_conf(ctx, cclient):
    barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
    barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
                                                 port=barbican_port)
    log.info("barbican url=%s", barbican_url)

    run_in_barbican_dir(ctx, cclient,
                        ['bash', '-c',
                         'echo -n -e "[DEFAULT]\nhost_href=' + barbican_url + '\n" ' + \
                         '>barbican.conf'])


@contextlib.contextmanager
def configure_barbican(ctx, config):
    """
    Configure barbican paste-api and barbican-api.
    """
    assert isinstance(config, dict)
    (cclient, cconfig) = config.items()[0]

    keystone_role = cconfig.get('use-keystone-role', None)
    if keystone_role is None:
        raise ConfigError('use-keystone-role not defined in barbican task')

    set_authtoken_params(ctx, cclient, cconfig)
    fix_barbican_api(ctx, cclient)
    fix_barbican_api_paste(ctx, cclient)
    copy_policy_json(ctx, cclient)
    create_barbican_conf(ctx, cclient)
    try:
        yield
    finally:
        pass


@contextlib.contextmanager
def run_barbican(ctx, config):
    assert isinstance(config, dict)
    log.info('Running barbican...')

    for (client, _) in config.items():
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        cluster_name, _, client_id = teuthology.split_role(client)

        # start the public endpoint
        client_public_with_id = 'barbican.public' + '.' + client_id
        client_public_with_cluster = cluster_name + '.' + client_public_with_id

        run_cmd = ['cd', get_barbican_dir(ctx), run.Raw('&&'),
                   '.', '.barbicanenv/bin/activate', run.Raw('&&'),
                   'HOME={}'.format(get_barbican_dir(ctx)), run.Raw('&&'),
                   'bin/barbican-api',
                   run.Raw('& { read; kill %1; }')]
                   #run.Raw('1>/dev/null')

        run_cmd = 'cd ' + get_barbican_dir(ctx) + ' && ' + \
                  '. .barbicanenv/bin/activate && ' + \
                  'HOME={}'.format(get_barbican_dir(ctx)) + ' && ' + \
                  'exec bin/barbican-api & { read; kill %1; }'

        ctx.daemons.add_daemon(
            remote, 'barbican', client_public_with_id,
            cluster=cluster_name,
            args=['bash', '-c', run_cmd],
            logger=log.getChild(client),
            stdin=run.PIPE,
            cwd=get_barbican_dir(ctx),
            wait=False,
            check_status=False,
        )

        # sleep driven synchronization
        run_in_barbican_venv(ctx, client, ['sleep', '15'])
    try:
        yield
    finally:
        log.info('Stopping Barbican instance')
        ctx.daemons.get_daemon('barbican', client_public_with_id,
                               cluster_name).stop()


@contextlib.contextmanager
def create_secrets(ctx, config):
    """
    Create a main and an alternate s3 user.
    """
    assert isinstance(config, dict)
    (cclient, cconfig) = config.items()[0]

    rgw_user = cconfig['rgw_user']
    ctx.barbican.token[cclient] = {
        "username": rgw_user["username"],
        "password": rgw_user["password"],
        "tenant": rgw_user["tenantName"]
    }

    keystone_role = cconfig.get('use-keystone-role', None)
    keystone_host, keystone_port = ctx.keystone.public_endpoints[keystone_role]
    keystone_url = 'http://{host}:{port}/v2.0'.format(host=keystone_host,
                                                      port=keystone_port)
    barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
    barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
                                                 port=barbican_port)
    log.info("barbican_url=%s", barbican_url)
    # fetching user_id of user that gets secrets for radosgw
    token_req = httplib.HTTPConnection(keystone_host, keystone_port, timeout=30)
    token_req.request(
        'POST',
        '/v2.0/tokens',
        headers={'Content-Type':'application/json'},
        body=json.dumps(
            {"auth":
                {"passwordCredentials":
                    {"username": rgw_user["username"],
                     "password": rgw_user["password"]
                    },
                 "tenantName": rgw_user["tenantName"]
                }
            }
        )
    )
    rgw_access_user_resp = token_req.getresponse()
    if not (rgw_access_user_resp.status >= 200 and
            rgw_access_user_resp.status < 300):
        raise Exception("Cannot authenticate user "+rgw_user["username"]+" for secret creation")
    # baru_resp = json.loads(baru_req.data)
    rgw_access_user_data = json.loads(rgw_access_user_resp.read())
    rgw_user_id = rgw_access_user_data['access']['user']['id']

    if 'secrets' in cconfig:
        for secret in cconfig['secrets']:
            if 'name' not in secret:
                raise ConfigError('barbican.secrets must have "name" field')
            if 'base64' not in secret:
                raise ConfigError('barbican.secrets must have "base64" field')
            if 'tenantName' not in secret:
                raise ConfigError('barbican.secrets must have "tenantName" field')
            if 'username' not in secret:
                raise ConfigError('barbican.secrets must have "username" field')
            if 'password' not in secret:
                raise ConfigError('barbican.secrets must have "password" field')

            token_req = httplib.HTTPConnection(keystone_host, keystone_port, timeout=30)
            token_req.request(
                'POST',
                '/v2.0/tokens',
                headers={'Content-Type':'application/json'},
                body=json.dumps(
                    {
                        "auth": {
                            "passwordCredentials": {
                                "username": secret["username"],
                                "password": secret["password"]
                            },
                            "tenantName": secret["tenantName"]
                        }
                    }
                )
            )
            token_resp = token_req.getresponse()
            if not (token_resp.status >= 200 and
                    token_resp.status < 300):
                raise Exception("Cannot authenticate user "+secret["username"]+" for secret creation")

            token_data = json.loads(token_resp.read())
            token_id = token_data['access']['token']['id']

            key1_json = json.dumps(
                {
                    "name": secret['name'],
                    "expiration": "2020-12-31T19:14:44.180394",
                    "algorithm": "aes",
                    "bit_length": 256,
                    "mode": "cbc",
                    "payload": secret['base64'],
                    "payload_content_type": "application/octet-stream",
                    "payload_content_encoding": "base64"
                })

            sec_req = httplib.HTTPConnection(barbican_host, barbican_port, timeout=30)
            try:
                sec_req.request(
                    'POST',
                    '/v1/secrets',
                    headers={'Content-Type': 'application/json',
                             'Accept': '*/*',
                             'X-Auth-Token': token_id},
                    body=key1_json
                )
            except:
                log.info("caught exception!")
                run_in_barbican_venv(ctx, cclient, ['sleep', '900'])

            barbican_sec_resp = sec_req.getresponse()
            if not (barbican_sec_resp.status >= 200 and
                    barbican_sec_resp.status < 300):
                raise Exception("Cannot create secret")
            barbican_data = json.loads(barbican_sec_resp.read())
            if 'secret_ref' not in barbican_data:
                raise ValueError("Malformed secret creation response")
            secret_ref = barbican_data["secret_ref"]
            log.info("secret_ref=%s", secret_ref)
            secret_url_parsed = urlparse(secret_ref)
            acl_json = json.dumps(
                {
                    "read": {
                        "users": [rgw_user_id],
                        "project-access": True
                    }
                })
            acl_req = httplib.HTTPConnection(secret_url_parsed.netloc, timeout=30)
            acl_req.request(
                'PUT',
                secret_url_parsed.path+'/acl',
                headers={'Content-Type': 'application/json',
                         'Accept': '*/*',
                         'X-Auth-Token': token_id},
                body=acl_json
            )
            barbican_acl_resp = acl_req.getresponse()
            if not (barbican_acl_resp.status >= 200 and
                    barbican_acl_resp.status < 300):
                raise Exception("Cannot set ACL for secret")

            key = {'id': secret_ref.split('secrets/')[1], 'payload': secret['base64']}
            ctx.barbican.keys[secret['name']] = key

    run_in_barbican_venv(ctx, cclient, ['sleep', '3'])
    try:
        yield
    finally:
        pass


@contextlib.contextmanager
def task(ctx, config):
    """
    Deploy and configure Barbican

    Example of configuration:

        tasks:
        - local_cluster:
            cluster_path: /home/adam/ceph-1/build
        - local_rgw:
        - tox: [ client.0 ]
        - keystone:
            client.0:
              sha1: 12.0.0.0b2
              force-branch: master
              tenants:
                - name: admin
                  description: Admin Tenant
                - name: rgwcrypt
                  description: Encryption Tenant
                - name: barbican
                  description: Barbican
                - name: s3
                  description: S3 project
              users:
                - name: admin
                  password: ADMIN
                  project: admin
                - name: rgwcrypt-user
                  password: rgwcrypt-pass
                  project: rgwcrypt
                - name: barbican-user
                  password: barbican-pass
                  project: barbican
                - name: s3-user
                  password: s3-pass
                  project: s3
              roles: [ name: admin, name: Member, name: creator ]
              role-mappings:
                - name: admin
                  user: admin
                  project: admin
                - name: Member
                  user: rgwcrypt-user
                  project: rgwcrypt
                - name: admin
                  user: barbican-user
                  project: barbican
                - name: creator
                  user: s3-user
                  project: s3
              services:
                - name: keystone
                  type: identity
                  description: Keystone Identity Service
        - barbican:
            client.0:
              force-branch: master
              use-keystone-role: client.0
              keystone_authtoken:
                auth_plugin: password
                username: barbican-user
                password: barbican-pass
                user_domain_name: Default
              rgw_user:
                tenantName: rgwcrypt
                username: rgwcrypt-user
                password: rgwcrypt-pass
              secrets:
                - name: my-key-1
                  base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
                  tenantName: s3
                  username: s3-user
                  password: s3-pass
                - name: my-key-2
                  base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
                  tenantName: s3
                  username: s3-user
                  password: s3-pass
        - s3tests:
            client.0:
              force-branch: master
              kms_key: my-key-1
        - rgw:
            client.0:
              use-keystone-role: client.0
              use-barbican-role: client.0
    """
    assert config is None or isinstance(config, list) \
        or isinstance(config, dict), \
        "task barbican only supports a list or dictionary for configuration"
    all_clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    if config is None:
        config = all_clients
    if isinstance(config, list):
        config = dict.fromkeys(config)
    clients = config.keys()

    overrides = ctx.config.get('overrides', {})
    # merge each client section, not the top level.
    for client in config.iterkeys():
        if not config[client]:
            config[client] = {}
        teuthology.deep_merge(config[client], overrides.get('barbican', {}))

    log.debug('Barbican config is %s', config)

    if not hasattr(ctx, 'keystone'):
        raise ConfigError('barbican must run after the keystone task')

    ctx.barbican = argparse.Namespace()
    ctx.barbican.endpoints = assign_ports(ctx, config, 9311)
    ctx.barbican.token = {}
    ctx.barbican.keys = {}

    with contextutil.nested(
        lambda: download(ctx=ctx, config=config),
        lambda: setup_venv(ctx=ctx, config=config),
        lambda: configure_barbican(ctx=ctx, config=config),
        lambda: run_barbican(ctx=ctx, config=config),
        lambda: create_secrets(ctx=ctx, config=config),
        ):
        yield
qa/tasks/keystone.py  (modified)
@@ -4,6 +4,7 @@ Deploy and configure Keystone for Teuthology
 import argparse
 import contextlib
 import logging
+from cStringIO import StringIO
 
 from teuthology import misc as teuthology
 from teuthology import contextutil
@@ -11,46 +12,27 @@ from teuthology.orchestra import run
 from teuthology.orchestra.connection import split_user
 from teuthology.packaging import install_package
 from teuthology.packaging import remove_package
 from teuthology.exceptions import ConfigError
 
 log = logging.getLogger(__name__)
 
 
-@contextlib.contextmanager
-def install_packages(ctx, config):
-    """
-    Download the packaged dependencies of Keystone.
-    Remove install packages upon exit.
-
-    The context passed in should be identical to the context
-    passed in to the main task.
-    """
-    assert isinstance(config, dict)
-    log.info('Installing packages for Keystone...')
-
-    deps = {
-        'deb': [ 'libffi-dev', 'libssl-dev', 'libldap2-dev', 'libsasl2-dev' ],
-        'rpm': [ 'libffi-devel', 'openssl-devel' ],
-    }
-    for (client, _) in config.items():
-        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
-        for dep in deps[remote.os.package_type]:
-            install_package(dep, remote)
-    try:
-        yield
-    finally:
-        log.info('Removing packaged dependencies of Keystone...')
-
-        for (client, _) in config.items():
-            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
-            for dep in deps[remote.os.package_type]:
-                remove_package(dep, remote)
-
 def get_keystone_dir(ctx):
     return '{tdir}/keystone'.format(tdir=teuthology.get_testdir(ctx))
 
-def run_in_keystone_dir(ctx, client, args):
-    ctx.cluster.only(client).run(
+def run_in_keystone_dir(ctx, client, args, **kwargs):
+    return ctx.cluster.only(client).run(
         args=[ 'cd', get_keystone_dir(ctx), run.Raw('&&'), ] + args,
+        **kwargs
     )
 
+def get_toxvenv_dir(ctx):
+    return ctx.tox.venv_path
+
+def run_in_tox_venv(ctx, remote, args, **kwargs):
+    return remote.run(
+        args=[ 'source', '{}/bin/activate'.format(get_toxvenv_dir(ctx)), run.Raw('&&') ] + args,
+        **kwargs
+    )
+
 def run_in_keystone_venv(ctx, client, args):
@@ -64,9 +46,6 @@ def get_keystone_venved_cmd(ctx, cmd, args):
     kbindir = get_keystone_dir(ctx) + '/.tox/venv/bin/'
     return [ kbindir + 'python', kbindir + cmd ] + args
 
-def get_toxvenv_dir(ctx):
-    return ctx.tox.venv_path
-
 @contextlib.contextmanager
 def download(ctx, config):
     """
@@ -113,6 +92,40 @@ def download(ctx, config):
             args=[ 'rm', '-rf', keystonedir ],
         )
 
+@contextlib.contextmanager
+def install_packages(ctx, config):
+    """
+    Download the packaged dependencies of Keystone.
+    Remove install packages upon exit.
+
+    The context passed in should be identical to the context
+    passed in to the main task.
+    """
+    assert isinstance(config, dict)
+    log.info('Installing packages for Keystone...')
+
+    packages = {}
+    for (client, _) in config.items():
+        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        # use bindep to read which dependencies we need from keystone/bindep.txt
+        run_in_tox_venv(ctx, remote, ['pip', 'install', 'bindep'])
+        r = run_in_tox_venv(ctx, remote,
+            ['bindep', '--brief', '--file', '{}/bindep.txt'.format(get_keystone_dir(ctx))],
+            stdout=StringIO(),
+            check_status=False) # returns 1 on success?
+        packages[client] = r.stdout.getvalue().splitlines()
+        for dep in packages[client]:
+            install_package(dep, remote)
+    try:
+        yield
+    finally:
+        log.info('Removing packaged dependencies of Keystone...')
+
+        for (client, _) in config.items():
+            (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+            for dep in packages[client]:
+                remove_package(dep, remote)
+
 @contextlib.contextmanager
 def setup_venv(ctx, config):
     """
@@ -129,7 +142,9 @@ def setup_venv(ctx, config):
               ])
 
         run_in_keystone_venv(ctx, client,
-            [ 'pip', 'install', 'python-openstackclient' ])
+            [ 'pip', 'install', 'python-openstackclient<=3.19.0',
+              '-r', 'requirements.txt'
+            ])
     try:
        yield
     finally:
@@ -161,6 +176,19 @@ def configure_instance(ctx, config):
             '-e', 's^#key_repository =.*^key_repository = {kr}^'.format(kr = keyrepo_dir),
             '-i', 'etc/keystone.conf'
         ])
+        # log to a file that gets archived
+        log_file = '{p}/archive/keystone.{c}.log'.format(p=teuthology.get_testdir(ctx), c=client)
+        run_in_keystone_dir(ctx, client,
+            [
+                'sed',
+                '-e', 's^#log_file =.*^log_file = {}^'.format(log_file),
+                '-i', 'etc/keystone.conf'
+            ])
+        # copy the config to archive
+        run_in_keystone_dir(ctx, client, [
+            'cp', 'etc/keystone.conf',
+            '{}/archive/keystone.{}.conf'.format(teuthology.get_testdir(ctx), client)
+        ])
 
         # prepare key repository for Fernet token authenticator
         run_in_keystone_dir(ctx, client, [ 'mkdir', '-p', keyrepo_dir ])
@@ -263,6 +291,7 @@ def run_section_cmds(ctx, cclient, section_cmd, special,
 
     auth_section = [
         ( 'os-token', 'ADMIN' ),
+        ( 'os-identity-api-version', '2.0' ),
         ( 'os-url', 'http://{host}:{port}/v2.0'.format(host=admin_host,
                                                        port=admin_port) ),
     ]
@@ -270,13 +299,18 @@ def run_section_cmds(ctx, cclient, section_cmd, special,
     for section_item in section_config_list:
         run_in_keystone_venv(ctx, cclient,
             [ 'openstack' ] + section_cmd.split() +
-            dict_to_args(special, auth_section + section_item.items()))
+            dict_to_args(special, auth_section + section_item.items()) +
+            [ '--debug' ])
 
-def create_endpoint(ctx, cclient, service, url):
+def create_endpoint(ctx, cclient, service, url, adminurl=None):
     endpoint_section = {
         'service': service,
         'publicurl': url,
     }
+    if adminurl:
+        endpoint_section.update( {
+            'adminurl': adminurl,
+        } )
     return run_section_cmds(ctx, cclient, 'endpoint create', 'service',
         [ endpoint_section ])
@@ -300,7 +334,10 @@ def fill_keystone(ctx, config):
     public_host, public_port = ctx.keystone.public_endpoints[cclient]
     url = 'http://{host}:{port}/v2.0'.format(host=public_host,
                                              port=public_port)
-    create_endpoint(ctx, cclient, 'keystone', url)
+    admin_host, admin_port = ctx.keystone.admin_endpoints[cclient]
+    admin_url = 'http://{host}:{port}/v2.0'.format(host=admin_host,
+                                                   port=admin_port)
+    create_endpoint(ctx, cclient, 'keystone', url, admin_url)
     # for the deferred endpoint creation; currently it's used in rgw.py
     ctx.keystone.create_endpoint = create_endpoint
 
@@ -362,7 +399,7 @@ def task(ctx, config):
         or isinstance(config, dict), \
         "task keystone only supports a list or dictionary for configuration"
 
-    if not ctx.tox:
+    if not hasattr(ctx, 'tox'):
         raise ConfigError('keystone must run after the tox task')
 
     all_clients = ['client.{id}'.format(id=id_)
@@ -379,8 +416,8 @@ def task(ctx, config):
     ctx.keystone.admin_endpoints = assign_ports(ctx, config, 35357)
 
     with contextutil.nested(
-        lambda: install_packages(ctx=ctx, config=config),
         lambda: download(ctx=ctx, config=config),
+        lambda: install_packages(ctx=ctx, config=config),
         lambda: setup_venv(ctx=ctx, config=config),
         lambda: configure_instance(ctx=ctx, config=config),
         lambda: run_keystone(ctx=ctx, config=config),
qa/tasks/rgw.py  (modified)
@@ -88,7 +88,7 @@ def start_rgw(ctx, config, clients):
             '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
             '--rgw_ops_log_socket_path',
             '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
-                                                                  client_with_cluster=client_with_cluster)
+                                                                  client_with_cluster=client_with_cluster),
             ])
 
         keystone_role = client_config.get('use-keystone-role', None)
@@ -107,11 +107,35 @@ def start_rgw(ctx, config, clients):
                                        kport=keystone_port),
                 ])
 
         if client_config.get('dns-name'):
             rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
         if client_config.get('dns-s3website-name'):
             rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])
+
+        barbican_role = client_config.get('use-barbican-role', None)
+        if barbican_role is not None:
+            if not hasattr(ctx, 'barbican'):
+                raise ConfigError('rgw must run after the barbican task')
+
+            barbican_host, barbican_port = \
+                ctx.barbican.endpoints[barbican_role]
+            log.info("Use barbican url=%s:%s", barbican_host, barbican_port)
+
+            rgw_cmd.extend([
+                '--rgw_barbican_url',
+                'http://{bhost}:{bport}'.format(bhost=barbican_host,
                                                 bport=barbican_port),
+                ])
+
+            log.info("Barbican access data: %s", ctx.barbican.token[barbican_role])
+            access_data = ctx.barbican.token[barbican_role]
+            rgw_cmd.extend([
+                '--rgw_keystone_barbican_user', access_data['username'],
+                '--rgw_keystone_barbican_password', access_data['password'],
+                '--rgw_keystone_barbican_tenant', access_data['tenant'],
+                ])
+
         rgw_cmd.extend([
             '--foreground',
             run.Raw('|'),
qa/tasks/s3tests.py  (modified)
@@ -190,6 +190,16 @@ def configure(ctx, config):
         else:
             s3tests_conf['DEFAULT']['host'] = 'localhost'
 
+        if properties is not None and 'kms_key' in properties:
+            host = None
+            if not hasattr(ctx, 'barbican'):
+                raise ConfigError('s3tests must run after the barbican task')
+            if not ( properties['kms_key'] in ctx.barbican.keys ):
+                raise ConfigError('Key '+properties['kms_key']+' not defined')
+
+            key = ctx.barbican.keys[properties['kms_key']]
+            s3tests_conf['DEFAULT']['kms_keyid'] = key['id']
+
         if properties is not None and 'slow_backend' in properties:
             s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
 
qa/tasks/tempest.py  (modified)
@@ -58,6 +58,15 @@ def download(ctx, config):
         sha1 = cconf.get('sha1')
         if sha1 is not None:
             run_in_tempest_dir(ctx, client, [ 'git', 'reset', '--hard', sha1 ])
+
+        # tox.ini contains a dead link, replace it with the new one
+        from_url = 'https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt'
+        to_url = 'https://opendev.org/openstack/requirements/raw/branch/stable/pike/upper-constraints.txt'
+        run_in_tempest_dir(ctx, client, [
+            'sed', '-i',
+            run.Raw('"s|{}|{}|"'.format(from_url, to_url)),
+            'tox.ini'
+        ])
     try:
         yield
     finally: