Mirror of https://github.com/ceph/ceph (synced 2025-01-20 01:51:34 +00:00)

Merge pull request #32262 from kshtsk/wip-ceph-py-py3

qa/tasks: py3 compat

Reviewed-by: Kefu Chai <kchai@redhat.com>

Commit: 8fd63d2384
@@ -6,7 +6,7 @@ import contextlib
 import logging
 
 from teuthology import misc as teuthology
-from cephfs.fuse_mount import FuseMount
+from tasks.cephfs.fuse_mount import FuseMount
 
 log = logging.getLogger(__name__)
@@ -4,7 +4,6 @@ import logging
 from tasks.ceph_test_case import CephTestCase
 import os
 import re
-from StringIO import StringIO
 
 from tasks.cephfs.fuse_mount import FuseMount
 
@@ -260,21 +259,21 @@ class CephFSTestCase(CephTestCase):
     def delete_mds_coredump(self, daemon_id):
         # delete coredump file, otherwise teuthology.internal.coredump will
         # catch it later and treat it as a failure.
-        p = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
-            "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
-        core_dir = os.path.dirname(p.stdout.getvalue().strip())
+        core_pattern = self.mds_cluster.mds_daemons[daemon_id].remote.sh(
+            "sudo sysctl -n kernel.core_pattern")
+        core_dir = os.path.dirname(core_pattern.strip())
         if core_dir:  # Non-default core_pattern with a directory in it
             # We have seen a core_pattern that looks like it's from teuthology's coredump
             # task, so proceed to clear out the core file
             log.info("Clearing core from directory: {0}".format(core_dir))
 
             # Verify that we see the expected single coredump
-            ls_proc = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
+            ls_output = self.mds_cluster.mds_daemons[daemon_id].remote.sh([
                 "cd", core_dir, run.Raw('&&'),
                 "sudo", "ls", run.Raw('|'), "sudo", "xargs", "file"
-            ], stdout=StringIO())
+            ])
             cores = [l.partition(":")[0]
-                     for l in ls_proc.stdout.getvalue().strip().split("\n")
+                     for l in ls_output.strip().split("\n")
                      if re.match(r'.*ceph-mds.* -i +{0}'.format(daemon_id), l)]
 
             log.info("Enumerated cores: {0}".format(cores))
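The recurring pattern in this change set replaces `remote.run(..., stdout=StringIO())` plus `p.stdout.getvalue()` with teuthology's `remote.sh()`, which returns the command's decoded stdout directly. A minimal sketch of the before/after shapes (the `remote` object here stands in for any teuthology remote; treat the surrounding names as assumptions, not part of this diff):

    from io import BytesIO

    # before: capture stdout into a buffer, then read it back (bytes under py3)
    p = remote.run(args=["sysctl", "-n", "kernel.core_pattern"], stdout=BytesIO())
    core_pattern = p.stdout.getvalue().decode().strip()

    # after: sh() runs the command and returns stdout as a native str
    core_pattern = remote.sh("sysctl -n kernel.core_pattern").strip()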
@@ -1,14 +1,17 @@
-from StringIO import StringIO
+from io import BytesIO
 import json
 import time
 import logging
+
+import six
+
 from textwrap import dedent
 
 from teuthology import misc
 from teuthology.contextutil import MaxWhileTries
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
-from .mount import CephFSMount
+from tasks.cephfs.mount import CephFSMount
 
 log = logging.getLogger(__name__)
@@ -96,16 +99,12 @@ class FuseMount(CephFSMount):
             check_status=False,
             timeout=(15*60)
         )
-        p = self.client_remote.run(
-            args=["ls", "/sys/fs/fuse/connections"],
-            stdout=StringIO(),
-            check_status=False,
-            timeout=(15*60)
-        )
-        if p.exitstatus != 0:
+        try:
+            ls_str = self.client_remote.sh("ls /sys/fs/fuse/connections",
+                                           timeout=(15*60)).strip()
+        except CommandFailedError:
             return []
 
-        ls_str = p.stdout.getvalue().strip()
         if ls_str:
             return [int(n) for n in ls_str.split("\n")]
         else:
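Because `sh()` raises on a non-zero exit instead of handing back a proc to inspect, the old `if p.exitstatus != 0` checks become `try/except CommandFailedError`, as in the hunk above. A sketch of the equivalence (assuming a teuthology-style remote):

    from teuthology.orchestra.run import CommandFailedError

    try:
        ls_str = remote.sh("ls /sys/fs/fuse/connections").strip()
    except CommandFailedError:
        ls_str = ""   # command failed, e.g. the directory is absent

    conns = [int(n) for n in ls_str.split("\n")] if ls_str else []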
@@ -188,16 +187,17 @@ class FuseMount(CephFSMount):
                 self.mountpoint,
             ],
             cwd=self.test_dir,
-            stdout=StringIO(),
-            stderr=StringIO(),
+            stdout=BytesIO(),
+            stderr=BytesIO(),
             wait=False,
             timeout=(15*60)
         )
         try:
             proc.wait()
         except CommandFailedError:
-            if ("endpoint is not connected" in proc.stderr.getvalue()
-            or "Software caused connection abort" in proc.stderr.getvalue()):
+            error = six.ensure_str(proc.stderr.getvalue())
+            if ("endpoint is not connected" in error
+            or "Software caused connection abort" in error):
                 # This happens if fuse is killed without unmount
                 log.warn("Found stale mount point at {0}".format(self.mountpoint))
                 return True
@@ -206,7 +206,7 @@ class FuseMount(CephFSMount):
             log.info('mount point does not exist: %s', self.mountpoint)
             return False
 
-        fstype = proc.stdout.getvalue().rstrip('\n')
+        fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n')
         if fstype == 'fuseblk':
             log.info('ceph-fuse is mounted on %s', self.mountpoint)
             return True
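With stdout/stderr now captured into `BytesIO`, `getvalue()` yields bytes under Python 3; `six.ensure_str()` normalizes to the native `str` type on both interpreters before any string comparison. A tiny illustration:

    import six

    buf = b"fuseblk\n"                 # what a BytesIO capture returns
    fstype = six.ensure_str(buf).rstrip('\n')
    assert fstype == 'fuseblk'         # str-to-str comparison on py2 and py3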
@@ -231,11 +231,11 @@ class FuseMount(CephFSMount):
         # Now that we're mounted, set permissions so that the rest of the test will have
         # unrestricted access to the filesystem mount.
         try:
-            stderr = StringIO()
+            stderr = BytesIO()
             self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), cwd=self.test_dir, stderr=stderr)
         except run.CommandFailedError:
             stderr = stderr.getvalue()
-            if "Read-only file system".lower() in stderr.lower():
+            if b"Read-only file system".lower() in stderr.lower():
                 pass
             else:
                 raise
@@ -278,7 +278,7 @@ class FuseMount(CephFSMount):
             """).format(self._fuse_conn))
             self._fuse_conn = None
 
-        stderr = StringIO()
+        stderr = BytesIO()
         try:
             # make sure it's unmounted
             self.client_remote.run(
@@ -345,7 +345,7 @@ class FuseMount(CephFSMount):
 
         Prerequisite: the client is not mounted.
         """
-        stderr = StringIO()
+        stderr = BytesIO()
         try:
             self.client_remote.run(
                 args=[
@@ -359,7 +359,7 @@ class FuseMount(CephFSMount):
                 check_status=False,
             )
         except CommandFailedError:
-            if "No such file or directory" in stderr.getvalue():
+            if b"No such file or directory" in stderr.getvalue():
                 pass
             else:
                 raise
@@ -444,17 +444,16 @@ print(find_socket("{client_name}"))
             client_name="client.{0}".format(self.client_id))
 
         # Find the admin socket
-        p = self.client_remote.run(args=[
-            'sudo', 'python3', '-c', pyscript
-        ], stdout=StringIO(), timeout=(15*60))
-        asok_path = p.stdout.getvalue().strip()
+        asok_path = self.client_remote.sh([
+            'sudo', 'python3', '-c', pyscript
+        ], timeout=(15*60)).strip()
         log.info("Found client admin socket at {0}".format(asok_path))
 
         # Query client ID from admin socket
-        p = self.client_remote.run(
-            args=['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
-            stdout=StringIO(), timeout=(15*60))
-        return json.loads(p.stdout.getvalue())
+        json_data = self.client_remote.sh(
+            ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args,
+            timeout=(15*60))
+        return json.loads(json_data)
 
     def get_global_id(self):
         """
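Since `sh()` returns text, its output can be fed straight to `json.loads()` with no intermediate buffer; older Python 3 releases require `str` there, so this also sidesteps a bytes/str mismatch. A sketch of the pattern (the command and socket path are illustrative, not taken from this diff):

    import json

    json_data = remote.sh(['sudo', 'ceph', '--admin-daemon',
                           '/var/run/ceph/ceph-client.admin.asok', 'status'])
    status = json.loads(json_data)   # parsed straight from the returned str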
@@ -1,4 +1,3 @@
-from StringIO import StringIO
 import json
 import logging
 import time
@@ -211,10 +210,10 @@ class KernelMount(CephFSMount):
         print(json.dumps(get_id_to_dir()))
         """)
 
-        p = self.client_remote.run(args=[
+        output = self.client_remote.sh([
             'sudo', 'python3', '-c', pyscript
-        ], stdout=StringIO(), timeout=(5*60))
-        client_id_to_dir = json.loads(p.stdout.getvalue())
+        ], timeout=(5*60))
+        client_id_to_dir = json.loads(output)
 
         try:
             return client_id_to_dir[self.client_id]
@@ -233,10 +232,10 @@ class KernelMount(CephFSMount):
         print(open(os.path.join("{debug_dir}", "{filename}")).read())
         """).format(debug_dir=debug_dir, filename=filename)
 
-        p = self.client_remote.run(args=[
+        output = self.client_remote.sh([
             'sudo', 'python3', '-c', pyscript
-        ], stdout=StringIO(), timeout=(5*60))
-        return p.stdout.getvalue()
+        ], timeout=(5*60))
+        return output
 
     def get_global_id(self):
         """
@@ -1,11 +1,12 @@
 from contextlib import contextmanager
+from io import BytesIO
 import json
 import logging
 import datetime
+import six
 import time
 from textwrap import dedent
 import os
-from StringIO import StringIO
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
 from tasks.cephfs.filesystem import Filesystem
@@ -184,12 +185,12 @@ class CephFSMount(object):
         return self.client_remote.run(
             args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill',
                   py_version, '-c', pyscript], wait=False, stdin=run.PIPE,
-            stdout=StringIO())
+            stdout=BytesIO())
 
     def run_python(self, pyscript, py_version='python3'):
         p = self._run_python(pyscript, py_version)
         p.wait()
-        return p.stdout.getvalue().strip()
+        return six.ensure_str(p.stdout.getvalue().strip())
 
     def run_shell(self, args, wait=True, stdin=None, check_status=True,
                   omit_sudo=True):
@@ -197,8 +198,8 @@ class CephFSMount(object):
             args = args.split()
 
         args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
-        return self.client_remote.run(args=args, stdout=StringIO(),
-                                      stderr=StringIO(), wait=wait,
+        return self.client_remote.run(args=args, stdout=BytesIO(),
+                                      stderr=BytesIO(), wait=wait,
                                       stdin=stdin, check_status=check_status,
                                       omit_sudo=omit_sudo)
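Where the proc object itself is still needed (e.g. for `wait=False`), the buffer stays, but as `BytesIO`, and the decode happens once at the boundary that hands data back to callers, as `run_python()` does above. A minimal sketch, again assuming a teuthology-style remote:

    from io import BytesIO
    import six

    proc = remote.run(args=['python3', '-c', 'print("ok")'],
                      wait=False, stdout=BytesIO())
    proc.wait()
    result = six.ensure_str(proc.stdout.getvalue().strip())  # 'ok' as native str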
@@ -1,6 +1,6 @@
 import logging
-from StringIO import StringIO
+from io import BytesIO
 
 from xfstests_dev import XFSTestsDev
 
 log = logging.getLogger(__name__)
@@ -22,6 +22,6 @@ class TestACLs(XFSTestsDev):
         log.info('client is kernel mounted')
 
         self.mount_a.client_remote.run(args=['sudo', './check',
-            'generic/099'], cwd=self.repo_path, stdout=StringIO(),
-            stderr=StringIO(), timeout=30, check_status=True,
+            'generic/099'], cwd=self.repo_path, stdout=BytesIO(),
+            stderr=BytesIO(), timeout=30, check_status=True,
             label='running tests for ACLs from xfstests-dev')
@@ -2,15 +2,16 @@
 Before running this testsuite, add path to cephfs-shell module to $PATH and
 export $PATH.
 """
+from io import BytesIO
 from os import path
 import crypt
 import logging
 from tempfile import mkstemp as tempfile_mkstemp
 import math
+from six import ensure_str
 from sys import version_info as sys_version_info
 from re import search as re_search
 from time import sleep
-from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.misc import sudo_write_file
 from teuthology.orchestra.run import CommandFailedError
@@ -52,23 +53,23 @@ class TestCephFSShell(CephFSTestCase):
         args.extend(("--", cmd))
 
         log.info("Running command: {}".format(" ".join(args)))
-        return mount_x.client_remote.run(args=args, stdout=StringIO(),
-                                         stderr=StringIO(), stdin=stdin)
+        return mount_x.client_remote.run(args=args, stdout=BytesIO(),
+                                         stderr=BytesIO(), stdin=stdin)
 
     def get_cephfs_shell_cmd_error(self, cmd, mount_x=None, opts=None,
                                    stdin=None):
-        return self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin).stderr.\
-            getvalue().strip()
+        return ensure_str(self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin).stderr.\
+            getvalue().strip())
 
     def get_cephfs_shell_cmd_output(self, cmd, mount_x=None, opts=None,
                                     stdin=None, config_path=None):
-        return self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin,
-                                         config_path).\
-            stdout.getvalue().strip()
+        return ensure_str(self.run_cephfs_shell_cmd(cmd, mount_x, opts, stdin,
+                                                    config_path).\
+            stdout.getvalue().strip())
 
     def get_cephfs_shell_script_output(self, script, mount_x=None, stdin=None):
-        return self.run_cephfs_shell_script(script, mount_x, stdin).stdout.\
-            getvalue().strip()
+        return ensure_str(self.run_cephfs_shell_script(script, mount_x, stdin).stdout.\
+            getvalue().strip())
 
     def run_cephfs_shell_script(self, script, mount_x=None, stdin=None):
         if mount_x is None:
@@ -83,8 +84,8 @@ class TestCephFSShell(CephFSTestCase):
 
         args = ["cephfs-shell", "-c", mount_x.config_path, '-b', scriptpath]
         log.info('Running script \"' + scriptpath + '\"')
-        return mount_x.client_remote.run(args=args, stdout=StringIO(),
-                                         stderr=StringIO(), stdin=stdin)
+        return mount_x.client_remote.run(args=args, stdout=BytesIO(),
+                                         stderr=BytesIO(), stdin=stdin)
 
 class TestMkdir(TestCephFSShell):
     def test_mkdir(self):
@@ -749,7 +750,7 @@ class TestDF(TestCephFSShell):
     def test_df_for_invalid_directory(self):
         dir_abspath = path.join(self.mount_a.mountpoint, 'non-existent-dir')
         proc = self.run_cephfs_shell_cmd('df ' + dir_abspath)
-        assert proc.stderr.getvalue().find('error in stat') != -1
+        assert proc.stderr.getvalue().find(b'error in stat') != -1
 
     def test_df_for_valid_file(self):
         s = 'df test' * 14145016
@@ -924,9 +925,9 @@ class TestMisc(TestCephFSShell):
         dirname = 'somedirectory'
         self.run_cephfs_shell_cmd(['mkdir', dirname])
 
-        output = self.mount_a.client_remote.run(args=['cephfs-shell', '-c',
-                                                self.mount_a.config_path, 'ls'],
-                                                stdout=StringIO()).stdout.getvalue().strip()
+        output = self.mount_a.client_remote.sh([
+            'cephfs-shell', '-c', self.mount_a.config_path, 'ls'
+        ]).strip()
 
         if sys_version_info.major >= 3:
             self.assertRegex(dirname, output)
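The shell-test helpers keep returning text to their callers: raw `BytesIO` captures stay internal, and every `get_*_output()` accessor wraps `getvalue()` in `ensure_str()`. Assertions that inspect a raw buffer directly must instead compare against byte literals, as `test_df_for_invalid_directory` does with `b'error in stat'`. A sketch of the two options:

    from io import BytesIO
    from six import ensure_str

    buf = BytesIO(b"error in stat: no such directory\n")

    # option 1: compare bytes to bytes on the raw buffer
    assert buf.getvalue().find(b'error in stat') != -1

    # option 2: decode once, then work in str
    assert 'error in stat' in ensure_str(buf.getvalue())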
@@ -1,6 +1,5 @@
 import logging
 import time
-from StringIO import StringIO
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
@@ -129,7 +128,7 @@ class TestExports(CephFSTestCase):
         self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
 
         if not isinstance(self.mount_a, FuseMount):
-            p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
+            p = self.mount_a.client_remote.sh('uname -r', wait=True)
             dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
             log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
             if str(p.stdout.getvalue()) < "5" and not(dir_pin):
@@ -1,5 +1,4 @@
 
-from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from tasks.workunit import task as workunit
 
@@ -76,14 +75,12 @@ class TestJournalMigration(CephFSTestCase):
 
         self.fs.journal_tool(["event", "get", "json",
                               "--path", "/tmp/journal.json"], 0)
-        p = self.fs.tool_remote.run(
-            args=[
+        p = self.fs.tool_remote.sh([
                 "python3",
                 "-c",
                 "import json; print(len(json.load(open('/tmp/journal.json'))))"
-            ],
-            stdout=StringIO())
-        event_count = int(p.stdout.getvalue().strip())
+            ])
+        event_count = int(p.strip())
         if event_count < 1000:
             # Approximate value of "lots", expected from having run fsstress
             raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count))
@@ -1,6 +1,6 @@
+from io import BytesIO
+import six
 import logging
-from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 logger = logging.getLogger(__name__)
@@ -28,11 +28,11 @@ class XFSTestsDev(CephFSTestCase):
         # NOTE: On teuthology machines it's necessary to run "make" as
         # superuser since the repo is cloned somewhere in /tmp.
         self.mount_a.client_remote.run(args=['sudo', 'make'],
-            cwd=self.repo_path, stdout=StringIO(),
-            stderr=StringIO())
+            cwd=self.repo_path, stdout=BytesIO(),
+            stderr=BytesIO())
         self.mount_a.client_remote.run(args=['sudo', 'make', 'install'],
             cwd=self.repo_path, omit_sudo=False,
-            stdout=StringIO(), stderr=StringIO())
+            stdout=BytesIO(), stderr=BytesIO())
 
     def get_repo(self):
         """
@@ -1,9 +1,11 @@
 #!/usr/bin/env python
 import contextlib
 import logging
-from cStringIO import StringIO
+from io import BytesIO
 import textwrap
+from configparser import ConfigParser
 
+import six
 import time
 
 from teuthology.orchestra import run
@@ -140,7 +142,7 @@ def distribute_ceph_keys(devstack_node, ceph_node):
     log.info("Copying Ceph keys to DevStack node...")
 
     def copy_key(from_remote, key_name, to_remote, dest_path, owner):
-        key_stringio = StringIO()
+        key_stringio = BytesIO()
         from_remote.run(
             args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
             stdout=key_stringio)
@@ -172,14 +174,8 @@ def distribute_ceph_keys(devstack_node, ceph_node):
 def set_libvirt_secret(devstack_node, ceph_node):
     log.info("Setting libvirt secret...")
 
-    cinder_key_stringio = StringIO()
-    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
-                  stdout=cinder_key_stringio)
-    cinder_key = cinder_key_stringio.getvalue().strip()
-
-    uuid_stringio = StringIO()
-    devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
-    uuid = uuid_stringio.getvalue().strip()
+    cinder_key = ceph_node.sh('sudo ceph auth get-key client.cinder').strip()
+    uuid = devstack_node.sh('uuidgen').strip()
 
     secret_path = '/tmp/secret.xml'
     secret_template = textwrap.dedent("""
@@ -210,7 +206,7 @@ def update_devstack_config_files(devstack_node, secret_uuid):
         parser.read_file(config_stream)
         for (key, value) in update_dict.items():
             parser.set(section, key, value)
-        out_stream = StringIO()
+        out_stream = six.StringIO()
         parser.write(out_stream)
         out_stream.seek(0)
         return out_stream
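`ConfigParser.write()` emits text, so its destination must be a text stream; `six.StringIO` resolves to `io.StringIO` on Python 3 (and `StringIO.StringIO` on 2), unlike the byte-oriented `BytesIO` used for remote command capture. A minimal sketch with illustrative section and values:

    import six
    from configparser import ConfigParser

    parser = ConfigParser()
    parser.add_section('rbd')
    parser.set('rbd', 'rbd_secret_uuid', 'some-uuid')  # illustrative values

    out_stream = six.StringIO()   # text stream: parser.write() writes str
    parser.write(out_stream)
    out_stream.seek(0)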
@@ -254,8 +250,8 @@ def update_devstack_config_files(devstack_node, secret_uuid):
     for update in updates:
         file_name = update['name']
         options = update['options']
-        config_str = misc.get_file(devstack_node, file_name, sudo=True)
-        config_stream = StringIO(config_str)
+        config_data = misc.get_file(devstack_node, file_name, sudo=True)
+        config_stream = six.StringIO(config_data)
         backup_config(devstack_node, file_name)
         new_config_stream = update_config(file_name, config_stream, options)
         misc.sudo_write_file(devstack_node, file_name, new_config_stream)
@@ -352,21 +348,17 @@ def create_volume(devstack_node, ceph_node, vol_name, size):
         size=size))
     args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
             '--display-name', vol_name, size]
-    out_stream = StringIO()
-    devstack_node.run(args=args, stdout=out_stream, wait=True)
-    vol_info = parse_os_table(out_stream.getvalue())
+    cinder_create = devstack_node.sh(args, wait=True)
+    vol_info = parse_os_table(cinder_create)
     log.debug("Volume info: %s", str(vol_info))
 
-    out_stream = StringIO()
     try:
-        ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
-                      wait=True)
+        rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True)
     except run.CommandFailedError:
         log.debug("Original rbd call failed; retrying without '--id cinder'")
-        ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
-                      wait=True)
+        rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True)
 
-    assert vol_info['id'] in out_stream.getvalue(), \
+    assert vol_info['id'] in rbd_output, \
         "Volume not found on Ceph cluster"
     assert vol_info['size'] == size, \
         "Volume size on Ceph cluster is different than specified"
@@ -8,7 +8,7 @@ from teuthology.misc import deep_merge
 from teuthology.orchestra.run import CommandFailedError
 from teuthology import misc
 from teuthology.contextutil import MaxWhileTries
-from cephfs.kernel_mount import KernelMount
+from tasks.cephfs.kernel_mount import KernelMount
 
 log = logging.getLogger(__name__)
 
@@ -4,7 +4,6 @@ Deploy and configure Keystone for Teuthology
 import argparse
 import contextlib
 import logging
-from cStringIO import StringIO
 
 from teuthology import misc as teuthology
 from teuthology import contextutil
@@ -28,11 +27,9 @@ def run_in_keystone_dir(ctx, client, args, **kwargs):
 def get_toxvenv_dir(ctx):
     return ctx.tox.venv_path
 
-def run_in_tox_venv(ctx, remote, args, **kwargs):
-    return remote.run(
-        args=[ 'source', '{}/bin/activate'.format(get_toxvenv_dir(ctx)), run.Raw('&&') ] + args,
-        **kwargs
-    )
+def toxvenv_sh(ctx, remote, args, **kwargs):
+    activate = get_toxvenv_dir(ctx) + '/bin/activate'
+    return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
 
 def run_in_keystone_venv(ctx, client, args):
     run_in_keystone_dir(ctx, client,
@@ -107,12 +104,10 @@ def install_packages(ctx, config):
     for (client, _) in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         # use bindep to read which dependencies we need from keystone/bindep.txt
-        run_in_tox_venv(ctx, remote, ['pip', 'install', 'bindep'])
-        r = run_in_tox_venv(ctx, remote,
+        toxvenv_sh(ctx, remote, ['pip', 'install', 'bindep'])
+        packages[client] = toxvenv_sh(ctx, remote,
             ['bindep', '--brief', '--file', '{}/bindep.txt'.format(get_keystone_dir(ctx))],
-            stdout=StringIO(),
-            check_status=False) # returns 1 on success?
-        packages[client] = r.stdout.getvalue().splitlines()
+            check_status=False).splitlines() # returns 1 on success?
         for dep in packages[client]:
             install_package(dep, remote)
     try:
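The rewritten helper leans on `remote.sh()` accepting the same kwargs as `run()` (here `check_status=False`) while returning stdout as text, so the old buffer-plus-`splitlines()` dance collapses into one call. A sketch of how such a helper composes a venv activation with a command via `run.Raw('&&')` (the signature is simplified relative to the diff; the venv path is an assumption):

    from teuthology.orchestra import run

    def toxvenv_sh(remote, venv_path, args, **kwargs):
        activate = venv_path + '/bin/activate'
        # 'source <activate> && <cmd>' runs the command inside the venv;
        # sh() returns its stdout as a native str
        return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)

    # packages = toxvenv_sh(remote, '/opt/tox/venv', ['bindep', '--brief']).splitlines()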
@@ -1,6 +1,6 @@
 
 
-from mgr_test_case import MgrTestCase
+from tasks.mgr.mgr_test_case import MgrTestCase
 
 import json
 import logging
@@ -1,7 +1,6 @@
 """
 Qemu task
 """
-from cStringIO import StringIO
 
 import contextlib
 import logging
@@ -172,7 +171,7 @@ def generate_iso(ctx, config):
     user_data = user_data.format(
         ceph_branch=ctx.config.get('branch'),
         ceph_sha1=ctx.config.get('sha1'))
-    teuthology.write_file(remote, userdata_path, StringIO(user_data))
+    teuthology.write_file(remote, userdata_path, user_data)
 
     with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
         teuthology.write_file(remote, metadata_path, f)
@@ -17,8 +17,9 @@ import datetime
 import Queue
 
 import sys
+import six
 
-from cStringIO import StringIO
+from io import BytesIO
 
 import boto.exception
 import boto.s3.connection
@@ -1038,7 +1039,7 @@ def task(ctx, config):
         out['placement_pools'].append(rule)
 
     (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
-                          stdin=StringIO(json.dumps(out)),
+                          stdin=BytesIO(six.ensure_binary(json.dumps(out))),
                           check_status=True)
 
     (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
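Data flowing the other way, into a remote command's stdin, has to be bytes once the buffer is `BytesIO`; `six.ensure_binary()` encodes the JSON text. A sketch with an illustrative payload:

    import json
    from io import BytesIO
    import six

    zone = {'placement_pools': []}                        # illustrative payload
    stdin = BytesIO(six.ensure_binary(json.dumps(zone)))  # str -> utf-8 bytes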
@@ -1071,16 +1072,15 @@ def main():
     client0 = remote.Remote(user + host)
     ctx = config
     ctx.cluster=cluster.Cluster(remotes=[(client0,
-        [ 'ceph.client.rgw.%s' % (host), ]),])
-
+                                         [ 'ceph.client.rgw.%s' % (host), ]),])
     ctx.rgw = argparse.Namespace()
     endpoints = {}
     endpoints['ceph.client.rgw.%s' % host] = (host, 80)
     ctx.rgw.role_endpoints = endpoints
     ctx.rgw.realm = None
     ctx.rgw.regions = {'region0': { 'api name': 'api1',
-                    'is master': True, 'master zone': 'r0z0',
-                    'zones': ['r0z0', 'r0z1'] }}
+                       'is master': True, 'master zone': 'r0z0',
+                       'zones': ['r0z0', 'r0z1'] }}
     ctx.rgw.config = {'ceph.client.rgw.%s' % host: {'system user': {'name': '%s-system-user' % host}}}
     task(config, None)
     exit()
@@ -1,7 +1,7 @@
 """
 Run a set of s3 tests on rgw.
 """
-from cStringIO import StringIO
+from io import BytesIO
 from configobj import ConfigObj
 import base64
 import contextlib
@@ -155,7 +155,7 @@ def create_users(ctx, config, run_stages):
         if not 'check' in run_stages[client]:
             # only remove user if went through the check stage
             continue
-        for user in users.itervalues():
+        for user in users.values():
             uid = '{user}.{client}'.format(user=user, client=client)
             ctx.cluster.only(client).run(
                 args=[
@@ -200,7 +200,7 @@ def configure(ctx, config, run_stages):
         if properties is not None and 'slow_backend' in properties:
             ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
 
-        conf_fp = StringIO()
+        conf_fp = BytesIO()
         ragweed_conf.write(conf_fp)
         teuthology.write_file(
             remote=remote,
@@ -7,7 +7,7 @@ import os
 import tempfile
 import sys
 
-from cStringIO import StringIO
+from io import BytesIO
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 from teuthology import contextutil
@@ -303,12 +303,12 @@ def canonical_path(ctx, role, path):
     representing the given role. A canonical path contains no
     . or .. components, and includes no symbolic links.
     """
-    version_fp = StringIO()
+    version_fp = BytesIO()
     ctx.cluster.only(role).run(
         args=[ 'readlink', '-f', path ],
         stdout=version_fp,
     )
-    canonical_path = version_fp.getvalue().rstrip('\n')
+    canonical_path = six.ensure_str(version_fp.getvalue()).rstrip('\n')
     version_fp.close()
     return canonical_path
 
@@ -9,7 +9,6 @@ import contextlib
 import json
 import logging
 import os
-import StringIO
 
 from teuthology.parallel import parallel
 from teuthology import misc as teuthology
|
||||
|
||||
def run_rbd_map(remote, image, iodepth):
|
||||
iodepth = max(iodepth, 128) # RBD_QUEUE_DEPTH_DEFAULT
|
||||
out = StringIO.StringIO()
|
||||
remote.run(args=['sudo', 'rbd', 'device', 'map', '-o',
|
||||
'queue_depth={}'.format(iodepth), image], stdout=out)
|
||||
dev = out.getvalue().rstrip('\n')
|
||||
dev = remote.sh(['sudo', 'rbd', 'device', 'map', '-o',
|
||||
'queue_depth={}'.format(iodepth), image]).rstripg('\n')
|
||||
teuthology.sudo_write_file(
|
||||
remote,
|
||||
'/sys/block/{}/queue/nr_requests'.format(os.path.basename(dev)),
|
||||
@ -214,9 +211,8 @@ def run_fio(remote, config, rbd_test_dir):
|
||||
remote.run(args=['sudo', run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
|
||||
remote.run(args=['ceph', '-s'])
|
||||
finally:
|
||||
out=StringIO.StringIO()
|
||||
remote.run(args=['rbd', 'device', 'list', '--format=json'], stdout=out)
|
||||
mapped_images = json.loads(out.getvalue())
|
||||
out = remote.sh('rbd device list --format=json')
|
||||
mapped_images = json.loads(out)
|
||||
if mapped_images:
|
||||
log.info("Unmapping rbd images on {sn}".format(sn=sn))
|
||||
for image in mapped_images:
|
||||
|
@@ -9,11 +9,11 @@ from teuthology.orchestra import run
 from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.exceptions import ConfigError
-from util import get_remote_for_role
-from util.rgw import rgwadmin, wait_for_radosgw
-from util.rados import (create_ec_pool,
-                        create_replicated_pool,
-                        create_cache_pool)
+from tasks.util import get_remote_for_role
+from tasks.util.rgw import rgwadmin, wait_for_radosgw
+from tasks.util.rados import (create_ec_pool,
+                              create_replicated_pool,
+                              create_cache_pool)
 
 log = logging.getLogger(__name__)
 
@@ -1,7 +1,7 @@
 """
 rgw s3tests logging wrappers
 """
-from cStringIO import StringIO
+from io import BytesIO
 from configobj import ConfigObj
 import contextlib
 import logging
@@ -68,7 +68,7 @@ def run_tests(ctx, config):
 
     s3tests.run_tests(ctx, config)
 
-    netcat_out = StringIO()
+    netcat_out = BytesIO()
 
     for client, client_config in config.items():
         ctx.cluster.only(client).run(
@@ -1,13 +1,14 @@
 """
 Run a set of s3 tests on rgw.
 """
-from cStringIO import StringIO
+from io import BytesIO
 from configobj import ConfigObj
 import base64
 import contextlib
 import logging
 import os
 import random
+import six
 import string
 
 from teuthology import misc as teuthology
@@ -78,10 +79,14 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
-    s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-    s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in range(10)))
-    s3tests_conf[section].setdefault('totp_seed', base64.b32encode(os.urandom(40)))
+    s3tests_conf[section].setdefault('access_key',
+        ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
+    s3tests_conf[section].setdefault('secret_key',
+        six.ensure_str(base64.b64encode(os.urandom(40))))
+    s3tests_conf[section].setdefault('totp_serial',
+        ''.join(random.choice(string.digits) for i in range(10)))
+    s3tests_conf[section].setdefault('totp_seed',
+        six.ensure_str(base64.b32encode(os.urandom(40))))
     s3tests_conf[section].setdefault('totp_seconds', '5')
 
 
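Two separate py3 issues are fixed in `_config_user`: `string.uppercase` no longer exists (it is `string.ascii_uppercase` on both interpreters), and `base64.b64encode()` / `b32encode()` return bytes, which must be decoded before landing in a text config. A short sketch:

    import base64
    import os
    import random
    import string
    import six

    access_key = ''.join(random.choice(string.ascii_uppercase) for i in range(20))
    secret_key = six.ensure_str(base64.b64encode(os.urandom(40)))  # bytes -> str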
@@ -141,7 +146,7 @@ def create_users(ctx, config):
         yield
     finally:
         for client in config['clients']:
-            for user in users.itervalues():
+            for user in users.values():
                 uid = '{user}.{client}'.format(user=user, client=client)
                 cluster_name, daemon_type, client_id = teuthology.split_role(client)
                 client_with_id = daemon_type + '.' + client_id
@@ -229,7 +234,7 @@ def configure(ctx, config):
             './bootstrap',
         ],
     )
-    conf_fp = StringIO()
+    conf_fp = BytesIO()
     s3tests_conf.write(conf_fp)
     teuthology.write_file(
         remote=remote,
@@ -242,13 +247,13 @@ def configure(ctx, config):
     for client, properties in config['clients'].items():
         with open(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
-            conf = f.read().format(
+            conf = six.ensure_str(f.read()).format(
                 idle_timeout=config.get('idle_timeout', 30)
             )
             teuthology.write_file(
                 remote=remote,
                 path='{tdir}/boto.cfg'.format(tdir=testdir),
-                data=conf,
+                data=six.ensure_binary(conf),
             )
 
     try:
@@ -1,7 +1,7 @@
 """
 Task for running RGW S3 tests with the AWS Java SDK
 """
-from cStringIO import StringIO
+from io import BytesIO
 import logging
 
 import base64
@@ -117,7 +117,7 @@ class S3tests_java(Task):
                 repo,
                 '{tdir}/s3-tests-java'.format(tdir=testdir),
             ],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
         if client in self.config and self.config[client] is not None:
             if 'sha1' in self.config[client] and self.config[client]['sha1'] is not None:
@@ -156,7 +156,7 @@ class S3tests_java(Task):
         testdir = teuthology.get_testdir(self.ctx)
         self.ctx.cluster.only(client).run(
             args=['{tdir}/s3-tests-java/bootstrap.sh'.format(tdir=testdir)],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
 
         endpoint = self.ctx.rgw.role_endpoints[client]
@@ -173,7 +173,7 @@ class S3tests_java(Task):
                 '-file', endpoint.cert.certificate,
                 '-storepass', 'changeit',
             ],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
 
     def create_users(self):
@@ -224,7 +224,7 @@ class S3tests_java(Task):
             log.info('{args}'.format(args=args))
             self.ctx.cluster.only(client).run(
                 args=args,
-                stdout=StringIO()
+                stdout=BytesIO()
             )
         else:
             self.users.pop(section)
@@ -279,8 +279,8 @@ class S3tests_java(Task):
         with open('s3_tests_tmp.yaml', 'w') as outfile:
             yaml.dump(cfg_dict, outfile, default_flow_style=False)
 
-        conf_fp = StringIO()
-        with open('s3_tests_tmp.yaml', 'r') as infile:
+        conf_fp = BytesIO()
+        with open('s3_tests_tmp.yaml', 'rb') as infile:
             for line in infile:
                 conf_fp.write(line)
 
@@ -309,7 +309,7 @@ class S3tests_java(Task):
                 '{tdir}/s3-tests-java/config.properties'.format(
                     tdir=testdir)
             ],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
         args = ['cd',
                 '{tdir}/s3-tests-java'.format(tdir=testdir),
@@ -346,25 +346,25 @@ class S3tests_java(Task):
             self.ctx.cluster.only(client).run(
                 args=['radosgw-admin', 'gc',
                       'process', '--include-all'],
-                stdout=StringIO()
+                stdout=BytesIO()
             )
 
             if gr != 'All':
                 self.ctx.cluster.only(client).run(
                     args=args + ['--tests'] + [gr] + extra_args,
-                    stdout=StringIO()
+                    stdout=BytesIO()
                 )
             else:
                 self.ctx.cluster.only(client).run(
                     args=args + extra_args,
-                    stdout=StringIO()
+                    stdout=BytesIO()
                 )
 
             for i in range(2):
                 self.ctx.cluster.only(client).run(
                     args=['radosgw-admin', 'gc',
                           'process', '--include-all'],
-                    stdout=StringIO()
+                    stdout=BytesIO()
                 )
 
     def remove_tests(self, client):
@@ -379,7 +379,7 @@ class S3tests_java(Task):
                   'cat', self.log_name,
                   run.Raw('&&'),
                   'rm', self.log_name],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
 
         self.ctx.cluster.only(client).run(
@@ -388,7 +388,7 @@ class S3tests_java(Task):
             args=['rm',
                   '-rf',
                   '{tdir}/s3-tests-java'.format(tdir=testdir),
                   ],
-            stdout=StringIO()
+            stdout=BytesIO()
         )
 
     def delete_users(self, client):
|
||||
@ -408,7 +408,7 @@ class S3tests_java(Task):
|
||||
'--purge-data',
|
||||
'--cluster', 'ceph',
|
||||
],
|
||||
stdout=StringIO()
|
||||
stdout=BytesIO()
|
||||
)
|
||||
|
||||
|
||||
|
@@ -1,4 +1,4 @@
-from cStringIO import StringIO
+from io import BytesIO
 import logging
 import json
 import time
@@ -7,7 +7,7 @@ from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
 
-def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
+def rgwadmin(ctx, client, cmd, stdin=BytesIO(), check_status=False,
              format='json', decode=True, log_level=logging.DEBUG):
     log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
     testdir = teuthology.get_testdir(ctx)
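One pre-existing wrinkle this change carries over: `stdin=BytesIO()` as a default argument is evaluated once at function definition, so the same buffer object is shared by every call that omits `stdin`. That is harmless here, since an empty buffer reads as empty either way, but it is worth knowing when reading the signature. A demonstration of the general behavior:

    from io import BytesIO

    def f(buf=BytesIO()):
        return id(buf)

    assert f() == f()   # the very same BytesIO object on every call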
@@ -29,8 +29,8 @@ def rgwadmin(ctx, client, cmd, stdin=BytesIO(), check_status=False,
     proc = remote.run(
         args=pre,
         check_status=check_status,
-        stdout=StringIO(),
-        stderr=StringIO(),
+        stdout=BytesIO(),
+        stderr=BytesIO(),
         stdin=stdin,
         )
     r = proc.exitstatus
@@ -81,9 +81,9 @@ def wait_for_radosgw(url, remote):
     proc = remote.run(
         args=curl_cmd,
         check_status=False,
-        stdout=StringIO(),
-        stderr=StringIO(),
-        stdin=StringIO(),
+        stdout=BytesIO(),
+        stderr=BytesIO(),
+        stdin=BytesIO(),
         )
     exit_status = proc.exitstatus
     if exit_status == 0: