Merge pull request #743 from ceph/wip-root-metadata

tasks/cephfs: add TestSessionMap.test_session_reject

Reviewed-by: Greg Farnum <gfarnum@redhat.com>

commit 3b37c245b3
@@ -1,4 +1,9 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - client session with invalid root
+
 tasks:
 - cephfs_test_runner:
     modules:
@@ -841,6 +841,14 @@ class CephManager:
         )
         return proc.exitstatus

+    def run_ceph_w(self):
+        """
+        Execute "ceph -w" in the background with stdout connected to a StringIO,
+        and return the RemoteProcess.
+        """
+        return self.controller.run(args=["sudo", "daemon-helper", "kill", "ceph", "-w"],
+                                   wait=False, stdout=StringIO(), stdin=run.PIPE)
+
     def do_rados(self, remote, cmd):
         """
         Execute a remote rados command.
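A hedged usage sketch for the new helper: the `ceph_manager` handle and the expected text are illustrative, and `wait()` can raise CommandFailedError because daemon-helper delivers a kill signal.

    proc = ceph_manager.run_ceph_w()   # RemoteProcess with a StringIO stdout
    # ... perform an action that should emit a cluster log message ...
    if "expected text" in proc.stdout.getvalue():
        proc.stdin.close()             # closing stdin tells daemon-helper
        try:                           # to kill the "ceph -w" child
            proc.wait()
        except CommandFailedError:
            pass                       # non-zero exit is expected on kill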
@@ -160,9 +160,8 @@ class CephFSTestCase(unittest.TestCase):

     def tearDown(self):
         self.fs.clear_firewall()
-        self.mount_a.teardown()
-        if self.mount_b:
-            self.mount_b.teardown()
+        for m in self.mounts:
+            m.teardown()

         for subsys, key in self.configs_set:
             self.fs.clear_ceph_conf(subsys, key)
@@ -288,3 +287,43 @@ class CephFSTestCase(unittest.TestCase):

         else:
             raise AssertionError("MDS daemon '{0}' did not crash as expected".format(daemon_id))
+
+    def assert_cluster_log(self, expected_pattern):
+        """
+        Context manager.  Assert that during execution, or up to ~15 seconds
+        later, the Ceph cluster log emits a message matching the expected pattern.
+
+        :param expected_pattern: a string that you expect to see in the log output
+        """
+
+        ceph_manager = self.fs.mon_manager
+
+        class ContextManager(object):
+            def match(self):
+                return expected_pattern in self.watcher_process.stdout.getvalue()
+
+            def __enter__(self):
+                self.watcher_process = ceph_manager.run_ceph_w()
+
+            def __exit__(self, exc_type, exc_val, exc_tb):
+                if not self.watcher_process.finished:
+                    # Check if we got an early match, wait a bit if we didn't
+                    if self.match():
+                        return
+                    else:
+                        log.debug("No log hits yet, waiting...")
+                        # Default monc tick interval is 10s, so wait that long and
+                        # then some grace
+                        time.sleep(15)
+
+                self.watcher_process.stdin.close()
+                try:
+                    self.watcher_process.wait()
+                except CommandFailedError:
+                    pass
+
+                if not self.match():
+                    log.error("Log output: \n{0}\n".format(self.watcher_process.stdout.getvalue()))
+                    raise AssertionError("Expected log message not found: '{0}'".format(expected_pattern))
+
+        return ContextManager()
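A hedged usage sketch (the action and pattern are illustrative; test_session_reject further down uses the same shape):

    # Passes only if the pattern shows up in the "ceph -w" output during
    # the block or within the grace period; otherwise AssertionError.
    with self.assert_cluster_log("expected log fragment"):
        trigger_the_event()   # hypothetical action that makes the cluster log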
@@ -2,7 +2,6 @@
 from StringIO import StringIO
 import json
 import time
-import os
 import logging
 from textwrap import dedent
@@ -23,22 +22,21 @@ class FuseMount(CephFSMount):
         self.fuse_daemon = None
         self._fuse_conn = None

-    def mount(self):
+    def mount(self, mount_path=None):
         log.info("Client client.%s config is %s" % (self.client_id, self.client_config))

         daemon_signal = 'kill'
         if self.client_config.get('coverage') or self.client_config.get('valgrind') is not None:
             daemon_signal = 'term'

-        mnt = os.path.join(self.test_dir, 'mnt.{id}'.format(id=self.client_id))
         log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
-            id=self.client_id, remote=self.client_remote, mnt=mnt))
+            id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))

         self.client_remote.run(
             args=[
                 'mkdir',
                 '--',
-                mnt,
+                self.mountpoint,
             ],
         )
@@ -50,12 +48,16 @@ class FuseMount(CephFSMount):
             'daemon-helper',
             daemon_signal,
         ]
-        run_cmd_tail = [
-            'ceph-fuse',
-            '-f',
+
+        fuse_cmd = ['ceph-fuse', "-f"]
+
+        if mount_path is not None:
+            fuse_cmd += ["--client_mountpoint={0}".format(mount_path)]
+
+        fuse_cmd += [
             '--name', 'client.{id}'.format(id=self.client_id),
             # TODO ceph-fuse doesn't understand dash dash '--',
-            mnt,
+            self.mountpoint,
         ]

         if self.client_config.get('valgrind') is not None:
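For concreteness, a sketch of the list this builds when a mount_path is given; the client id and mountpoint values are illustrative:

    # Equivalent to the construction above with client_id == "0",
    # mount_path == "/foo/bar", mountpoint == "/tmp/tdir/mnt.0":
    fuse_cmd = ['ceph-fuse', '-f',
                '--client_mountpoint=/foo/bar',
                '--name', 'client.0',
                '/tmp/tdir/mnt.0']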
@@ -66,7 +68,7 @@ class FuseMount(CephFSMount):
                 self.client_config.get('valgrind'),
             )

-        run_cmd.extend(run_cmd_tail)
+        run_cmd.extend(fuse_cmd)

         def list_connections():
             self.client_remote.run(
@@ -110,6 +112,10 @@ class FuseMount(CephFSMount):

         post_mount_conns = list_connections()
         while len(post_mount_conns) <= len(pre_mount_conns):
+            if self.fuse_daemon.finished:
+                # Did mount fail?  Raise the CommandFailedError instead of
+                # hitting the "failed to populate /sys/" timeout
+                self.fuse_daemon.wait()
             time.sleep(1)
             waited += 1
             if waited > timeout:
@@ -224,10 +230,7 @@ class FuseMount(CephFSMount):
                 stderr=stderr
             )
         except CommandFailedError:
-            if "not found" in stderr.getvalue():
-                # Missing mount point, so we are unmounted already, yay.
-                pass
-            else:
+            if self.is_mounted():
                 raise

         assert not self.is_mounted()
@@ -41,11 +41,11 @@ class KernelMount(CephFSMount):
             ],
         )

-    def mount(self):
+    def mount(self, mount_path=None):
         log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
             id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))

-        keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id)
+        keyring = self.get_keyring_path()
         secret = '{tdir}/data/client.{id}.secret'.format(tdir=self.test_dir, id=self.client_id)
         self.write_secret_file(self.client_remote, 'client.{id}'.format(id=self.client_id),
                                keyring, secret)
@@ -58,6 +58,9 @@ class KernelMount(CephFSMount):
             ],
         )

+        if mount_path is None:
+            mount_path = "/"
+
         self.client_remote.run(
             args=[
                 'sudo',
@@ -65,7 +68,7 @@ class KernelMount(CephFSMount):
                 'ceph-coverage',
                 '{tdir}/archive/coverage'.format(tdir=self.test_dir),
                 '/sbin/mount.ceph',
-                '{mons}:/'.format(mons=','.join(self.mons)),
+                '{mons}:{mount_path}'.format(mons=','.join(self.mons), mount_path=mount_path),
                 self.mountpoint,
                 '-v',
                 '-o',
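A minimal sketch of the resulting device string for mount.ceph (monitor address is illustrative):

    mons = ['10.0.0.1:6789']
    mount_path = '/foo/bar'
    source = '{mons}:{mount_path}'.format(mons=','.join(mons),
                                          mount_path=mount_path)
    assert source == '10.0.0.1:6789:/foo/bar'   # the old code always produced ':/'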
@@ -24,15 +24,18 @@ class CephFSMount(object):
         self.client_id = client_id
         self.client_remote = client_remote

-        self.mountpoint = os.path.join(self.test_dir, 'mnt.{id}'.format(id=self.client_id))
         self.test_files = ['a', 'b', 'c']

         self.background_procs = []

+    @property
+    def mountpoint(self):
+        return os.path.join(self.test_dir, 'mnt.{id}'.format(id=self.client_id))
+
     def is_mounted(self):
         raise NotImplementedError()

-    def mount(self):
+    def mount(self, mount_path=None):
         raise NotImplementedError()

     def umount(self):
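Making mountpoint a property means it tracks later reassignment of client_id (which _configure_auth in the new test does). A self-contained sketch with illustrative values:

    import os

    class Example(object):             # stand-in for CephFSMount
        def __init__(self, test_dir, client_id):
            self.test_dir = test_dir
            self.client_id = client_id

        @property
        def mountpoint(self):
            return os.path.join(self.test_dir, 'mnt.{id}'.format(id=self.client_id))

    m = Example("/tmp/tdir", "0")
    assert m.mountpoint == "/tmp/tdir/mnt.0"
    m.client_id = "badguy"             # derived path follows the new id
    assert m.mountpoint == "/tmp/tdir/mnt.badguy"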
@@ -53,6 +56,17 @@ class CephFSMount(object):
     def wait_until_mounted(self):
         raise NotImplementedError()

+    def get_keyring_path(self):
+        return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id)
+
+    @property
+    def config_path(self):
+        """
+        Path to ceph.conf: override this if you're not a normal systemwide ceph install
+        :return: str
+        """
+        return "/etc/ceph/ceph.conf"
+
     @contextmanager
     def mounted(self):
         """
@@ -107,6 +121,7 @@ class CephFSMount(object):
     def run_python(self, pyscript):
         p = self._run_python(pyscript)
         p.wait()
+        return p.stdout.getvalue().strip()

     def run_shell(self, args, wait=True):
         args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
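Usage sketch for the new return value; the mount handle and script body are illustrative (this tree is Python 2):

    out = mount.run_python("print 'hello'")
    assert out == "hello"    # stdout captured, trailing newline stripped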
@@ -1,6 +1,8 @@

 import json
 import logging
+from tasks.cephfs.fuse_mount import FuseMount
+from teuthology.exceptions import CommandFailedError
 from tasks.cephfs.cephfs_test_case import CephFSTestCase

 log = logging.getLogger(__name__)
@@ -105,3 +107,72 @@ class TestSessionMap(CephFSTestCase):
         log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2)))
         self.assertEqual(table_json['0']['result'], 0)
         self.assertEqual(len(table_json['0']['data']['Sessions']), 0)
+
+    def _sudo_write_file(self, remote, path, data):
+        """
+        Write data to a remote file as super user
+
+        :param remote: Remote site.
+        :param path: Path on the remote being written to.
+        :param data: Data to be written.
+        """
+        remote.run(
+            args=[
+                'sudo',
+                'python',
+                '-c',
+                'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
+                path,
+            ],
+            stdin=data,
+        )
+
+    def _configure_auth(self, mount, id_name, mds_caps, osd_caps=None, mon_caps=None):
+        """
+        Set up auth credentials for a client mount, and write out the keyring
+        for the client to use.
+        """
+
+        # This keyring stuff won't work for kclient
+        assert isinstance(mount, FuseMount)
+
+        if osd_caps is None:
+            osd_caps = "allow rw"
+
+        if mon_caps is None:
+            mon_caps = "allow r"
+
+        out = self.fs.mon_manager.raw_cluster_cmd(
+            "auth", "get-or-create", "client.{name}".format(name=id_name),
+            "mds", mds_caps,
+            "osd", osd_caps,
+            "mon", mon_caps
+        )
+        mount.client_id = id_name
+        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
+        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
+
+    def test_session_reject(self):
+        self.mount_a.run_shell(["mkdir", "foo"])
+        self.mount_a.run_shell(["mkdir", "foo/bar"])
+        self.mount_a.umount_wait()
+
+        # Mount B will be my rejected client
+        self.mount_b.umount_wait()
+
+        # Configure a client that is limited to /foo/bar
+        self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
+        # Check he can mount that dir and do IO
+        self.mount_b.mount(mount_path="/foo/bar")
+        self.mount_b.wait_until_mounted()
+        self.mount_b.create_destroy()
+        self.mount_b.umount_wait()
+
+        # Configure the client to claim that its mount point metadata is /baz
+        self.set_conf("client.badguy", "client_metadata", "root=/baz")
+        # Try to mount the client, see that it fails
+        with self.assert_cluster_log("client session with invalid root '/baz' denied"):
+            with self.assertRaises(CommandFailedError):
+                self.mount_b.mount(mount_path="/foo/bar")
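A loose illustration of the mismatch being exercised here (a sketch of its shape, not the MDS's actual check): the client's advertised metadata root falls outside what its path-restricted caps allow, so the session is denied and the whitelisted message appears in the cluster log, which assert_cluster_log then catches.

    caps_path = "/foo/bar"    # from "allow rw path=/foo/bar"
    claimed_root = "/baz"     # from client_metadata root=/baz
    assert not claimed_root.startswith(caps_path)   # hence: session denied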
@@ -160,6 +160,10 @@ class LocalRemote(object):
         shutil.copy(path, tmpfile)
         return tmpfile

+    def put_file(self, src, dst, sudo=False):
+        assert sudo is False
+        shutil.copy(src, dst)
+
     def run(self, args, check_status=True, wait=True,
             stdout=None, stderr=None, cwd=None, stdin=None,
             logger=None, label=None):
@@ -354,10 +358,17 @@ class MountDaemon(object):


 class LocalFuseMount(FuseMount):
-    def __init__(self, client_id, mount_point):
-        test_dir = "/tmp/not_there"
+    def __init__(self, test_dir, client_id):
         super(LocalFuseMount, self).__init__(None, test_dir, client_id, LocalRemote())
-        self.mountpoint = mount_point
+
+    @property
+    def config_path(self):
+        return "./ceph.conf"
+
+    def get_keyring_path(self):
+        # This is going to end up in a config file, so use an absolute path
+        # to avoid assumptions about daemons' pwd
+        return os.path.abspath("./client.{0}.keyring".format(self.client_id))

     def run_shell(self, args, wait=True):
         # FIXME maybe should add a pwd arg to teuthology.orchestra so that
@@ -387,7 +398,7 @@ class LocalFuseMount(FuseMount):
         if self.is_mounted():
             super(LocalFuseMount, self).umount()

-    def mount(self):
+    def mount(self, mount_path=None):
         self.client_remote.run(
             args=[
                 'mkdir',
@@ -424,6 +435,9 @@ class LocalFuseMount(FuseMount):
         if os.getuid() != 0:
             prefix += ["--client-die-on-failed-remount=false"]

+        if mount_path is not None:
+            prefix += ["--client_mountpoint={0}".format(mount_path)]
+
         self._proc = self.client_remote.run(args=
             prefix + [
                 "--name",
@@ -486,6 +500,10 @@ class LocalCephManager(CephManager):
         """
         return LocalRemote()

+    def run_ceph_w(self):
+        proc = self.controller.run(["./ceph", "-w"], wait=False, stdout=StringIO())
+        return proc
+
     def raw_cluster_cmd(self, *args):
         """
         args like ["osd", "dump"]
@@ -610,9 +628,23 @@ class LocalFilesystem(Filesystem):
         for subsys, kvs in self._conf.items():
             existing_str += "\n[{0}]\n".format(subsys)
             for key, val in kvs.items():
-                # comment out any existing instances
-                if key in existing_str:
-                    existing_str = existing_str.replace(key, "#{0}".format(key))
+                # Comment out existing instance if it exists
+                log.info("Searching for existing instance {0}/{1}".format(
+                    key, subsys
+                ))
+                existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
+                    subsys
+                ), existing_str, re.MULTILINE)
+
+                if existing_section:
+                    section_str = existing_str[existing_section.start():existing_section.end()]
+                    existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
+                    if existing_val:
+                        start = existing_section.start() + existing_val.start(1)
+                        log.info("Found string to replace at {0}".format(
+                            start
+                        ))
+                        existing_str = existing_str[0:start] + "#" + existing_str[start:]
+
                 existing_str += "{0} = {1}\n".format(key, val)
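A hedged, runnable sketch of the section-scoped comment-out above on an illustrative input. One caveat worth noting: [^#] must consume a character before the key, so a key at column zero would not match; the indented case shown here does.

    import re

    existing_str = "[client.badguy]\n\tkeyring = /old/path\n"
    section = re.search("^\[{0}\]$([\n]|[^\[])+".format("client.badguy"),
                        existing_str, re.MULTILINE)
    section_str = existing_str[section.start():section.end()]
    val = re.search("^\s*[^#]({0}) =".format("keyring"), section_str, re.MULTILINE)
    start = section.start() + val.start(1)
    print(existing_str[0:start] + "#" + existing_str[start:])
    # [client.badguy]
    #     #keyring = /old/path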
@@ -665,8 +697,8 @@ def exec_test():

     test_dir = tempfile.mkdtemp()

-    # Run with two clients because some tests require the second one
-    clients = ["0", "1"]
+    # Create as many of these as the biggest test requires
+    clients = ["0", "1", "2"]

     remote = LocalRemote()
@@ -718,15 +750,14 @@ def exec_test():

         open("./keyring", "a").write(p.stdout.getvalue())

-        mount_point = os.path.join(test_dir, "mnt.{0}".format(client_id))
-        mount = LocalFuseMount(client_id, mount_point)
+        mount = LocalFuseMount(test_dir, client_id)
         mounts.append(mount)
         if mount.is_mounted():
-            log.warn("unmounting {0}".format(mount_point))
+            log.warn("unmounting {0}".format(mount.mountpoint))
             mount.umount_wait()
         else:
-            if os.path.exists(mount_point):
-                os.rmdir(mount_point)
+            if os.path.exists(mount.mountpoint):
+                os.rmdir(mount.mountpoint)
     filesystem = LocalFilesystem(ctx)

     from tasks.cephfs_test_runner import DecoratingLoader