import json
import logging
from unittest import case
from tasks.ceph_test_case import CephTestCase
import os
import re
from StringIO import StringIO

from tasks.cephfs.fuse_mount import FuseMount

from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError


log = logging.getLogger(__name__)


def for_teuthology(f):
    """
    Decorator that adds an "is_for_teuthology" attribute to the wrapped function
    """
    f.is_for_teuthology = True
    return f
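
# Illustrative usage only (this sketch is not part of the original module and the
# test name is hypothetical): tagging a long-running test so that shorter runs can
# filter it out via the "is_for_teuthology" attribute.
#
#     @for_teuthology
#     def test_full_data_scan(self):
#         ...  # many-hour workload, only worth running in a full teuthology run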


def needs_trimming(f):
    """
    Mark fn as requiring a client capable of trimming its cache (i.e. for ceph-fuse
    this means it needs to be able to run as root, currently)
    """
    f.needs_trimming = True
    return f
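
# Illustrative usage only (sketch; the test name is hypothetical): marking a test
# that must run against a ceph-fuse client able to trim its cache, i.e. one that
# is running as root.
#
#     @needs_trimming
#     def test_client_cache_trim(self):
#         ...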


class CephFSTestCase(CephTestCase):
    """
    Test case for Ceph FS, requires caller to populate Filesystem and Mounts
    into the fs, mount_a, mount_b class attributes (setting mount_b is optional)

    Handles resetting the cluster under test between tests.
    """

    # FIXME weird explicit naming
    mount_a = None
    mount_b = None
    recovery_mount = None

    # Declarative test requirements: subclasses should override these to indicate
    # their special needs.  If not met, tests will be skipped.
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 1
    REQUIRE_KCLIENT_REMOTE = False
    REQUIRE_ONE_CLIENT_REMOTE = False
    REQUIRE_MEMSTORE = False

    # Whether to create the default filesystem during setUp
    REQUIRE_FILESYSTEM = True

    # requires REQUIRE_FILESYSTEM = True
    REQUIRE_RECOVERY_FILESYSTEM = False

    LOAD_SETTINGS = []
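
    # Illustrative subclass sketch (the class, test, and setting names below are
    # hypothetical and not part of this module): a test case that requires two
    # mounted clients and two MDS daemons, and loads an MDS config value onto the
    # instance before each test runs.
    #
    #     class TestExampleWorkload(CephFSTestCase):
    #         CLIENTS_REQUIRED = 2
    #         MDSS_REQUIRED = 2
    #         LOAD_SETTINGS = ["mds_session_timeout"]  # exposed as self.mds_session_timeout
    #
    #         def test_two_client_io(self):
    #             self.mount_a.run_shell(["touch", "file_a"])
    #             self.mount_b.run_shell(["touch", "file_b"])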

    def setUp(self):
        super(CephFSTestCase, self).setUp()

        if len(self.mds_cluster.mds_ids) < self.MDSS_REQUIRED:
            raise case.SkipTest("Only have {0} MDSs, require {1}".format(
                len(self.mds_cluster.mds_ids), self.MDSS_REQUIRED
            ))

        if len(self.mounts) < self.CLIENTS_REQUIRED:
            raise case.SkipTest("Only have {0} clients, require {1}".format(
                len(self.mounts), self.CLIENTS_REQUIRED
            ))

        if self.REQUIRE_KCLIENT_REMOTE:
            if not isinstance(self.mounts[0], FuseMount) or not isinstance(self.mounts[1], FuseMount):
                # kclient kill() power cycles nodes, so requires clients to each be on
                # their own node
                if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname:
                    raise case.SkipTest("kclient clients must be on separate nodes")

        if self.REQUIRE_ONE_CLIENT_REMOTE:
            if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
                raise case.SkipTest("Require first client to be on separate server from MDSs")

        if self.REQUIRE_MEMSTORE:
            objectstore = self.mds_cluster.get_config("osd_objectstore", "osd")
            if objectstore != "memstore":
                # You certainly *could* run this on a real OSD, but you don't want to sit
                # here for hours waiting for the test to fill up a 1TB drive!
                raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")

        # Create friendly mount_a, mount_b attrs
        for i in range(0, self.CLIENTS_REQUIRED):
            setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])

        self.mds_cluster.clear_firewall()

        # Unmount all clients, we are about to blow away the filesystem
        for mount in self.mounts:
            if mount.is_mounted():
                mount.umount_wait(force=True)

        # To avoid any issues with e.g. unlink bugs, we destroy and recreate
        # the filesystem rather than just doing a rm -rf of files
        self.mds_cluster.mds_stop()
        self.mds_cluster.mds_fail()
        self.mds_cluster.delete_all_filesystems()
        self.fs = None  # is now invalid!
        self.recovery_fs = None

        # In case the previous filesystem had filled up the RADOS cluster, wait for that
        # flag to pass.
        osd_mon_report_interval_max = int(self.mds_cluster.get_config("osd_mon_report_interval_max", service_type='osd'))
        self.wait_until_true(lambda: not self.mds_cluster.is_full(),
                             timeout=osd_mon_report_interval_max * 5)

        # In case anything is in the OSD blacklist, clear it out.  This is to avoid
        # the OSD map changing in the background (due to blacklist expiry) while tests run.
        try:
            self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "clear")
        except CommandFailedError:
            # Fallback for older Ceph cluster
            blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
                                   "dump", "--format=json-pretty"))['blacklist']
            log.info("Removing {0} blacklist entries".format(len(blacklist)))
            for addr, blacklisted_at in blacklist.items():
                self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)

        client_mount_ids = [m.client_id for m in self.mounts]
        # In case the test changes the IDs of clients, stash them so that we can
        # reset in tearDown
        self._original_client_ids = client_mount_ids
        log.info(client_mount_ids)

        # In case there were any extra auth identities around from a previous
        # test, delete them
        for entry in self.auth_list():
            ent_type, ent_id = entry['entity'].split(".")
            if ent_type == "client" and ent_id not in client_mount_ids and ent_id != "admin":
                self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])

        if self.REQUIRE_FILESYSTEM:
            self.fs = self.mds_cluster.newfs(create=True)
            self.fs.mds_restart()

            # In case some test messed with auth caps, reset them
            for client_id in client_mount_ids:
                self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                    'auth', 'caps', "client.{0}".format(client_id),
                    'mds', 'allow',
                    'mon', 'allow r',
                    'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))

            # wait for mds restart to complete...
            self.fs.wait_for_daemons()

            # Mount the requested number of clients
            for i in range(0, self.CLIENTS_REQUIRED):
                self.mounts[i].mount()
                self.mounts[i].wait_until_mounted()

        if self.REQUIRE_RECOVERY_FILESYSTEM:
            if not self.REQUIRE_FILESYSTEM:
                raise case.SkipTest("Recovery filesystem requires a primary filesystem as well")
            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
                                                'enable_multiple', 'true',
                                                '--yes-i-really-mean-it')
            self.recovery_fs = self.mds_cluster.newfs(name="recovery_fs", create=False)
            self.recovery_fs.set_metadata_overlay(True)
            self.recovery_fs.set_data_pool_name(self.fs.get_data_pool_name())
            self.recovery_fs.create()
            self.recovery_fs.getinfo(refresh=True)
            self.recovery_fs.mds_restart()
            self.recovery_fs.wait_for_daemons()

        # Load any config settings of interest
        for setting in self.LOAD_SETTINGS:
            setattr(self, setting, float(self.fs.mds_asok(
                ['config', 'get', setting], self.mds_cluster.mds_ids[0]
            )[setting]))

        self.configs_set = set()

    def tearDown(self):
        super(CephFSTestCase, self).tearDown()

        self.mds_cluster.clear_firewall()
        for m in self.mounts:
            m.teardown()

        for i, m in enumerate(self.mounts):
            m.client_id = self._original_client_ids[i]

        for subsys, key in self.configs_set:
            self.mds_cluster.clear_ceph_conf(subsys, key)

    def set_conf(self, subsys, key, value):
        self.configs_set.add((subsys, key))
        self.mds_cluster.set_ceph_conf(subsys, key, value)
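
    # Illustrative usage (the subsystem/key shown are examples only): a test may call
    # self.set_conf("mds", "mds_log_max_segments", "2"); because the (subsys, key)
    # pair is recorded in self.configs_set, tearDown() reverts it via clear_ceph_conf.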

    def auth_list(self):
        """
        Convenience wrapper on "ceph auth ls"
        """
        return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
            "auth", "ls", "--format=json-pretty"
        ))['auth_dump']
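
    # The entries returned here are dicts whose 'entity' field looks like
    # "client.admin" or "mds.a"; setUp() relies on that format when pruning stale
    # client identities. (Shape inferred from usage in this file, not a full schema.)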

    def assert_session_count(self, expected, ls_data=None, mds_id=None):
        if ls_data is None:
            ls_data = self.fs.mds_asok(['session', 'ls'], mds_id=mds_id)

        alive_count = len([s for s in ls_data if s['state'] != 'killing'])

        self.assertEqual(expected, alive_count, "Expected {0} sessions, found {1}".format(
            expected, alive_count
        ))

    def assert_session_state(self, client_id, expected_state):
        self.assertEqual(
            self._session_by_id(
                self.fs.mds_asok(['session', 'ls'])).get(client_id, {'state': None})['state'],
            expected_state)

    def get_session_data(self, client_id):
        # _session_by_id() expects a session list, so fetch one via get_session()
        # rather than passing the client_id straight through
        return self.get_session(client_id)

    def _session_list(self):
        ls_data = self.fs.mds_asok(['session', 'ls'])
        ls_data = [s for s in ls_data if s['state'] not in ['stale', 'closed']]
        return ls_data

    def get_session(self, client_id, session_ls=None):
        if session_ls is None:
            session_ls = self.fs.mds_asok(['session', 'ls'])

        return self._session_by_id(session_ls)[client_id]

    def _session_by_id(self, session_ls):
        return dict([(s['id'], s) for s in session_ls])

    def wait_for_daemon_start(self, daemon_ids=None):
        """
        Wait until all the daemons appear in the FSMap, either assigned
        MDS ranks or in the list of standbys
        """
        def get_daemon_names():
            return [info['name'] for info in self.mds_cluster.status().get_all()]

        if daemon_ids is None:
            daemon_ids = self.mds_cluster.mds_ids

        try:
            self.wait_until_true(
                lambda: set(daemon_ids) & set(get_daemon_names()) == set(daemon_ids),
                timeout=30
            )
        except RuntimeError:
            log.warn("Timeout waiting for daemons {0}, while we have {1}".format(
                daemon_ids, get_daemon_names()
            ))
            raise

    def assert_mds_crash(self, daemon_id):
        """
        Assert that a particular MDS daemon crashes (block until
        it does)
        """
        try:
            self.mds_cluster.mds_daemons[daemon_id].proc.wait()
        except CommandFailedError as e:
            log.info("MDS '{0}' crashed with status {1} as expected".format(daemon_id, e.exitstatus))
            self.mds_cluster.mds_daemons[daemon_id].proc = None

            # Go remove the coredump from the crash, otherwise teuthology.internal.coredump will
            # catch it later and treat it as a failure.
            p = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
                "sudo", "sysctl", "-n", "kernel.core_pattern"], stdout=StringIO())
            core_pattern = p.stdout.getvalue().strip()
            if os.path.dirname(core_pattern):  # Non-default core_pattern with a directory in it
                # We have seen a core_pattern that looks like it's from teuthology's coredump
                # task, so proceed to clear out the core file
                log.info("Clearing core from pattern: {0}".format(core_pattern))

                # Determine the PID of the crashed MDS by inspecting the MDSMap, it had
                # to talk to the mons to get assigned a rank to reach the point of crashing
                addr = self.mds_cluster.status().get_mds(daemon_id)['addr']
                pid_str = addr.split("/")[1]
                log.info("Determined crasher PID was {0}".format(pid_str))

                # Substitute PID into core_pattern to get a glob
                core_glob = core_pattern.replace("%p", pid_str)
                core_glob = re.sub("%[a-z]", "*", core_glob)  # Match all for all other % tokens
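
                # Illustrative example (the pattern shown is hypothetical): a
                # core_pattern of "/cores/%e.%p.core" with crasher PID 1234 yields
                # the glob "/cores/*.1234.core" after the two substitutions above.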

                # Verify that we see the expected single coredump matching the expected pattern
                ls_proc = self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
                    "sudo", "ls", run.Raw(core_glob)
                ], stdout=StringIO())
                cores = [f for f in ls_proc.stdout.getvalue().strip().split("\n") if f]
                log.info("Enumerated cores: {0}".format(cores))
                self.assertEqual(len(cores), 1)

                log.info("Found core file {0}, deleting it".format(cores[0]))

                self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[
                    "sudo", "rm", "-f", cores[0]
                ])
            else:
                log.info("No core_pattern directory set, nothing to clear (internal.coredump not enabled?)")
        else:
            raise AssertionError("MDS daemon '{0}' did not crash as expected".format(daemon_id))