diff --git a/tasks/cephfs/test_volume_client.py b/tasks/cephfs/test_volume_client.py
index bfc211747c0..e537488bd55 100644
--- a/tasks/cephfs/test_volume_client.py
+++ b/tasks/cephfs/test_volume_client.py
@@ -219,6 +219,15 @@ vc.disconnect()
         That data isolated shares get their own pool
         :return:
         """
+
+        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
+        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
+        # sane before using volume_client, to avoid creating pools with absurdly large
+        # numbers of PGs.
+        self.set_conf("global", "mon pg warn max per osd", "300")
+        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
+            mon_daemon_state.restart()
+
         self.mount_b.umount_wait()
         self._configure_vc_auth(self.mount_b, "manila")
 
diff --git a/tasks/cephfs/vstart_runner.py b/tasks/cephfs/vstart_runner.py
index 3b90afb78de..5242c44a653 100644
--- a/tasks/cephfs/vstart_runner.py
+++ b/tasks/cephfs/vstart_runner.py
@@ -35,6 +35,7 @@ import errno
 from unittest import suite
 import unittest
 from teuthology.orchestra.run import Raw, quote
+from teuthology.orchestra.daemon import DaemonGroup
 
 import logging
 
@@ -570,23 +571,13 @@ class LocalFilesystem(Filesystem):
 
         self.admin_remote = LocalRemote()
 
-        # Hack: cheeky inspection of ceph.conf to see what MDSs exist
-        self.mds_ids = set()
-        for line in open("ceph.conf").readlines():
-            match = re.match("^\[mds\.(.+)\]$", line)
-            if match:
-                self.mds_ids.add(match.group(1))
-
+        self.mds_ids = ctx.daemons.daemons['mds'].keys()
         if not self.mds_ids:
             raise RuntimeError("No MDSs found in ceph.conf!")
 
-        self.mds_ids = list(self.mds_ids)
-
-        log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
-
         self.mon_manager = LocalCephManager()
 
-        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
+        self.mds_daemons = ctx.daemons.daemons["mds"]
 
         self.client_remote = LocalRemote()
 
@@ -734,6 +725,19 @@ def exec_test():
                 'test_path': test_dir
             }
             self.cluster = LocalCluster()
+            self.daemons = DaemonGroup()
+
+            # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
+            # tests that want to look these up via ctx can do so.
+            # Inspect ceph.conf to see what roles exist
+            for conf_line in open("ceph.conf").readlines():
+                for svc_type in ["mon", "osd", "mds"]:
+                    if svc_type not in self.daemons.daemons:
+                        self.daemons.daemons[svc_type] = {}
+                    match = re.match("^\[{0}\.(.+)\]$".format(svc_type), conf_line)
+                    if match:
+                        svc_id = match.group(1)
+                        self.daemons.daemons[svc_type][svc_id] = LocalDaemon(svc_type, svc_id)
 
         def __del__(self):
             shutil.rmtree(self.teuthology_config['test_path'])