
qa: remove get_lone_mds_id

This helps remove a dependence on the list of mds_daemons from
teuthology/cephadm.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Patrick Donnelly 2021-03-11 10:31:05 -08:00
parent 0825d6aa9e
commit 8e45af05c9
3 changed files with 8 additions and 28 deletions
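Before the per-file hunks, a minimal hypothetical sketch of the calling convention the qa tests move to: address the MDS by its rank in the file system instead of resolving a daemon name with get_lone_mds_id(). The test class, its import path, and the assumption that rank_asok()/rank_tell() default to rank 0 are illustrative and not part of this commit; only the command/timeout arguments appear in the hunks below.

from tasks.cephfs.cephfs_test_case import CephFSTestCase  # import path assumed

class TestExample(CephFSTestCase):  # hypothetical test, not in this commit
    def test_session_listing(self):
        # Before: mds_id = self.fs.get_lone_mds_id()
        #         out = self.fs.mds_asok(['session', 'ls'], mds_id)
        # After: ask whichever daemon currently holds rank 0 directly.
        out = self.fs.rank_asok(['session', 'ls'])
        self.assertIsInstance(out, list)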


@@ -1050,21 +1050,6 @@ class Filesystem(MDSCluster):
         status = self.status()
 
-    def get_lone_mds_id(self):
-        """
-        Get a single MDS ID: the only one if there is only one
-        configured, else the only one currently holding a rank,
-        else raise an error.
-        """
-        if len(self.mds_ids) != 1:
-            alive = self.get_rank_names()
-            if len(alive) == 1:
-                return alive[0]
-            else:
-                raise ValueError("Explicit MDS argument required when multiple MDSs in use")
-        else:
-            return self.mds_ids[0]
-
     def put_metadata_object_raw(self, object_id, infile):
         """
         Save an object to the metadata pool
@@ -1127,13 +1112,13 @@ class Filesystem(MDSCluster):
     def mds_asok(self, command, mds_id=None, timeout=None):
         if mds_id is None:
-            mds_id = self.get_lone_mds_id()
+            return self.rank_asok(command, timeout=timeout)
 
         return self.json_asok(command, 'mds', mds_id, timeout=timeout)
 
     def mds_tell(self, command, mds_id=None):
         if mds_id is None:
-            mds_id = self.get_lone_mds_id()
+            return self.rank_tell(command)
 
         return json.loads(self.mon_manager.raw_cluster_cmd("tell", f"mds.{mds_id}", *command))
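From a test's point of view, a hedged sketch of how the patched helpers now behave (the command names and the daemon name 'a' are illustrative, and the rank-0 default is an assumption about rank_asok()/rank_tell(); only the delegation itself comes from the hunk above):

# Inside a CephFSTestCase test method:
perf = self.fs.mds_asok(['perf', 'dump'])    # mds_id omitted -> delegates to rank_asok()
sub = self.fs.mds_tell(['get', 'subtrees'])  # mds_id omitted -> delegates to rank_tell()
cfg = self.fs.mds_asok(['config', 'get', 'mds_cache_memory_limit'], mds_id='a')
# Passing an explicit mds_id still takes the json_asok()/raw "tell" path, so
# callers that name a daemon are unaffected by this change.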


@@ -313,12 +313,10 @@ class TestConfigCommands(CephFSTestCase):
         test_key = "mds_max_purge_ops"
         test_val = "123"
-        mds_id = self.fs.get_lone_mds_id()
-        self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs",
-                                            "--{0}={1}".format(test_key, test_val))
+        self.fs.rank_tell(['injectargs', "--{0}={1}".format(test_key, test_val)])
 
         # Read it back with asok because there is no `tell` equivalent
-        out = self.fs.mds_asok(['config', 'get', test_key])
+        out = self.fs.rank_tell(['config', 'get', test_key])
         self.assertEqual(out[test_key], test_val)
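For readers mapping the test change back to the CLI, a hedged sketch of what the removed and added lines amount to (how rank_tell() resolves rank 0 to a daemon name is assumed, not shown in this diff):

# Old path (removed above), roughly the shell command:
#   ceph tell mds.<mds_id> injectargs --mds_max_purge_ops=123
# New path: rank_tell() targets whichever daemon holds rank 0 of this file
# system, so the test no longer needs a daemon name up front.
self.fs.rank_tell(['injectargs', '--mds_max_purge_ops=123'])
out = self.fs.rank_tell(['config', 'get', 'mds_max_purge_ops'])
assert out['mds_max_purge_ops'] == '123'  # config values round-trip as strings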


@@ -205,14 +205,11 @@ class TestCacheDrop(CephFSTestCase):
     def _run_drop_cache_cmd(self, timeout=None):
         result = None
-        mds_id = self.fs.get_lone_mds_id()
+        args = ["cache", "drop"]
         if timeout is not None:
-            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
-                                                         "cache", "drop", str(timeout))
-        else:
-            result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id),
-                                                         "cache", "drop")
-        return json.loads(result)
+            args.append(str(timeout))
+        result = self.fs.rank_tell(args)
+        return result
 
     def _setup(self, max_caps=20, threshold=400):
         # create some files
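One detail worth noting: the json.loads() call disappears because rank_tell() evidently returns the decoded JSON reply already (the explicit-mds_id branch of mds_tell() keeps its own json.loads()). A short hypothetical usage sketch of the reworked helper:

# Inside a TestCacheDrop test method; the timeout value is arbitrary.
result = self._run_drop_cache_cmd(timeout=10)  # sends "cache drop 10" to rank 0
self.assertIsInstance(result, dict)            # already decoded, no json.loads() needed
result = self._run_drop_cache_cmd()            # sends "cache drop" with no timeout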