import json
import logging
from gevent import Greenlet
import os
import time
import datetime
import re
import errno
import random
import traceback

from io import BytesIO
from io import StringIO

from teuthology.exceptions import CommandFailedError
from teuthology import misc
from teuthology.nuke import clear_firewall
from teuthology.parallel import parallel
from tasks.ceph_manager import write_conf
from tasks import ceph_manager


log = logging.getLogger(__name__)


DAEMON_WAIT_TIMEOUT = 120
ROOT_INO = 1

class FileLayout(object):
    def __init__(self, pool=None, pool_namespace=None, stripe_unit=None, stripe_count=None, object_size=None):
        self.pool = pool
        self.pool_namespace = pool_namespace
        self.stripe_unit = stripe_unit
        self.stripe_count = stripe_count
        self.object_size = object_size

    @classmethod
    def load_from_ceph(cls, layout_str):
        # TODO
        pass

    def items(self):
        if self.pool is not None:
            yield ("pool", self.pool)
        if self.pool_namespace:
            yield ("pool_namespace", self.pool_namespace)
        if self.stripe_unit is not None:
            yield ("stripe_unit", self.stripe_unit)
        if self.stripe_count is not None:
            yield ("stripe_count", self.stripe_count)
        if self.object_size is not None:
            yield ("object_size", self.object_size)

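# Usage sketch (illustrative only, not part of the original helpers): FileLayout
# is a bag of optional layout fields, and items() yields only the fields that
# were actually set, so callers can iterate it directly, e.g.
#
#   layout = FileLayout(pool="cephfs_data", stripe_count=2)
#   for name, value in layout.items():
#       print(name, value)   # -> ("pool", "cephfs_data"), ("stripe_count", 2)
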
class ObjectNotFound(Exception):
    def __init__(self, object_name):
        self._object_name = object_name

    def __str__(self):
        return "Object not found: '{0}'".format(self._object_name)

class FSStatus(object):
    """
    Operations on a snapshot of the FSMap.
    """
    def __init__(self, mon_manager):
        self.mon = mon_manager
        self.map = json.loads(self.mon.raw_cluster_cmd("fs", "dump", "--format=json"))

    def __str__(self):
        return json.dumps(self.map, indent = 2, sort_keys = True)

    # Expose the fsmap for manual inspection.
    def __getitem__(self, key):
        """
        Get a field from the fsmap.
        """
        return self.map[key]

    def get_filesystems(self):
        """
        Iterator for all filesystems.
        """
        for fs in self.map['filesystems']:
            yield fs

    def get_all(self):
        """
        Iterator for all the mds_info components in the FSMap.
        """
        for info in self.map['standbys']:
            yield info
        for fs in self.map['filesystems']:
            for info in fs['mdsmap']['info'].values():
                yield info

    def get_standbys(self):
        """
        Iterator for all standbys.
        """
        for info in self.map['standbys']:
            yield info

    def get_fsmap(self, fscid):
        """
        Get the fsmap for the given FSCID.
        """
        for fs in self.map['filesystems']:
            if fscid is None or fs['id'] == fscid:
                return fs
        raise RuntimeError("FSCID {0} not in map".format(fscid))

    def get_fsmap_byname(self, name):
        """
        Get the fsmap for the given file system name.
        """
        for fs in self.map['filesystems']:
            if name is None or fs['mdsmap']['fs_name'] == name:
                return fs
        raise RuntimeError("FS {0} not in map".format(name))

    def get_replays(self, fscid):
        """
        Get the standby:replay MDS for the given FSCID.
        """
        fs = self.get_fsmap(fscid)
        for info in fs['mdsmap']['info'].values():
            if info['state'] == 'up:standby-replay':
                yield info

    def get_ranks(self, fscid):
        """
        Get the ranks for the given FSCID.
        """
        fs = self.get_fsmap(fscid)
        for info in fs['mdsmap']['info'].values():
            if info['rank'] >= 0 and info['state'] != 'up:standby-replay':
                yield info

    def get_rank(self, fscid, rank):
        """
        Get the rank for the given FSCID.
        """
        for info in self.get_ranks(fscid):
            if info['rank'] == rank:
                return info
        raise RuntimeError("FSCID {0} has no rank {1}".format(fscid, rank))

    def get_mds(self, name):
        """
        Get the info for the given MDS name.
        """
        for info in self.get_all():
            if info['name'] == name:
                return info
        return None

    def get_mds_addr(self, name):
        """
        Return the instance addr as a string, like "10.214.133.138:6807\/10825"
        """
        info = self.get_mds(name)
        if info:
            return info['addr']
        else:
            log.warning(json.dumps(list(self.get_all()), indent=2))  # dump for debugging
            raise RuntimeError("MDS id '{0}' not found in map".format(name))

    def get_mds_gid(self, gid):
        """
        Get the info for the given MDS gid.
        """
        for info in self.get_all():
            if info['gid'] == gid:
                return info
        return None

    def hadfailover(self, status):
        """
        Compares two statuses for mds failovers.
        Returns True if there is a failover.
        """
        for fs in status.map['filesystems']:
            for info in fs['mdsmap']['info'].values():
                oldinfo = self.get_mds_gid(info['gid'])
                if oldinfo is None or oldinfo['incarnation'] != info['incarnation']:
                    return True
        # all matching
        return False

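    # Usage sketch (illustrative only, not part of the original helpers): a test
    # normally takes one snapshot and then queries it, e.g.
    #
    #   status = FSStatus(mds_cluster.mon_manager)
    #   for info in status.get_ranks(fscid):
    #       log.info("rank {0} is held by {1}".format(info['rank'], info['name']))
    #
    # Because the snapshot is taken in __init__, call FSStatus(...) (or
    # MDSCluster.status()) again whenever fresh state is needed.
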
class CephCluster(object):
    @property
    def admin_remote(self):
        first_mon = misc.get_first_mon(self._ctx, None)
        (result,) = self._ctx.cluster.only(first_mon).remotes.keys()
        return result

    def __init__(self, ctx):
        self._ctx = ctx
        self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))

    def get_config(self, key, service_type=None):
        """
        Get config from mon by default, or a specific service if caller asks for it
        """
        if service_type is None:
            service_type = 'mon'

        service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0]
        return self.json_asok(['config', 'get', key], service_type, service_id)[key]

    def set_ceph_conf(self, subsys, key, value):
        if subsys not in self._ctx.ceph['ceph'].conf:
            self._ctx.ceph['ceph'].conf[subsys] = {}
        self._ctx.ceph['ceph'].conf[subsys][key] = value
        write_conf(self._ctx)  # XXX because we don't have the ceph task's config object, if they
                               # used a different config path this won't work.

    def clear_ceph_conf(self, subsys, key):
        del self._ctx.ceph['ceph'].conf[subsys][key]
        write_conf(self._ctx)

    def json_asok(self, command, service_type, service_id, timeout=None):
        if timeout is None:
            timeout = 15*60
        proc = self.mon_manager.admin_socket(service_type, service_id, command, timeout=timeout)
        response_data = proc.stdout.getvalue()
        log.info("_json_asok output: {0}".format(response_data))
        if response_data.strip():
            return json.loads(response_data)
        else:
            return None

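    # Usage sketch (illustrative only): json_asok() wraps an admin-socket command
    # (`ceph daemon <type>.<id> ...`) and parses the JSON reply, so a config value
    # could be read like
    #
    #   value = cluster.json_asok(['config', 'get', 'mds_cache_memory_limit'],
    #                             'mds', 'a')['mds_cache_memory_limit']
    #
    # get_config() above is this same pattern with the first configured daemon of
    # the requested service type filled in automatically.
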
class MDSCluster(CephCluster):
    """
    Collective operations on all the MDS daemons in the Ceph cluster.  These
    daemons may be in use by various Filesystems.

    For the benefit of pre-multi-filesystem tests, this class is also
    a parent of Filesystem.  The correct way to use MDSCluster going forward is
    as a separate instance outside of your (multiple) Filesystem instances.
    """
    def __init__(self, ctx):
        super(MDSCluster, self).__init__(ctx)

        self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds'))

        if len(self.mds_ids) == 0:
            raise RuntimeError("This task requires at least one MDS")

        if hasattr(self._ctx, "daemons"):
            # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task
            self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids])

    def _one_or_all(self, mds_id, cb, in_parallel=True):
        """
        Call a callback for a single named MDS, or for all.

        Note that the parallelism here isn't for performance, it's to avoid being overly kind
        to the cluster by waiting a graceful ssh-latency of time between doing things, and to
        avoid being overly kind by executing them in a particular order.  However, some actions
        don't cope with being done in parallel, so it's optional (`in_parallel`).

        :param mds_id: MDS daemon name, or None
        :param cb: Callback taking single argument of MDS daemon name
        :param in_parallel: whether to invoke callbacks concurrently (else one after the other)
        """
        if mds_id is None:
            if in_parallel:
                with parallel() as p:
                    for mds_id in self.mds_ids:
                        p.spawn(cb, mds_id)
            else:
                for mds_id in self.mds_ids:
                    cb(mds_id)
        else:
            cb(mds_id)

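    # Illustrative sketch: the daemon-control helpers below all funnel through
    # _one_or_all(), e.g. stopping every MDS concurrently versus one by name:
    #
    #   mds_cluster._one_or_all(None, lambda id_: mds_cluster.mds_daemons[id_].stop())
    #   mds_cluster._one_or_all('a', lambda id_: mds_cluster.mds_daemons[id_].stop())
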
    def get_config(self, key, service_type=None):
        """
        get_config specialization of service_type="mds"
        """
        if service_type != "mds":
            return super(MDSCluster, self).get_config(key, service_type)

        # Some tests stop MDS daemons, don't send commands to a dead one:
        running_daemons = [i for i, mds in self.mds_daemons.items() if mds.running()]
        service_id = random.sample(running_daemons, 1)[0]
        return self.json_asok(['config', 'get', key], service_type, service_id)[key]

    def mds_stop(self, mds_id=None):
        """
        Stop the MDS daemon process(es).  If it held a rank, that rank
        will eventually go laggy.
        """
        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].stop())

    def mds_fail(self, mds_id=None):
        """
        Inform MDSMonitor of the death of the daemon process(es).  If it held
        a rank, that rank will be relinquished.
        """
        self._one_or_all(mds_id, lambda id_: self.mon_manager.raw_cluster_cmd("mds", "fail", id_))

    def mds_restart(self, mds_id=None):
        self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].restart())

    def mds_fail_restart(self, mds_id=None):
        """
        Variation on restart that includes marking MDSs as failed, so that doing this
        operation followed by waiting for healthy daemon states guarantees that they
        have gone down and come up, rather than potentially seeing the healthy states
        that existed before the restart.
        """
        def _fail_restart(id_):
            self.mds_daemons[id_].stop()
            self.mon_manager.raw_cluster_cmd("mds", "fail", id_)
            self.mds_daemons[id_].restart()

        self._one_or_all(mds_id, _fail_restart)

    def mds_signal(self, mds_id, sig, silent=False):
        """
        Send a signal to an MDS daemon.
        """
        self.mds_daemons[mds_id].signal(sig, silent)

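    # Usage sketch (illustrative only): a clean failover in a test usually pairs
    # mds_fail_restart() with a health wait on the filesystem object, e.g.
    #
    #   mds_cluster.mds_fail_restart('a')
    #   fs.wait_for_daemons()
    #
    # which guarantees the daemon really went down and came back, rather than the
    # healthy state simply never having changed.
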
    def newfs(self, name='cephfs', create=True):
        return Filesystem(self._ctx, name=name, create=create)

    def status(self):
        return FSStatus(self.mon_manager)

    def delete_all_filesystems(self):
        """
        Remove all filesystems that exist, and any pools in use by them.
        """
        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
        pool_id_name = {}
        for pool in pools:
            pool_id_name[pool['pool']] = pool['pool_name']

        # mark cluster down for each fs to prevent churn during deletion
        status = self.status()
        for fs in status.get_filesystems():
            self.mon_manager.raw_cluster_cmd("fs", "fail", str(fs['mdsmap']['fs_name']))

        # get a new copy as actives may have since changed
        status = self.status()
        for fs in status.get_filesystems():
            mdsmap = fs['mdsmap']
            metadata_pool = pool_id_name[mdsmap['metadata_pool']]

            self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                             metadata_pool, metadata_pool,
                                             '--yes-i-really-really-mean-it')
            for data_pool in mdsmap['data_pools']:
                data_pool = pool_id_name[data_pool]
                try:
                    self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                                     data_pool, data_pool,
                                                     '--yes-i-really-really-mean-it')
                except CommandFailedError as e:
                    if e.exitstatus == 16:  # EBUSY, this data pool is used
                        pass                # by two metadata pools, let the 2nd
                    else:                   # pass delete it
                        raise

    def get_standby_daemons(self):
        return set([s['name'] for s in self.status().get_standbys()])

    def get_mds_hostnames(self):
        result = set()
        for mds_id in self.mds_ids:
            mds_remote = self.mon_manager.find_remote('mds', mds_id)
            result.add(mds_remote.hostname)

        return list(result)

    def set_clients_block(self, blocked, mds_id=None):
        """
        Block (using iptables) client communications to this MDS.  Be careful: if
        other services are running on this MDS, or other MDSs try to talk to this
        MDS, their communications may also be blocked as collateral damage.

        :param mds_id: Optional ID of MDS to block, default to all
        :return:
        """
        da_flag = "-A" if blocked else "-D"

        def set_block(_mds_id):
            remote = self.mon_manager.find_remote('mds', _mds_id)
            status = self.status()

            addr = status.get_mds_addr(_mds_id)
            ip_str, port_str, inst_str = re.match("(.+):(.+)/(.+)", addr).groups()

            remote.run(
                args=["sudo", "iptables", da_flag, "OUTPUT", "-p", "tcp", "--sport", port_str, "-j", "REJECT", "-m",
                      "comment", "--comment", "teuthology"])
            remote.run(
                args=["sudo", "iptables", da_flag, "INPUT", "-p", "tcp", "--dport", port_str, "-j", "REJECT", "-m",
                      "comment", "--comment", "teuthology"])

        self._one_or_all(mds_id, set_block, in_parallel=False)

    def clear_firewall(self):
        clear_firewall(self._ctx)

    def get_mds_info(self, mds_id):
        return FSStatus(self.mon_manager).get_mds(mds_id)

    def is_pool_full(self, pool_name):
        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
        for pool in pools:
            if pool['pool_name'] == pool_name:
                return 'full' in pool['flags_names'].split(",")

        raise RuntimeError("Pool not found '{0}'".format(pool_name))

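    # Usage sketch (illustrative only): a network-partition style test can cut
    # clients off from one MDS and later restore connectivity with
    #
    #   mds_cluster.set_clients_block(True, mds_id='a')
    #   ...                       # exercise client reconnect / eviction paths
    #   mds_cluster.clear_firewall()
    #
    # clear_firewall() removes the teuthology-tagged iptables rules again.
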
class Filesystem(MDSCluster):
    """
    This object is for driving a CephFS filesystem.  The MDS daemons driven by
    MDSCluster may be shared with other Filesystems.
    """
    def __init__(self, ctx, fscid=None, name=None, create=False,
                 ec_profile=None):
        super(Filesystem, self).__init__(ctx)

        self.name = name
        self.ec_profile = ec_profile
        self.id = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
        self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]

        if name is not None:
            if fscid is not None:
                raise RuntimeError("cannot specify fscid when creating fs")
            if create and not self.legacy_configured():
                self.create()
        else:
            if fscid is not None:
                self.id = fscid
            self.getinfo(refresh = True)

        # Stash a reference to the first created filesystem on ctx, so
        # that if someone drops to the interactive shell they can easily
        # poke our methods.
        if not hasattr(self._ctx, "filesystem"):
            self._ctx.filesystem = self

    def get_task_status(self, status_key):
        return self.mon_manager.get_service_task_status("mds", status_key)

    def getinfo(self, refresh = False):
        status = self.status()
        if self.id is not None:
            fsmap = status.get_fsmap(self.id)
        elif self.name is not None:
            fsmap = status.get_fsmap_byname(self.name)
        else:
            fss = [fs for fs in status.get_filesystems()]
            if len(fss) == 1:
                fsmap = fss[0]
            elif len(fss) == 0:
                raise RuntimeError("no file system available")
            else:
                raise RuntimeError("more than one file system available")
        self.id = fsmap['id']
        self.name = fsmap['mdsmap']['fs_name']
        self.get_pool_names(status = status, refresh = refresh)
        return status

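    # Usage sketch (illustrative only): getinfo() resolves the fscid/name pair,
    # refreshes pool names, and returns the FSStatus it used, so a test can do
    #
    #   fs = Filesystem(ctx, name='cephfs')
    #   status = fs.getinfo(refresh=True)
    #   rank0 = status.get_rank(fs.id, 0)
    #
    # and keep passing `status` to other helpers to avoid re-dumping the FSMap.
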
    def set_metadata_overlay(self, overlay):
        if self.id is not None:
            raise RuntimeError("cannot specify fscid when configuring overlay")
        self.metadata_overlay = overlay

    def deactivate(self, rank):
        if rank < 0:
            raise RuntimeError("invalid rank")
        elif rank == 0:
            raise RuntimeError("cannot deactivate rank 0")
        self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank))

    def reach_max_mds(self):
        # Try to reach rank count == max_mds, up or down (UPGRADE SENSITIVE!)
        status = self.getinfo()
        mds_map = self.get_mds_map(status=status)
        max_mds = mds_map['max_mds']

        count = len(list(self.get_ranks(status=status)))
        if count > max_mds:
            try:
                # deactivate mds in descending order
                status = self.wait_for_daemons(status=status, skip_max_mds_check=True)
                while count > max_mds:
                    targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
                    target = targets[0]
                    log.info("deactivating rank %d" % target['rank'])
                    self.deactivate(target['rank'])
                    status = self.wait_for_daemons(skip_max_mds_check=True)
                    count = len(list(self.get_ranks(status=status)))
            except:
                # In Mimic, deactivation is done automatically:
                log.info("Error:\n{}".format(traceback.format_exc()))
                status = self.wait_for_daemons()
        else:
            status = self.wait_for_daemons()

        mds_map = self.get_mds_map(status=status)
        assert(mds_map['max_mds'] == max_mds)
        assert(mds_map['in'] == list(range(0, max_mds)))

    def fail(self):
        self.mon_manager.raw_cluster_cmd("fs", "fail", str(self.name))

    def set_flag(self, var, *args):
        a = map(lambda x: str(x).lower(), args)
        self.mon_manager.raw_cluster_cmd("fs", "flag", "set", var, *a)

    def set_allow_multifs(self, yes=True):
        self.set_flag("enable_multiple", yes)

    def set_var(self, var, *args):
        a = map(lambda x: str(x).lower(), args)
        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *a)

    def set_down(self, down=True):
        self.set_var("down", str(down).lower())

    def set_joinable(self, joinable=True):
        self.set_var("joinable", joinable)

    def set_max_mds(self, max_mds):
        self.set_var("max_mds", "%d" % max_mds)

    def set_allow_standby_replay(self, yes):
        self.set_var("allow_standby_replay", yes)

    def set_allow_new_snaps(self, yes):
        self.set_var("allow_new_snaps", yes, '--yes-i-really-mean-it')

    # In Octopus+, the PG count can be omitted to use the default. We keep the
    # hard-coded value for deployments of Mimic/Nautilus.
    pgs_per_fs_pool = 8

    def create(self):
        if self.name is None:
            self.name = "cephfs"
        if self.metadata_pool_name is None:
            self.metadata_pool_name = "{0}_metadata".format(self.name)
        if self.data_pool_name is None:
            data_pool_name = "{0}_data".format(self.name)
        else:
            data_pool_name = self.data_pool_name

        log.info("Creating filesystem '{0}'".format(self.name))

        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                         self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
        if self.metadata_overlay:
            self.mon_manager.raw_cluster_cmd('fs', 'new',
                                             self.name, self.metadata_pool_name, data_pool_name,
                                             '--allow-dangerous-metadata-overlay')
        else:
            if self.ec_profile and 'disabled' not in self.ec_profile:
                log.info("EC profile is %s", self.ec_profile)
                cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
                cmd.extend(self.ec_profile)
                self.mon_manager.raw_cluster_cmd(*cmd)
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'create',
                    data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure',
                    data_pool_name)
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'set',
                    data_pool_name, 'allow_ec_overwrites', 'true')
            else:
                self.mon_manager.raw_cluster_cmd(
                    'osd', 'pool', 'create',
                    data_pool_name, self.pgs_per_fs_pool.__str__())
            self.mon_manager.raw_cluster_cmd('fs', 'new',
                                             self.name,
                                             self.metadata_pool_name,
                                             data_pool_name,
                                             "--force")
        self.check_pool_application(self.metadata_pool_name)
        self.check_pool_application(data_pool_name)
        # Turn off spurious standby count warnings from modifying max_mds in tests.
        try:
            self.mon_manager.raw_cluster_cmd('fs', 'set', self.name, 'standby_count_wanted', '0')
        except CommandFailedError as e:
            if e.exitstatus == 22:
                # standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise)
                pass
            else:
                raise

        self.getinfo(refresh = True)

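    # Usage sketch (illustrative only): the usual way to get a fresh filesystem in
    # a test is via the MDSCluster helper rather than calling create() directly:
    #
    #   fs = mds_cluster.newfs(name='cephfs', create=True)
    #
    # which builds "<name>_metadata"/"<name>_data" pools, runs `fs new`, and then
    # calls getinfo() so fs.id and the pool names are populated.
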
    def check_pool_application(self, pool_name):
        osd_map = self.mon_manager.get_osd_dump_json()
        for pool in osd_map['pools']:
            if pool['pool_name'] == pool_name:
                if "application_metadata" in pool:
                    if "cephfs" not in pool['application_metadata']:
                        raise RuntimeError("Pool {pool_name} does not name cephfs as application!".\
                            format(pool_name=pool_name))

    def __del__(self):
        if getattr(self._ctx, "filesystem", None) == self:
            delattr(self._ctx, "filesystem")

    def exists(self):
        """
        Whether a filesystem exists in the mon's filesystem list
        """
        fs_list = json.loads(self.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty'))
        return self.name in [fs['name'] for fs in fs_list]

    def legacy_configured(self):
        """
        Check if a legacy (i.e. pre "fs new") filesystem configuration is present.  If this is
        the case, the caller should avoid using Filesystem.create
        """
        try:
            out_text = self.mon_manager.raw_cluster_cmd('--format=json-pretty', 'osd', 'lspools')
            pools = json.loads(out_text)
            metadata_pool_exists = 'metadata' in [p['poolname'] for p in pools]
            if metadata_pool_exists:
                self.metadata_pool_name = 'metadata'
        except CommandFailedError as e:
            # For use in upgrade tests, Ceph cuttlefish and earlier don't support
            # structured output (--format) from the CLI.
            if e.exitstatus == 22:
                metadata_pool_exists = True
            else:
                raise

        return metadata_pool_exists

    def _df(self):
        return json.loads(self.mon_manager.raw_cluster_cmd("df", "--format=json-pretty"))

    def get_mds_map(self, status=None):
        if status is None:
            status = self.status()
        return status.get_fsmap(self.id)['mdsmap']

    def get_var(self, var, status=None):
        return self.get_mds_map(status=status)[var]

    def set_dir_layout(self, mount, path, layout):
        for name, value in layout.items():
            mount.run_shell(args=["setfattr", "-n", "ceph.dir.layout."+name, "-v", str(value), path])

    def add_data_pool(self, name, create=True):
        if create:
            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
        self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
        self.get_pool_names(refresh = True)
        for poolid, fs_name in self.data_pools.items():
            if name == fs_name:
                return poolid
        raise RuntimeError("could not get just created pool '{0}'".format(name))

    def get_pool_names(self, refresh = False, status = None):
        if refresh or self.metadata_pool_name is None or self.data_pools is None:
            if status is None:
                status = self.status()
            fsmap = status.get_fsmap(self.id)

            osd_map = self.mon_manager.get_osd_dump_json()
            id_to_name = {}
            for p in osd_map['pools']:
                id_to_name[p['pool']] = p['pool_name']

            self.metadata_pool_name = id_to_name[fsmap['mdsmap']['metadata_pool']]
            self.data_pools = {}
            for data_pool in fsmap['mdsmap']['data_pools']:
                self.data_pools[data_pool] = id_to_name[data_pool]

    def get_data_pool_name(self, refresh = False):
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh = True)
        assert(len(self.data_pools) == 1)
        return next(iter(self.data_pools.values()))

    def get_data_pool_id(self, refresh = False):
        """
        Don't call this if you have multiple data pools
        :return: integer
        """
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh = True)
        assert(len(self.data_pools) == 1)
        return next(iter(self.data_pools.keys()))

    def get_data_pool_names(self, refresh = False):
        if refresh or self.data_pools is None:
            self.get_pool_names(refresh = True)
        return list(self.data_pools.values())

    def get_metadata_pool_name(self):
        return self.metadata_pool_name

    def set_data_pool_name(self, name):
        if self.id is not None:
            raise RuntimeError("can't set filesystem name if its fscid is set")
        self.data_pool_name = name

    def get_namespace_id(self):
        return self.id

    def get_pool_df(self, pool_name):
        """
        Return a dict like:
            {u'bytes_used': 0, u'max_avail': 83848701, u'objects': 0, u'kb_used': 0}
        """
        for pool_df in self._df()['pools']:
            if pool_df['name'] == pool_name:
                return pool_df['stats']

        raise RuntimeError("Pool name '{0}' not found".format(pool_name))

    def get_usage(self):
        return self._df()['stats']['total_used_bytes']

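    # Usage sketch (illustrative only): the pool helpers are typically combined to
    # watch space usage of the (single) data pool during a test, e.g.
    #
    #   pool = fs.get_data_pool_name()
    #   used_bytes = fs.get_pool_df(pool)['bytes_used']
    #
    # get_data_pool_name()/get_data_pool_id() assert there is exactly one data
    # pool; use get_data_pool_names() when extra pools have been attached.
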
    def are_daemons_healthy(self, status=None, skip_max_mds_check=False):
        """
        Return true if all daemons are in one of active, standby, standby-replay, and
        at least max_mds daemons are in 'active'.

        Unlike most of Filesystem, this function is tolerant of new-style `fs`
        commands being missing, because we are part of the ceph installation
        process during upgrade suites, so must fall back to old style commands
        when we get an EINVAL on a new style command.

        :return:
        """
        # First, check to see that processes haven't exited with an error code
        for mds in self._ctx.daemons.iter_daemons_of_role('mds'):
            mds.check_status()

        active_count = 0
        try:
            mds_map = self.get_mds_map(status=status)
        except CommandFailedError as cfe:
            # Old version, fall back to non-multi-fs commands
            if cfe.exitstatus == errno.EINVAL:
                mds_map = json.loads(
                    self.mon_manager.raw_cluster_cmd('mds', 'dump', '--format=json'))
            else:
                raise

        log.info("are_daemons_healthy: mds map: {0}".format(mds_map))

        for mds_id, mds_status in mds_map['info'].items():
            if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
                log.warning("Unhealthy mds state {0}:{1}".format(mds_id, mds_status['state']))
                return False
            elif mds_status['state'] == 'up:active':
                active_count += 1

        log.info("are_daemons_healthy: {0}/{1}".format(
            active_count, mds_map['max_mds']
        ))

        if not skip_max_mds_check:
            if active_count > mds_map['max_mds']:
                log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
                return False
            elif active_count == mds_map['max_mds']:
                # The MDSMap says these guys are active, but let's check they really are
                for mds_id, mds_status in mds_map['info'].items():
                    if mds_status['state'] == 'up:active':
                        try:
                            daemon_status = self.mds_asok(["status"], mds_id=mds_status['name'])
                        except CommandFailedError as cfe:
                            if cfe.exitstatus == errno.EINVAL:
                                # Old version, can't do this check
                                continue
                            else:
                                # MDS not even running
                                return False

                        if daemon_status['state'] != 'up:active':
                            # MDS hasn't taken the latest map yet
                            return False

                return True
            else:
                return False
        else:
            log.info("are_daemons_healthy: skipping max_mds check")
            return True

    def get_daemon_names(self, state=None, status=None):
        """
        Return MDS daemon names of those daemons in the given state
        :param state:
        :return:
        """
        mdsmap = self.get_mds_map(status)
        result = []
        for mds_status in sorted(mdsmap['info'].values(),
                                 key=lambda _: _['rank']):
            if mds_status['state'] == state or state is None:
                result.append(mds_status['name'])

        return result

    def get_active_names(self, status=None):
        """
        Return MDS daemon names of those daemons holding ranks
        in state up:active

        :return: list of strings like ['a', 'b'], sorted by rank
        """
        return self.get_daemon_names("up:active", status=status)

    def get_all_mds_rank(self, status=None):
        mdsmap = self.get_mds_map(status)
        result = []
        for mds_status in sorted(mdsmap['info'].values(),
                                 key=lambda _: _['rank']):
            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                result.append(mds_status['rank'])

        return result

    def get_rank(self, rank=0, status=None):
        if status is None:
            status = self.getinfo()
        return status.get_rank(self.id, rank)

    def rank_restart(self, rank=0, status=None):
        name = self.get_rank(rank=rank, status=status)['name']
        self.mds_restart(mds_id=name)

    def rank_signal(self, signal, rank=0, status=None):
        name = self.get_rank(rank=rank, status=status)['name']
        self.mds_signal(name, signal)

    def rank_freeze(self, yes, rank=0):
        self.mon_manager.raw_cluster_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())

    def rank_fail(self, rank=0):
        self.mon_manager.raw_cluster_cmd("mds", "fail", "{}:{}".format(self.id, rank))

    def get_ranks(self, status=None):
        if status is None:
            status = self.getinfo()
        return status.get_ranks(self.id)

    def get_replays(self, status=None):
        if status is None:
            status = self.getinfo()
        return status.get_replays(self.id)

    def get_replay(self, rank=0, status=None):
        for replay in self.get_replays(status=status):
            if replay['rank'] == rank:
                return replay
        return None

    def get_rank_names(self, status=None):
        """
        Return MDS daemon names of those daemons holding a rank,
        sorted by rank.  This includes e.g. up:replay/reconnect
        as well as active, but does not include standby or
        standby-replay.
        """
        mdsmap = self.get_mds_map(status)
        result = []
        for mds_status in sorted(mdsmap['info'].values(),
                                 key=lambda _: _['rank']):
            if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                result.append(mds_status['name'])

        return result

    def wait_for_daemons(self, timeout=None, skip_max_mds_check=False, status=None):
        """
        Wait until all daemons are healthy
        :return:
        """

        if timeout is None:
            timeout = DAEMON_WAIT_TIMEOUT

        if status is None:
            status = self.status()

        elapsed = 0
        while True:
            if self.are_daemons_healthy(status=status, skip_max_mds_check=skip_max_mds_check):
                return status
            else:
                time.sleep(1)
                elapsed += 1

            if elapsed > timeout:
                log.info("status = {0}".format(status))
                raise RuntimeError("Timed out waiting for MDS daemons to become healthy")

            status = self.status()

|
|
|
def get_lone_mds_id(self):
|
2015-02-06 09:55:04 +00:00
|
|
|
"""
|
|
|
|
Get a single MDS ID: the only one if there is only one
|
|
|
|
configured, else the only one currently holding a rank,
|
|
|
|
else raise an error.
|
|
|
|
"""
|
2014-07-17 20:35:01 +00:00
|
|
|
if len(self.mds_ids) != 1:
|
2015-02-06 09:55:04 +00:00
|
|
|
alive = self.get_rank_names()
|
|
|
|
if len(alive) == 1:
|
|
|
|
return alive[0]
|
2015-02-04 12:52:42 +00:00
|
|
|
else:
|
|
|
|
raise ValueError("Explicit MDS argument required when multiple MDSs in use")
|
2014-07-17 20:35:01 +00:00
|
|
|
else:
|
|
|
|
return self.mds_ids[0]
|
|
|
|
|
2016-01-08 17:59:35 +00:00
|
|
|
def recreate(self):
|
2014-07-02 11:54:05 +00:00
|
|
|
log.info("Creating new filesystem")
|
2016-01-11 13:23:11 +00:00
|
|
|
self.delete_all_filesystems()
|
2016-08-30 21:23:46 +00:00
|
|
|
self.id = None
|
2016-01-11 13:23:11 +00:00
|
|
|
self.create()
|
2014-07-02 11:54:05 +00:00
|
|
|
|
2016-07-13 14:18:49 +00:00
|
|
|
def put_metadata_object_raw(self, object_id, infile):
|
|
|
|
"""
|
|
|
|
Save an object to the metadata pool
|
|
|
|
"""
|
|
|
|
temp_bin_path = infile
|
|
|
|
self.client_remote.run(args=[
|
|
|
|
'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'put', object_id, temp_bin_path
|
|
|
|
])
|
|
|
|
|
|
|
|
def get_metadata_object_raw(self, object_id):
|
|
|
|
"""
|
|
|
|
Retrieve an object from the metadata pool and store it in a file.
|
|
|
|
"""
|
|
|
|
temp_bin_path = '/tmp/' + object_id + '.bin'
|
|
|
|
|
|
|
|
self.client_remote.run(args=[
|
|
|
|
'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
|
|
|
|
])
|
|
|
|
|
|
|
|
return temp_bin_path
|
|
|
|
|
    def get_metadata_object(self, object_type, object_id):
        """
        Retrieve an object from the metadata pool, pass it through
        ceph-dencoder to dump it to JSON, and return the decoded object.
        """
        temp_bin_path = '/tmp/out.bin'

        self.client_remote.run(args=[
            'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path
        ])

        dump_json = self.client_remote.sh([
            'sudo', os.path.join(self._prefix, 'ceph-dencoder'), 'type', object_type, 'import', temp_bin_path, 'decode', 'dump_json'
        ]).strip()
        try:
            dump = json.loads(dump_json)
        except (TypeError, ValueError):
            log.error("Failed to decode JSON: '{0}'".format(dump_json))
            raise

        return dump

    def get_journal_version(self):
        """
        Read the JournalPointer and Journal::Header objects to learn the version of
        encoding in use.
        """
        journal_pointer_object = '400.00000000'
        journal_pointer_dump = self.get_metadata_object("JournalPointer", journal_pointer_object)
        journal_ino = journal_pointer_dump['journal_pointer']['front']

        journal_header_object = "{0:x}.00000000".format(journal_ino)
        journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)

        version = journal_header_dump['journal_header']['stream_format']
        log.info("Read journal version {0}".format(version))

        return version

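    # Usage sketch (illustrative only): get_metadata_object() is the generic
    # building block; get_journal_version() above shows the pattern of chasing a
    # pointer object to its target, roughly equivalent to running by hand:
    #
    #   rados -p <metadata pool> get 400.00000000 out.bin
    #   ceph-dencoder type JournalPointer import out.bin decode dump_json
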
    def mds_asok(self, command, mds_id=None, timeout=None):
        if mds_id is None:
            mds_id = self.get_lone_mds_id()

        return self.json_asok(command, 'mds', mds_id, timeout=timeout)

    def rank_asok(self, command, rank=0, status=None, timeout=None):
        info = self.get_rank(rank=rank, status=status)
        return self.json_asok(command, 'mds', info['name'], timeout=timeout)

    def rank_tell(self, command, rank=0, status=None):
        info = self.get_rank(rank=rank, status=status)
        return json.loads(self.mon_manager.raw_cluster_cmd("tell", 'mds.{0}'.format(info['name']), *command))

    def read_cache(self, path, depth=None):
        cmd = ["dump", "tree", path]
        if depth is not None:
            cmd.append(depth.__str__())
        result = self.mds_asok(cmd)
        if len(result) == 0:
            raise RuntimeError("Path not found in cache: {0}".format(path))

        return result

    def wait_for_state(self, goal_state, reject=None, timeout=None, mds_id=None, rank=None):
        """
        Block until the MDS reaches a particular state, or a failure condition
        is met.

        When there are multiple MDSs, succeed when exactly one MDS is in the
        goal state, or fail when any MDS is in the reject state.

        :param goal_state: Return once the MDS is in this state
        :param reject: Fail if the MDS enters this state before the goal state
        :param timeout: Fail if this many seconds pass before reaching goal
        :return: number of seconds waited, rounded down to integer
        """

        started_at = time.time()
        while True:
            status = self.status()
            if rank is not None:
                try:
                    mds_info = status.get_rank(self.id, rank)
                    current_state = mds_info['state'] if mds_info else None
                    log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
                except:
                    mdsmap = self.get_mds_map(status=status)
                    if rank in mdsmap['failed']:
                        log.info("Waiting for rank {0} to come back.".format(rank))
                        current_state = None
                    else:
                        raise
            elif mds_id is not None:
                # mds_info is None if no daemon with this ID exists in the map
                mds_info = status.get_mds(mds_id)
                current_state = mds_info['state'] if mds_info else None
                log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
            else:
                # In general, look for a single MDS
                states = [m['state'] for m in status.get_ranks(self.id)]
                if [s for s in states if s == goal_state] == [goal_state]:
                    current_state = goal_state
                elif reject in states:
                    current_state = reject
                else:
                    current_state = None
                log.info("mapped states {0} to {1}".format(states, current_state))

            elapsed = time.time() - started_at
            if current_state == goal_state:
                log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
                return elapsed
            elif reject is not None and current_state == reject:
                raise RuntimeError("MDS in reject state {0}".format(current_state))
            elif timeout is not None and elapsed > timeout:
                log.error("MDS status at timeout: {0}".format(status.get_fsmap(self.id)))
                raise RuntimeError(
                    "Reached timeout after {0} seconds waiting for state {1}, while in state {2}".format(
                        elapsed, goal_state, current_state
                    ))
            else:
                time.sleep(1)

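    # Usage sketch (illustrative only): after failing rank 0, a test can wait for
    # the replacement daemon to finish replay and become active with
    #
    #   fs.rank_fail(rank=0)
    #   fs.wait_for_state('up:active', rank=0, timeout=60)
    #
    # or watch a specific daemon by name via mds_id='a' instead of rank=0.
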
    def _read_data_xattr(self, ino_no, xattr_name, type, pool):
        mds_id = self.mds_ids[0]
        remote = self.mds_daemons[mds_id].remote
        if pool is None:
            pool = self.get_data_pool_name()

        obj_name = "{0:x}.00000000".format(ino_no)

        args = [
            os.path.join(self._prefix, "rados"), "-p", pool, "getxattr", obj_name, xattr_name
        ]
        try:
            proc = remote.run(args=args, stdout=BytesIO())
        except CommandFailedError as e:
            log.error(e.__str__())
            raise ObjectNotFound(obj_name)

        data = proc.stdout.getvalue()
        dump = remote.sh(
            [os.path.join(self._prefix, "ceph-dencoder"),
             "type", type,
             "import", "-",
             "decode", "dump_json"],
            stdin=data,
            stdout=StringIO()
        )

        return json.loads(dump.strip())

    def _write_data_xattr(self, ino_no, xattr_name, data, pool=None):
        """
        Write to an xattr of the 0th data object of an inode.  Will
        succeed whether the object and/or xattr already exist or not.

        :param ino_no: integer inode number
        :param xattr_name: string name of the xattr
        :param data: byte array data to write to the xattr
        :param pool: name of data pool or None to use primary data pool
        :return: None
        """
        remote = self.mds_daemons[self.mds_ids[0]].remote
        if pool is None:
            pool = self.get_data_pool_name()

        obj_name = "{0:x}.00000000".format(ino_no)
        args = [
            os.path.join(self._prefix, "rados"), "-p", pool, "setxattr",
            obj_name, xattr_name, data
        ]
        remote.sh(args)

2015-06-25 00:38:38 +00:00
|
|
|
def read_backtrace(self, ino_no, pool=None):
|
|
|
|
"""
|
|
|
|
Read the backtrace from the data pool, return a dict in the format
|
|
|
|
given by inode_backtrace_t::dump, which is something like:
|
|
|
|
|
|
|
|
::
|
|
|
|
|
|
|
|
rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin
|
|
|
|
ceph-dencoder type inode_backtrace_t import out.bin decode dump_json
|
|
|
|
|
|
|
|
{ "ino": 1099511627778,
|
|
|
|
"ancestors": [
|
|
|
|
{ "dirino": 1,
|
|
|
|
"dname": "blah",
|
|
|
|
"version": 11}],
|
|
|
|
"pool": 1,
|
|
|
|
"old_pools": []}
|
|
|
|
|
|
|
|
:param pool: name of pool to read backtrace from. If omitted, FS must have only
|
|
|
|
one data pool and that will be used.
|
|
|
|
"""
|
|
|
|
return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool)
|
|
|
|
|
|
|
|
def read_layout(self, ino_no, pool=None):
|
|
|
|
"""
|
|
|
|
Read 'layout' xattr of an inode and parse the result, returning a dict like:
|
|
|
|
::
|
|
|
|
{
|
|
|
|
"stripe_unit": 4194304,
|
|
|
|
"stripe_count": 1,
|
|
|
|
"object_size": 4194304,
|
2016-01-30 18:46:21 +00:00
|
|
|
"pool_id": 1,
|
|
|
|
"pool_ns": "",
|
2015-06-25 00:38:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
:param pool: name of pool to read the layout from. If omitted, FS must have only
|
|
|
|
one data pool and that will be used.
|
|
|
|
"""
|
2016-01-30 18:46:21 +00:00
|
|
|
return self._read_data_xattr(ino_no, "layout", "file_layout_t", pool)
|
2015-06-25 00:38:38 +00:00
|
|
|
|
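# Editor's note: illustrative only (values assumed). A test could cross-check a
# client-visible layout against the on-disk "layout" xattr roughly like this:
#
#     layout = fs.read_layout(ino)
#     assert layout["object_size"] == 4194304
#     assert layout["pool_ns"] == ""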
2015-01-21 14:28:13 +00:00
|
|
|
def _enumerate_data_objects(self, ino, size):
|
|
|
|
"""
|
|
|
|
Get the list of expected data objects for a range, and the list of objects
|
|
|
|
that really exist.
|
|
|
|
|
|
|
|
:return a tuple of two lists of strings (expected, actual)
|
|
|
|
"""
|
|
|
|
stripe_size = 1024 * 1024 * 4
|
|
|
|
|
|
|
|
size = max(stripe_size, size)
|
|
|
|
|
|
|
|
want_objects = [
|
|
|
|
"{0:x}.{1:08x}".format(ino, n)
|
2020-04-04 16:02:40 +00:00
|
|
|
for n in range(0, ((size - 1) // stripe_size) + 1)
|
2015-01-21 14:28:13 +00:00
|
|
|
]
|
|
|
|
|
|
|
|
exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n")
|
|
|
|
|
|
|
|
return want_objects, exist_objects
|
|
|
|
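# Editor's note: the naming arithmetic above, worked for a hypothetical 10 MiB file
# with inode 0x10000000002 (assumes the default 4 MiB stripe/object size):
#
#     ino, size = 0x10000000002, 10 * 1024 * 1024
#     stripe_size = 4 * 1024 * 1024
#     ["{0:x}.{1:08x}".format(ino, n)
#      for n in range(0, ((size - 1) // stripe_size) + 1)]
#     # -> ['10000000002.00000000', '10000000002.00000001', '10000000002.00000002']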
|
|
|
|
def data_objects_present(self, ino, size):
|
|
|
|
"""
|
|
|
|
Check that *all* the expected data objects for an inode are present in the data pool
|
|
|
|
"""
|
|
|
|
|
|
|
|
want_objects, exist_objects = self._enumerate_data_objects(ino, size)
|
|
|
|
missing = set(want_objects) - set(exist_objects)
|
|
|
|
|
|
|
|
if missing:
|
|
|
|
log.info("Objects missing (ino {0}, size {1}): {2}".format(
|
|
|
|
ino, size, missing
|
|
|
|
))
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
log.info("All objects for ino {0} size {1} found".format(ino, size))
|
|
|
|
return True
|
|
|
|
|
|
|
|
def data_objects_absent(self, ino, size):
|
|
|
|
want_objects, exist_objects = self._enumerate_data_objects(ino, size)
|
|
|
|
present = set(want_objects) & set(exist_objects)
|
|
|
|
|
|
|
|
if present:
|
|
|
|
log.info("Objects not absent (ino {0}, size {1}): {2}".format(
|
|
|
|
ino, size, present
|
|
|
|
))
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
log.info("All objects for ino {0} size {1} are absent".format(ino, size))
|
|
|
|
return True
|
|
|
|
|
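# Editor's note: a hedged usage sketch. Tests typically poll these helpers after
# unlinking a file to confirm that purge has removed its objects (`ino`, `size`
# and the wait_until_true helper are assumptions from the test-case context):
#
#     self.wait_until_true(lambda: self.fs.data_objects_absent(ino, size),
#                          timeout=60)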
2016-12-23 18:59:39 +00:00
|
|
|
def dirfrag_exists(self, ino, frag):
|
|
|
|
try:
|
|
|
|
self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)])
|
2019-12-09 15:17:23 +00:00
|
|
|
except CommandFailedError:
|
2016-12-23 18:59:39 +00:00
|
|
|
return False
|
|
|
|
else:
|
|
|
|
return True
|
|
|
|
|
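# Editor's note: illustrative call. The root directory's first fragment object is
# named "1.00000000" in the metadata pool, so (assuming a healthy filesystem):
#
#     assert fs.dirfrag_exists(ROOT_INO, 0)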
2018-01-24 16:11:40 +00:00
|
|
|
def rados(self, args, pool=None, namespace=None, stdin_data=None,
|
qa/tasks/cephfs: allow caller to use BytesIO when calling rados()
when the caller expects binary data, it should pass BytesIO as stdout.
this change should address the failure of
```
2020-04-05T12:47:25.335 INFO:tasks.cephfs_test_runner:======================================================================
2020-04-05T12:47:25.336 INFO:tasks.cephfs_test_runner:ERROR: test_apply_tag (tasks.cephfs.test_forward_scrub.TestForwardScrub)
2020-04-05T12:47:25.336 INFO:tasks.cephfs_test_runner:----------------------------------------------------------------------
2020-04-05T12:47:25.336 INFO:tasks.cephfs_test_runner:Traceback (most recent call last):
2020-04-05T12:47:25.337 INFO:tasks.cephfs_test_runner: File "/home/teuthworker/src/github.com_tchaikov_ceph_wip-qa-py3/qa/tasks/cephfs/test_forward_scrub.py", line 75, in test_apply_tag
2020-04-05T12:47:25.337 INFO:tasks.cephfs_test_runner: self.assertTagged(inos[dirpath], tag, self.fs.get_metadata_pool_name())
2020-04-05T12:47:25.337 INFO:tasks.cephfs_test_runner: File "/home/teuthworker/src/github.com_tchaikov_ceph_wip-qa-py3/qa/tasks/cephfs/test_forward_scrub.py", line 98, in assertTagged
2020-04-05T12:47:25.338 INFO:tasks.cephfs_test_runner: "scrub_tag"
2020-04-05T12:47:25.338 INFO:tasks.cephfs_test_runner: File "/home/teuthworker/src/github.com_tchaikov_ceph_wip-qa-py3/qa/tasks/cephfs/test_forward_scrub.py", line 35, in _read_str_xattr
2020-04-05T12:47:25.339 INFO:tasks.cephfs_test_runner: strlen = struct.unpack('i', output[0:4])[0]
2020-04-05T12:47:25.339 INFO:tasks.cephfs_test_runner:TypeError: a bytes-like object is required, not 'str'
```
Signed-off-by: Kefu Chai <kchai@redhat.com>
2020-04-05 13:45:51 +00:00
|
|
|
stdin_file=None,
|
|
|
|
stdout_data=None):
|
2014-11-03 15:38:57 +00:00
|
|
|
"""
|
2014-12-19 13:49:07 +00:00
|
|
|
Call into the `rados` CLI from an MDS
|
2014-11-03 15:38:57 +00:00
|
|
|
"""
|
|
|
|
|
2014-12-19 13:49:07 +00:00
|
|
|
if pool is None:
|
|
|
|
pool = self.get_metadata_pool_name()
|
2014-11-03 15:38:57 +00:00
|
|
|
|
|
|
|
# Doesn't matter which MDS we use to run rados commands, they all
|
|
|
|
# have access to the pools
|
|
|
|
mds_id = self.mds_ids[0]
|
|
|
|
remote = self.mds_daemons[mds_id].remote
|
|
|
|
|
|
|
|
# NB we could alternatively use librados pybindings for this, but it's a one-liner
|
|
|
|
# using the `rados` CLI
|
2016-04-12 09:12:25 +00:00
|
|
|
args = ([os.path.join(self._prefix, "rados"), "-p", pool] +
|
|
|
|
(["--namespace", namespace] if namespace else []) +
|
|
|
|
args)
|
2018-01-24 16:11:40 +00:00
|
|
|
|
|
|
|
if stdin_file is not None:
|
|
|
|
args = ["bash", "-c", "cat " + stdin_file + " | " + " ".join(args)]
|
2020-04-05 13:45:51 +00:00
|
|
|
if stdout_data is None:
|
|
|
|
stdout_data = StringIO()
|
2018-01-24 16:11:40 +00:00
|
|
|
|
2020-04-05 13:45:51 +00:00
|
|
|
p = remote.run(args=args,
|
|
|
|
stdin=stdin_data,
|
|
|
|
stdout=stdout_data)
|
|
|
|
return p.stdout.getvalue().strip()
|
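# Editor's note: a sketch (not original code) of the binary-output path the commit
# message above motivates. Callers expecting raw bytes pass a BytesIO as
# stdout_data; the object and xattr names below are assumptions borrowed from the
# quoted test_forward_scrub traceback, and `struct` would need to be imported:
#
#     raw = fs.rados(["getxattr", "10000000002.00000000", "scrub_tag"],
#                    pool=fs.get_metadata_pool_name(), stdout_data=BytesIO())
#     strlen = struct.unpack('i', raw[0:4])[0]
#     tag = raw[4:4 + strlen].decode('utf-8')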
2014-12-19 13:49:07 +00:00
|
|
|
|
|
|
|
def list_dirfrag(self, dir_ino):
|
|
|
|
"""
|
|
|
|
Read the dirfrag object for the given directory inode and return the list of omap keys
|
|
|
|
|
|
|
|
:return a list of 0 or more strings
|
|
|
|
"""
|
|
|
|
|
|
|
|
dirfrag_obj_name = "{0:x}.00000000".format(dir_ino)
|
|
|
|
|
2014-11-03 15:38:57 +00:00
|
|
|
try:
|
2014-12-19 13:49:07 +00:00
|
|
|
key_list_str = self.rados(["listomapkeys", dirfrag_obj_name])
|
2014-11-03 15:38:57 +00:00
|
|
|
except CommandFailedError as e:
|
|
|
|
log.error(e.__str__())
|
|
|
|
raise ObjectNotFound(dirfrag_obj_name)
|
|
|
|
|
|
|
|
return key_list_str.split("\n") if key_list_str else []
|
|
|
|
|
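# Editor's note: a hedged example. Dentries live in the dirfrag object's omap with
# keys of the form "<name>_head" (see get_meta_of_fs_file below), so for a
# directory inode `dir_ino` containing a file "file_a" one would expect:
#
#     keys = fs.list_dirfrag(dir_ino)
#     assert "file_a_head" in keys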
2020-01-20 10:38:59 +00:00
|
|
|
def get_meta_of_fs_file(self, dir_ino, obj_name, out):
|
|
|
|
"""
|
|
|
|
Get a file's metadata from its parent directory fragment, to verify the correctness of the data format encoded by the cephfs-meta-injection tool.
|
|
|
|
Warning: directory splitting (fragmentation) is not considered here.
|
|
|
|
"""
|
|
|
|
dirfrag_obj_name = "{0:x}.00000000".format(dir_ino)
|
|
|
|
try:
|
|
|
|
self.rados(["getomapval", dirfrag_obj_name, obj_name+"_head", out])
|
|
|
|
except CommandFailedError as e:
|
|
|
|
log.error(e.__str__())
|
|
|
|
raise ObjectNotFound(dir_ino)
|
|
|
|
|
2014-12-19 13:49:07 +00:00
|
|
|
def erase_metadata_objects(self, prefix):
|
2014-11-03 15:38:57 +00:00
|
|
|
"""
|
2014-12-19 13:49:07 +00:00
|
|
|
For all objects in the metadata pool matching the prefix,
|
|
|
|
erase them.
|
|
|
|
|
|
|
|
This is O(N) with the number of objects in the pool, so it is only suitable
|
|
|
|
for use on toy test filesystems.
|
2014-11-03 15:38:57 +00:00
|
|
|
"""
|
2014-12-19 13:49:07 +00:00
|
|
|
all_objects = self.rados(["ls"]).split("\n")
|
|
|
|
matching_objects = [o for o in all_objects if o.startswith(prefix)]
|
|
|
|
for o in matching_objects:
|
|
|
|
self.rados(["rm", o])
|
|
|
|
|
|
|
|
def erase_mds_objects(self, rank):
|
|
|
|
"""
|
|
|
|
Erase all the per-MDS objects for a particular rank. This includes
|
|
|
|
inotable, sessiontable, journal
|
|
|
|
"""
|
|
|
|
|
|
|
|
def obj_prefix(multiplier):
|
|
|
|
"""
|
|
|
|
MDS object naming convention, e.g. rank 1's
|
|
|
|
journal is at 201.***
|
|
|
|
"""
|
|
|
|
return "%x." % (multiplier * 0x100 + rank)
|
|
|
|
|
|
|
|
# MDS_INO_LOG_OFFSET
|
|
|
|
self.erase_metadata_objects(obj_prefix(2))
|
|
|
|
# MDS_INO_LOG_BACKUP_OFFSET
|
|
|
|
self.erase_metadata_objects(obj_prefix(3))
|
|
|
|
# MDS_INO_LOG_POINTER_OFFSET
|
|
|
|
self.erase_metadata_objects(obj_prefix(4))
|
|
|
|
# MDSTables & SessionMap
|
|
|
|
self.erase_metadata_objects("mds{rank:d}_".format(rank=rank))
|
|
|
|
|
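# Editor's note: the prefix arithmetic above, worked for rank 1; the multipliers
# come straight from erase_mds_objects():
#
#     "%x." % (2 * 0x100 + 1)   # -> '201.'  (journal objects)
#     "%x." % (3 * 0x100 + 1)   # -> '301.'  (journal backup)
#     "%x." % (4 * 0x100 + 1)   # -> '401.'  (journal pointer)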
2015-07-21 17:39:05 +00:00
|
|
|
@property
|
|
|
|
def _prefix(self):
|
|
|
|
"""
|
|
|
|
Override this to set a different path prefix for the Ceph binaries.
|
|
|
|
"""
|
|
|
|
return ""
|
|
|
|
|
2018-09-04 06:20:54 +00:00
|
|
|
def _make_rank(self, rank):
|
|
|
|
return "{}:{}".format(self.name, rank)
|
|
|
|
|
2014-12-19 13:49:07 +00:00
|
|
|
def _run_tool(self, tool, args, rank=None, quiet=False):
|
|
|
|
# Tests frequently have [client] configuration that jacks up
|
|
|
|
# the objecter log level (unlikely to be interesting here)
|
|
|
|
# and does not set the mds log level (very interesting here)
|
|
|
|
if quiet:
|
2015-08-06 09:20:34 +00:00
|
|
|
base_args = [os.path.join(self._prefix, tool), '--debug-mds=1', '--debug-objecter=1']
|
2014-12-19 13:49:07 +00:00
|
|
|
else:
|
2015-08-06 09:20:34 +00:00
|
|
|
base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1']
|
2014-12-19 13:49:07 +00:00
|
|
|
|
|
|
|
if rank is not None:
|
2018-09-04 06:20:54 +00:00
|
|
|
base_args.extend(["--rank", "%s" % str(rank)])
|
2014-12-19 13:49:07 +00:00
|
|
|
|
|
|
|
t1 = datetime.datetime.now()
|
2020-04-23 11:21:46 +00:00
|
|
|
r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
|
2014-12-19 13:49:07 +00:00
|
|
|
duration = datetime.datetime.now() - t1
|
|
|
|
log.info("Ran {0} in time {1}, result:\n{2}".format(
|
|
|
|
base_args + args, duration, r
|
|
|
|
))
|
|
|
|
return r
|
|
|
|
|
2015-10-02 16:37:44 +00:00
|
|
|
@property
|
|
|
|
def tool_remote(self):
|
|
|
|
"""
|
|
|
|
An arbitrary remote to use when invoking recovery tools. Use an MDS host because
|
|
|
|
it will definitely have keys with permissions to access the cephfs metadata pool. This is public
|
|
|
|
so that tests can use this remote to go get locally written output files from the tools.
|
|
|
|
"""
|
|
|
|
mds_id = self.mds_ids[0]
|
|
|
|
return self.mds_daemons[mds_id].remote
|
|
|
|
|
2018-09-04 06:20:54 +00:00
|
|
|
def journal_tool(self, args, rank, quiet=False):
|
2014-12-19 13:49:07 +00:00
|
|
|
"""
|
2018-09-04 06:20:54 +00:00
|
|
|
Invoke cephfs-journal-tool with the passed arguments for a rank, and return its stdout
|
2014-12-19 13:49:07 +00:00
|
|
|
"""
|
2018-09-04 06:20:54 +00:00
|
|
|
fs_rank = self._make_rank(rank)
|
|
|
|
return self._run_tool("cephfs-journal-tool", args, fs_rank, quiet)
|
2014-12-19 13:49:07 +00:00
|
|
|
|
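# Editor's note: a hedged usage sketch. The rank string comes from _make_rank()
# above, so for a filesystem named "cephfs" this ends up running
# `cephfs-journal-tool --rank cephfs:0 journal inspect`:
#
#     out = fs.journal_tool(["journal", "inspect"], 0)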
2020-01-20 10:38:59 +00:00
|
|
|
def meta_tool(self, args, rank, quiet=False):
|
|
|
|
"""
|
|
|
|
Invoke cephfs-meta-injection with the passed arguments for a rank, and return its stdout
|
|
|
|
"""
|
|
|
|
fs_rank = self._make_rank(rank)
|
|
|
|
return self._run_tool("cephfs-meta-injection", args, fs_rank, quiet)
|
|
|
|
|
2014-12-19 13:49:07 +00:00
|
|
|
def table_tool(self, args, quiet=False):
|
|
|
|
"""
|
|
|
|
Invoke cephfs-table-tool with the passed arguments, and return its stdout
|
|
|
|
"""
|
|
|
|
return self._run_tool("cephfs-table-tool", args, None, quiet)
|
2015-05-13 12:39:22 +00:00
|
|
|
|
2015-12-23 13:57:02 +00:00
|
|
|
def data_scan(self, args, quiet=False, worker_count=1):
|
2015-05-13 12:39:22 +00:00
|
|
|
"""
|
|
|
|
Invoke cephfs-data-scan with the passed arguments, and return its stdout
|
2015-12-23 13:57:02 +00:00
|
|
|
|
|
|
|
:param worker_count: if greater than 1, multiple workers will be run
|
|
|
|
in parallel and the return value will be None
|
2015-05-13 12:39:22 +00:00
|
|
|
"""
|
2015-12-23 13:57:02 +00:00
|
|
|
|
|
|
|
workers = []
|
|
|
|
|
|
|
|
for n in range(0, worker_count):
|
|
|
|
if worker_count > 1:
|
|
|
|
# data-scan args first token is a command, followed by args to it.
|
|
|
|
# insert worker arguments after the command.
|
|
|
|
cmd = args[0]
|
|
|
|
worker_args = [cmd] + ["--worker_n", n.__str__(), "--worker_m", worker_count.__str__()] + args[1:]
|
|
|
|
else:
|
|
|
|
worker_args = args
|
|
|
|
|
|
|
|
workers.append(Greenlet.spawn(lambda wargs=worker_args:
|
|
|
|
self._run_tool("cephfs-data-scan", wargs, None, quiet)))
|
|
|
|
|
|
|
|
for w in workers:
|
|
|
|
w.get()
|
|
|
|
|
|
|
|
if worker_count == 1:
|
|
|
|
return workers[0].value
|
|
|
|
else:
|
|
|
|
return None
|
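# Editor's note: a hedged sketch of the parallel path described in the docstring.
# Each greenlet re-runs the same subcommand with --worker_n/--worker_m spliced in
# after the first token, e.g. (pool name taken from the running filesystem):
#
#     fs.data_scan(["scan_extents", fs.get_data_pool_name()], worker_count=4)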
2017-12-19 05:29:11 +00:00
|
|
|
|
|
|
|
def is_full(self):
|
|
|
|
return self.is_pool_full(self.get_data_pool_name())
|