Mirror of https://github.com/ceph/ceph (synced 2025-01-19 01:21:49 +00:00)
commit 44dc0320fa

suites/fs/recovery/tasks/backtrace.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+
+tasks:
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_backtrace
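For readers unfamiliar with the YAML above: the modules list names importable Python modules whose test cases the cephfs_test_runner task collects and runs against the test cluster. A rough, hypothetical sketch of that behaviour, assuming the runner simply feeds each listed module to the standard unittest loader (the real teuthology task may differ):

import importlib
import unittest

# Hypothetical illustration only, not the actual cephfs_test_runner code:
# import each module named in the YAML and collect its TestCase classes.
modules = ["tasks.cephfs.test_backtrace"]
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for name in modules:
    suite.addTests(loader.loadTestsFromModule(importlib.import_module(name)))
# A teuthology run would then execute `suite` against a live test cluster.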
@@ -59,10 +59,19 @@ class Filesystem(object):
         self.client_id = client_list[0]
         self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1]
 
-    def create(self):
+    def get_pgs_per_fs_pool(self):
+        """
+        Calculate how many PGs to use when creating a pool, in order to avoid raising any
+        health warnings about mon_pg_warn_min_per_osd
+
+        :return: an integer number of PGs
+        """
         pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
         osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-        pgs_per_fs_pool = pg_warn_min_per_osd * osd_count
+        return pg_warn_min_per_osd * osd_count
+
+    def create(self):
+        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
 
         self.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'metadata', pgs_per_fs_pool.__str__()])
         self.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', 'data', pgs_per_fs_pool.__str__()])
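To make the sizing above concrete, here is a small worked example of get_pgs_per_fs_pool()'s arithmetic; the mon_pg_warn_min_per_osd value and OSD count below are assumed purely for illustration, not taken from the diff:

# Hypothetical numbers: with mon_pg_warn_min_per_osd = 30 and 3 OSDs,
# each filesystem pool is created with 30 * 3 = 90 PGs, so the per-OSD
# PG count stays at or above the warning threshold.
pg_warn_min_per_osd = 30   # assumed config value
osd_count = 3              # assumed number of OSD roles in the run
pgs_per_fs_pool = pg_warn_min_per_osd * osd_count
print(pgs_per_fs_pool)     # 90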
@@ -479,7 +488,36 @@ class Filesystem(object):
             time.sleep(1)
             elapsed += 1
 
-    def read_backtrace(self, ino_no):
+    def _read_data_xattr(self, ino_no, xattr_name, type, pool):
+        mds_id = self.mds_ids[0]
+        remote = self.mds_daemons[mds_id].remote
+        if pool is None:
+            pool = self.get_data_pool_name()
+
+        obj_name = "{0:x}.00000000".format(ino_no)
+
+        temp_file = "/tmp/{0}_{1}".format(obj_name, datetime.datetime.now().isoformat())
+
+        args = [
+            "rados", "-p", pool, "getxattr", obj_name, xattr_name,
+            run.Raw(">"), temp_file
+        ]
+        try:
+            remote.run(
+                args=args,
+                stdout=StringIO())
+        except CommandFailedError as e:
+            log.error(e.__str__())
+            raise ObjectNotFound(obj_name)
+
+        p = remote.run(
+            args=["ceph-dencoder", "type", type, "import", temp_file, "decode", "dump_json"],
+            stdout=StringIO()
+        )
+
+        return json.loads(p.stdout.getvalue().strip())
+
+    def read_backtrace(self, ino_no, pool=None):
         """
         Read the backtrace from the data pool, return a dict in the format
         given by inode_backtrace_t::dump, which is something like:
@@ -497,32 +535,26 @@ class Filesystem(object):
               "pool": 1,
               "old_pools": []}
 
+        :param pool: name of pool to read backtrace from. If omitted, FS must have only
+                     one data pool and that will be used.
         """
-        mds_id = self.mds_ids[0]
-        remote = self.mds_daemons[mds_id].remote
+        return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool)
 
-        obj_name = "{0:x}.00000000".format(ino_no)
-
-        temp_file = "/tmp/{0}_{1}".format(obj_name, datetime.datetime.now().isoformat())
-
-        args = [
-            "rados", "-p", self.get_data_pool_name(), "getxattr", obj_name, "parent",
-            run.Raw(">"), temp_file
-        ]
-        try:
-            remote.run(
-                args=args,
-                stdout=StringIO())
-        except CommandFailedError as e:
-            log.error(e.__str__())
-            raise ObjectNotFound(obj_name)
-
-        p = remote.run(
-            args=["ceph-dencoder", "type", "inode_backtrace_t", "import", temp_file, "decode", "dump_json"],
-            stdout=StringIO()
-        )
-
-        return json.loads(p.stdout.getvalue().strip())
+    def read_layout(self, ino_no, pool=None):
+        """
+        Read 'layout' xattr of an inode and parse the result, returning a dict like:
+        ::
+            {
+                "stripe_unit": 4194304,
+                "stripe_count": 1,
+                "object_size": 4194304,
+                "pg_pool": 1
+            }
+
+        :param pool: name of pool to read backtrace from. If omitted, FS must have only
+                     one data pool and that will be used.
+        """
+        return self._read_data_xattr(ino_no, "layout", "ceph_file_layout_wrapper", pool)
 
     def _enumerate_data_objects(self, ino, size):
         """
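A note on what _read_data_xattr() does under the hood: it shells out to rados to fetch an xattr from the file's first object (named "<inode in hex>.00000000") and then decodes the blob with ceph-dencoder. A hedged standalone sketch of the same pipeline, with the pool name, inode number and temp path made up for illustration:

import json
import subprocess

# Illustrative values only; a real run would use the filesystem's actual
# data pool and an inode number obtained from the client mount.
pool = "cephfs_data"
ino_no = 0x10000000000
obj_name = "{0:x}.00000000".format(ino_no)   # first object of the file
temp_file = "/tmp/parent.bin"

# Fetch the raw 'parent' xattr from the object, then decode it as an
# inode_backtrace_t, mirroring the two commands the helper runs remotely.
with open(temp_file, "wb") as f:
    f.write(subprocess.check_output(
        ["rados", "-p", pool, "getxattr", obj_name, "parent"]))

backtrace = json.loads(subprocess.check_output(
    ["ceph-dencoder", "type", "inode_backtrace_t",
     "import", temp_file, "decode", "dump_json"]))
print(backtrace["ancestors"])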
tasks/cephfs/test_backtrace.py (new file, 80 lines)
@@ -0,0 +1,80 @@
+
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+
+class TestBacktrace(CephFSTestCase):
+    def test_backtrace(self):
+        """
+        That the 'parent' and 'layout' xattrs on the head objects of files
+        are updated correctly.
+        """
+
+        def get_pool_id(name):
+            return self.fs.mon_manager.get_pool_dump(name)['pool']
+
+        old_data_pool_name = self.fs.get_data_pool_name()
+        old_pool_id = get_pool_id(old_data_pool_name)
+
+        # Create a file for subsequent checks
+        self.mount_a.run_shell(["mkdir", "parent_a"])
+        self.mount_a.run_shell(["touch", "parent_a/alpha"])
+        file_ino = self.mount_a.path_to_ino("parent_a/alpha")
+
+        # That backtrace and layout are written after initial flush
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_a'], [a['dname'] for a in backtrace['ancestors']])
+        layout = self.fs.read_layout(file_ino)
+        self.assertDictEqual(layout, {
+            "stripe_unit": 4194304,
+            "stripe_count": 1,
+            "object_size": 4194304,
+            "pg_pool": old_pool_id
+        })
+        self.assertEqual(backtrace['pool'], old_pool_id)
+
+        # That backtrace is written after parentage changes
+        self.mount_a.run_shell(["mkdir", "parent_b"])
+        self.mount_a.run_shell(["mv", "parent_a/alpha", "parent_b/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace = self.fs.read_backtrace(file_ino)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace['ancestors']])
+
+        # Create a new data pool
+        new_pool_name = "data_new"
+        self.fs.admin_remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create', new_pool_name,
+                                       self.fs.get_pgs_per_fs_pool().__str__()])
+        self.fs.admin_remote.run(args=['sudo', 'ceph', 'mds', 'add_data_pool', new_pool_name])
+        new_pool_id = get_pool_id(new_pool_name)
+
+        # That an object which has switched pools gets its backtrace updated
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.pool", "-v", new_pool_name, "./parent_b/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(backtrace_old_pool['pool'], new_pool_id)
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(backtrace_new_pool['pool'], new_pool_id)
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['pg_pool'], new_pool_id)
+
+        # That subsequent linkage changes are only written to new pool backtrace
+        self.mount_a.run_shell(["mkdir", "parent_c"])
+        self.mount_a.run_shell(["mv", "parent_b/alpha", "parent_c/alpha"])
+        self.fs.mds_asok(["flush", "journal"])
+        backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name)
+        self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace_old_pool['ancestors']])
+        backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name)
+        self.assertEqual(['alpha', 'parent_c'], [a['dname'] for a in backtrace_new_pool['ancestors']])
+
+        # That layout is written to new pool after change to other field in layout
+        self.mount_a.run_shell(["setfattr", "-n", "ceph.file.layout.object_size", "-v", "8388608", "./parent_c/alpha"])
+
+        self.fs.mds_asok(["flush", "journal"])
+        new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name)
+        self.assertEqual(new_pool_layout['object_size'], 8388608)
+
+        # ...but not to the old pool: the old pool's backtrace points to the new pool, and that's enough,
+        # we don't update the layout in all the old pools whenever it changes
+        old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name)
+        self.assertEqual(old_pool_layout['object_size'], 4194304)
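As an aside on the setfattr calls in the test: on a mounted CephFS, the file layout is exposed through ceph.file.layout.* virtual xattrs, so the same fields can be read, and (while the file is still empty, as in the test) changed, straight from a client. A hedged Python sketch, with the mount path purely illustrative:

import os

# Illustrative client-side view of the vxattrs the test manipulates.
# The path is hypothetical; assumes a Linux CephFS mount and Python 3.
path = "/mnt/cephfs/parent_c/alpha"

pool = os.getxattr(path, b"ceph.file.layout.pool").decode()
object_size = int(os.getxattr(path, b"ceph.file.layout.object_size"))
print(pool, object_size)

# Equivalent of the test's
#   setfattr -n ceph.file.layout.object_size -v 8388608 <file>
# (only valid while the file has no data objects yet).
os.setxattr(path, b"ceph.file.layout.object_size", b"8388608")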