"""Scrub testing"""

import contextlib
import json
import logging
import os
import time
import tempfile

from tasks import ceph_manager
from teuthology import misc as teuthology

log = logging.getLogger(__name__)


def wait_for_victim_pg(manager, poolid):
    """Return a PG with some data and its acting set"""
    # wait for some PG to have data that we can mess with
    victim = None
    while victim is None:
        stats = manager.get_pg_stats()
        for pg in stats:
            pgid = str(pg['pgid'])
            pgpool = int(pgid.split('.')[0])
            if poolid != pgpool:
                continue
            size = pg['stat_sum']['num_bytes']
            if size > 0:
                victim = pg['pgid']
                acting = pg['acting']
                return victim, acting
        time.sleep(3)


def find_victim_object(ctx, pg, osd):
    """Return a file to be fuzzed"""
    (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys()
    data_path = os.path.join(
        '/var/lib/ceph/osd',
        'ceph-{id}'.format(id=osd),
        'fuse',
        '{pg}_head'.format(pg=pg),
        'all',
        )

    # fuzz time
    ls_out = osd_remote.sh('sudo ls %s' % data_path)

    # find an object file we can mess with (and not the pg info object)
    osdfilename = next(line for line in ls_out.split('\n')
                       if not line.endswith('::::head#'))
    assert osdfilename is not None

    # Get actual object name from osd stored filename
    objname = osdfilename.split(':')[4]
    return osd_remote, os.path.join(data_path, osdfilename), objname


def corrupt_file(osd_remote, path):
    # put a single \0 at the beginning of the file
    osd_remote.run(
        args=['sudo', 'dd',
              'if=/dev/zero',
              'of=%s/data' % path,
              'bs=1', 'count=1', 'conv=notrunc']
    )


def get_pgnum(pgid):
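    """Return the part of the pgid after the pool id, e.g. '1a' for '2.1a'"""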
    pos = pgid.find('.')
    assert pos != -1
    return pgid[pos+1:]


def deep_scrub(manager, victim, pool):
    # scrub, verify inconsistent
    pgnum = get_pgnum(victim)
    manager.do_pg_scrub(pool, pgnum, 'deep-scrub')

    stats = manager.get_single_pg_stats(victim)
    inconsistent = stats['state'].find('+inconsistent') != -1
    assert inconsistent


def repair(manager, victim, pool):
    # repair, verify no longer inconsistent
    pgnum = get_pgnum(victim)
    manager.do_pg_scrub(pool, pgnum, 'repair')

    stats = manager.get_single_pg_stats(victim)
    inconsistent = stats['state'].find('+inconsistent') != -1
    assert not inconsistent


def test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, pool):
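    # corrupt the object's data on one OSD, confirm deep-scrub flags the PG
    # inconsistent, then confirm repair clears the inconsistency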
    corrupt_file(osd_remote, obj_path)
    deep_scrub(manager, pg, pool)
    repair(manager, pg, pool)


def test_repair_bad_omap(ctx, manager, pg, osd, objname):
    # Test deep-scrub with various omap modifications
    # Modify omap on specific osd
    log.info('fuzzing omap of %s' % objname)
    manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key'])
    manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
                                   'badkey', 'badval'])
    manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr'])

    deep_scrub(manager, pg, 'rbd')
    # please note, the repair here is erroneous: it rewrites the correct omap
    # digest and data digest on the replicas with the corresponding digests
    # from the primary osd which is hosting the victim object, see
    # find_victim_object().
    # so we need to either put this test at the end of this task, or undo the
    # mess-up manually before the repair() below, which merely ensures the
    # cleanup is sane. otherwise the succeeding tests would fail if they set
    # "badkey" again in the hope of getting an "inconsistent" pg from a
    # deep-scrub.
    manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'hdr'])
    manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'badkey'])
    manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
                                   'key', 'val'])
    repair(manager, pg, 'rbd')


class MessUp:
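    """Corrupt a victim object on a single OSD and restore it afterwards.

    Each helper is a context manager: it corrupts the object's data file via
    the objectstore fuse mount, or its omap via the OSD admin socket, and
    yields the names of the scrub errors the corruption is expected to
    produce.
    """
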
    def __init__(self, manager, osd_remote, pool, osd_id,
                 obj_name, obj_path, omap_key, omap_val):
        self.manager = manager
        self.osd = osd_remote
        self.pool = pool
        self.osd_id = osd_id
        self.obj = obj_name
        self.path = obj_path
        self.omap_key = omap_key
        self.omap_val = omap_val

    @contextlib.contextmanager
    def _test_with_file(self, messup_cmd, *checks):
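        # back up the object's data file, apply the mess-up command, yield
        # the expected scrub errors, then restore the backup (recreating the
        # object's directory first in case the mess-up removed it)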
        temp = tempfile.mktemp()
        backup_cmd = ['sudo', 'cp', os.path.join(self.path, 'data'), temp]
        self.osd.run(args=backup_cmd)
        self.osd.run(args=messup_cmd.split())
        yield checks
        create_cmd = ['sudo', 'mkdir', self.path]
        self.osd.run(args=create_cmd, check_status=False)
        restore_cmd = ['sudo', 'cp', temp, os.path.join(self.path, 'data')]
        self.osd.run(args=restore_cmd)

    def remove(self):
        cmd = 'sudo rmdir {path}'.format(path=self.path)
        return self._test_with_file(cmd, 'missing')

    def append(self):
        cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \
              'conv=notrunc oflag=append'.format(path=self.path)
        return self._test_with_file(cmd,
                                    'data_digest_mismatch',
                                    'size_mismatch')

    def truncate(self):
        cmd = 'sudo dd if=/dev/null of={path}/data'.format(path=self.path)
        return self._test_with_file(cmd,
                                    'data_digest_mismatch',
                                    'size_mismatch')

    def change_obj(self):
        cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \
              'conv=notrunc'.format(path=self.path)
        return self._test_with_file(cmd,
                                    'data_digest_mismatch')

    @contextlib.contextmanager
    def rm_omap(self):
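        # drop an existing omap key on this OSD only, then put it back after
        # the caller has run its checks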
        cmd = ['rmomapkey', self.pool, self.obj, self.omap_key]
        self.manager.osd_admin_socket(self.osd_id, cmd)
        yield ('omap_digest_mismatch',)
        cmd = ['setomapval', self.pool, self.obj,
               self.omap_key, self.omap_val]
        self.manager.osd_admin_socket(self.osd_id, cmd)

    @contextlib.contextmanager
    def add_omap(self):
        cmd = ['setomapval', self.pool, self.obj, 'badkey', 'badval']
        self.manager.osd_admin_socket(self.osd_id, cmd)
        yield ('omap_digest_mismatch',)
        cmd = ['rmomapkey', self.pool, self.obj, 'badkey']
        self.manager.osd_admin_socket(self.osd_id, cmd)

    @contextlib.contextmanager
    def change_omap(self):
        cmd = ['setomapval', self.pool, self.obj, self.omap_key, 'badval']
        self.manager.osd_admin_socket(self.osd_id, cmd)
        yield ('omap_digest_mismatch',)
        cmd = ['setomapval', self.pool, self.obj, self.omap_key, self.omap_val]
        self.manager.osd_admin_socket(self.osd_id, cmd)


class InconsistentObjChecker:
    """Check the returned inconsistents/inconsistent info"""

    def __init__(self, osd, acting, obj_name):
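        # osd: id of the OSD whose shard is expected to be inconsistent
        # acting: acting set of the victim PG
        # obj_name: name of the victim object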
        self.osd = osd
        self.acting = acting
        self.obj = obj_name
        assert self.osd in self.acting

    def basic_checks(self, inc):
        assert inc['object']['name'] == self.obj
        assert inc['object']['snap'] == "head"
        assert len(inc['shards']) == len(self.acting), \
            "the number of returned shards does not match the acting set"

    def run(self, check, inc):
        func = getattr(self, check)
        func(inc)

    def _check_errors(self, inc, err_name):
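        # the shard on self.osd must report err_name in its errors, while
        # every other shard in the acting set must not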
        bad_found = False
        good_found = False
        for shard in inc['shards']:
            log.info('shard = %r' % shard)
            log.info('err = %s' % err_name)
            assert 'osd' in shard
            osd = shard['osd']
            err = err_name in shard['errors']
            if osd == self.osd:
                assert bad_found is False, \
                    "multiple entries found for the given OSD"
                assert err is True, \
                    "Didn't find '{err}' in errors".format(err=err_name)
                bad_found = True
            else:
                assert osd in self.acting, "shard not in acting set"
                assert err is False, \
                    "Didn't expect '{err}' in errors".format(err=err_name)
                good_found = True
        assert bad_found is True, \
            "Shard for osd.{osd} not found".format(osd=self.osd)
        assert good_found is True, \
            "No other acting shards found"

    def _check_attrs(self, inc, attr_name):
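        # the shard on self.osd must report a value for attr_name that
        # differs from the single, consistent value reported by the other
        # shards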
        bad_attr = None
        good_attr = None
        for shard in inc['shards']:
            log.info('shard = %r' % shard)
            log.info('attr = %s' % attr_name)
            assert 'osd' in shard
            osd = shard['osd']
            attr = shard.get(attr_name, False)
            if osd == self.osd:
                assert bad_attr is None, \
                    "multiple entries found for the given OSD"
                bad_attr = attr
            else:
                assert osd in self.acting, "shard not in acting set"
                assert good_attr is None or good_attr == attr, \
                    "multiple good attrs found"
                good_attr = attr
        assert bad_attr is not None, \
            "bad {attr} not found".format(attr=attr_name)
        assert good_attr is not None, \
            "good {attr} not found".format(attr=attr_name)
        assert good_attr != bad_attr, \
            "bad attr is identical to the good ones: " \
            "{0} == {1}".format(good_attr, bad_attr)

    def data_digest_mismatch(self, inc):
        assert 'data_digest_mismatch' in inc['errors']
        self._check_attrs(inc, 'data_digest')

    def missing(self, inc):
        assert 'missing' in inc['union_shard_errors']
        self._check_errors(inc, 'missing')

    def size_mismatch(self, inc):
        assert 'size_mismatch' in inc['errors']
        self._check_attrs(inc, 'size')

    def omap_digest_mismatch(self, inc):
        assert 'omap_digest_mismatch' in inc['errors']
        self._check_attrs(inc, 'omap_digest')


def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id,
                               obj_name, obj_path):
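    # corrupt the object in various ways and verify that
    # "rados list-inconsistent-pg" reports the victim PG and that
    # "rados list-inconsistent-obj" reports the expected per-shard errors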
    mon = manager.controller
    pool = 'rbd'
    omap_key = 'key'
    omap_val = 'val'
    manager.do_rados(['setomapval', obj_name, omap_key, omap_val], pool=pool)
    # Update missing digests, requires "osd deep scrub update digest min age: 0"
    pgnum = get_pgnum(pg)
    manager.do_pg_scrub(pool, pgnum, 'deep-scrub')

    messup = MessUp(manager, osd_remote, pool, osd_id, obj_name, obj_path,
                    omap_key, omap_val)
    for test in [messup.rm_omap, messup.add_omap, messup.change_omap,
                 messup.append, messup.truncate, messup.change_obj,
                 messup.remove]:
        with test() as checks:
            deep_scrub(manager, pg, pool)
            cmd = 'rados list-inconsistent-pg {pool} ' \
                  '--format=json'.format(pool=pool)
            pgs = json.loads(mon.sh(cmd))
            assert pgs == [pg]

            cmd = 'rados list-inconsistent-obj {pg} ' \
                  '--format=json'.format(pg=pg)
            objs = json.loads(mon.sh(cmd))
            assert len(objs['inconsistents']) == 1

            checker = InconsistentObjChecker(osd_id, acting, obj_name)
            inc_obj = objs['inconsistents'][0]
            log.info('inc = %r', inc_obj)
            checker.basic_checks(inc_obj)
            for check in checks:
                checker.run(check, inc_obj)


def task(ctx, config):
    """
    Test [deep] scrub

    tasks:
    - chef:
    - install:
    - ceph:
        log-ignorelist:
        - '!= data_digest'
        - '!= omap_digest'
        - '!= size'
        - deep-scrub 0 missing, 1 inconsistent objects
        - deep-scrub [0-9]+ errors
        - repair 0 missing, 1 inconsistent objects
        - repair [0-9]+ errors, [0-9]+ fixed
        - shard [0-9]+ .* : missing
        - deep-scrub 1 missing, 1 inconsistent objects
        - does not match object info size
        - attr name mistmatch
        - deep-scrub 1 missing, 0 inconsistent objects
        - failed to pick suitable auth object
        - candidate size [0-9]+ info size [0-9]+ mismatch
      conf:
        osd:
          osd deep scrub update digest min age: 0
    - scrub_test:
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'scrub_test task only accepts a dict for configuration'
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()

    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
    log.info('num_osds is %s' % num_osds)

    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        logger=log.getChild('ceph_manager'),
        )

    while len(manager.get_osd_status()['up']) < num_osds:
        time.sleep(10)

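    # mount the objectstore via fuse on every OSD so that the test can reach
    # the objects' backing files directly (see find_victim_object)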
    for i in range(num_osds):
        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs',
                                '--', '--osd-objectstore-fuse')
    manager.flush_pg_stats(range(num_osds))
    manager.wait_for_clean()

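    # look up the id of the 'rbd' pool; the victim PG must belong to it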
    osd_dump = manager.get_osd_dump_json()
    poolid = -1
    for p in osd_dump['pools']:
        if p['pool_name'] == 'rbd':
            poolid = p['pool']
            break
    assert poolid != -1

    # write some data
    p = manager.do_rados(['bench', '--no-cleanup', '1', 'write', '-b', '4096'], pool='rbd')
    log.info('err is %d' % p.exitstatus)

    # wait for some PG to have data that we can mess with
    pg, acting = wait_for_victim_pg(manager, poolid)
    osd = acting[0]

    osd_remote, obj_path, obj_name = find_victim_object(ctx, pg, osd)
    p = manager.do_rados(['setomapval', obj_name, 'key', 'val'], pool='rbd')
    log.info('err is %d' % p.exitstatus)
    p = manager.do_rados(['setomapheader', obj_name, 'hdr'], pool='rbd')
    log.info('err is %d' % p.exitstatus)

    # Update missing digests, requires "osd deep scrub update digest min age: 0"
    pgnum = get_pgnum(pg)
    manager.do_pg_scrub('rbd', pgnum, 'deep-scrub')

    log.info('messing with PG %s on osd %d' % (pg, osd))
    test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, 'rbd')
    test_repair_bad_omap(ctx, manager, pg, osd, obj_name)
    test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd,
                               obj_name, obj_path)
    log.info('test successful!')

    # shut down fuse mount
    for i in range(num_osds):
        manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs',
                                '--', '--no-osd-objectstore-fuse')
    time.sleep(5)
    log.info('done')