mirror of
https://github.com/ceph/ceph
synced 2025-01-01 08:32:24 +00:00
tasks/repair_test: clean up manager usage, adjust to new location
Only repair_test_2 actually uses ctx, the rest of the functions only need the manager. Signed-off-by: Josh Durgin <jdurgin@redhat.com>
This commit is contained in:
parent
6f465efa04
commit
a4267dba2c
@ -4,68 +4,67 @@ Test pool repairing after objects are damaged.
|
||||
import logging
import time

import ceph_manager
from teuthology import misc as teuthology

# Module-level logger shared by all repair-test helpers below.
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def choose_primary(manager, pool, num):
    """
    Return the primary OSD of pg *num* in *pool* as the victim to test on.
    """
    log.info("Choosing primary")
    osd = manager.get_pg_primary(pool, num)
    return osd
|
||||
|
||||
|
||||
def choose_replica(manager, pool, num):
    """
    Return a replica OSD of pg *num* in *pool* as the victim to test on.
    """
    log.info("Choosing replica")
    osd = manager.get_pg_replica(pool, num)
    return osd
|
||||
|
||||
|
||||
def trunc(manager, osd, pool, obj):
    """
    Truncate *obj* in *pool* on *osd* via the osd admin socket.
    """
    log.info("truncating object")
    cmd = ['truncobj', pool, obj, '1']
    return manager.osd_admin_socket(osd, cmd)
|
||||
|
||||
|
||||
def dataerr(manager, osd, pool, obj):
    """
    Inject a data error into *obj* via the osd admin socket.
    """
    log.info("injecting data err on object")
    cmd = ['injectdataerr', pool, obj]
    return manager.osd_admin_socket(osd, cmd)
|
||||
|
||||
|
||||
def mdataerr(manager, osd, pool, obj):
    """
    Inject a metadata error into *obj* via the osd admin socket.
    """
    log.info("injecting mdata err on object")
    cmd = ['injectmdataerr', pool, obj]
    return manager.osd_admin_socket(osd, cmd)
|
||||
|
||||
|
||||
def omaperr(manager, osd, pool, obj):
    """
    Cause an omap error by writing a bogus key/value pair on *obj*.
    """
    log.info("injecting omap err on object")
    cmd = ['setomapval', pool, obj, 'badkey', 'badval']
    return manager.osd_admin_socket(osd, cmd)
|
||||
|
||||
|
||||
def repair_test_1(manager, corrupter, chooser, scrub_type):
    """
    Creates an object in the pool, corrupts it, scrubs it, and verifies
    that the pool is inconsistent.  It then repairs the pool and
    re-scrubs to verify the pool is consistent again.

    :param manager: the CephManager to run commands through
    :param corrupter: function that injects corruption on an object
    :param chooser: primary or replica selection routine
    :param scrub_type: regular scrub or deep-scrub
    """
    pool = "repair_pool_1"
    manager.wait_for_clean()
    with manager.pool(pool, 1):

        log.info("starting repair test type 1")
        victim_osd = chooser(manager, pool, 0)

        # create object
        log.info("doing put")
        manager.do_put(pool, 'repair_test_obj', '/etc/hosts')

        # corrupt object
        log.info("corrupting object")
        corrupter(manager, victim_osd, pool, 'repair_test_obj')

        # verify inconsistent
        log.info("scrubbing")
        manager.do_pg_scrub(pool, 0, scrub_type)

        assert manager.pg_inconsistent(pool, 0)

        # repair
        log.info("repairing")
        manager.do_pg_scrub(pool, 0, "repair")

        log.info("re-scrubbing")
        manager.do_pg_scrub(pool, 0, scrub_type)

        # verify consistent
        assert not manager.pg_inconsistent(pool, 0)
        log.info("done")
|
||||
|
||||
|
||||
def repair_test_2(ctx, manager, config, chooser):
    """
    First creates a set of objects and sets the omap value.  It then
    corrupts an object, does both a scrub and a deep-scrub, corrupts
    more objects, repairs, and verifies the pool ends up consistent.

    :param ctx: the teuthology context (used to locate the first mon)
    :param manager: the CephManager to run commands through
    :param config: task configuration
    :param chooser: primary or replica selection routine.
    """
    pool = "repair_pool_2"
    manager.wait_for_clean()
    with manager.pool(pool, 1):
        log.info("starting repair test type 2")
        victim_osd = chooser(manager, pool, 0)
        first_mon = teuthology.get_first_mon(ctx, config)
        # NOTE(review): iterkeys() is Python-2-only; consistent with the
        # rest of this era's teuthology code.
        (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

        # create object
        log.info("doing put and setomapval")
        manager.do_put(pool, 'file1', '/etc/hosts')
        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1',
                               'key', 'val'])
        manager.do_put(pool, 'file2', '/etc/hosts')
        manager.do_put(pool, 'file3', '/etc/hosts')
        manager.do_put(pool, 'file4', '/etc/hosts')
        manager.do_put(pool, 'file5', '/etc/hosts')
        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5',
                               'key', 'val'])
        manager.do_put(pool, 'file6', '/etc/hosts')

        # corrupt object
        log.info("corrupting object")
        omaperr(manager, victim_osd, pool, 'file1')

        # verify inconsistent
        log.info("scrubbing")
        manager.do_pg_scrub(pool, 0, 'deep-scrub')

        assert manager.pg_inconsistent(pool, 0)

        # Regression test for bug #4778, should still
        # be inconsistent after scrub
        manager.do_pg_scrub(pool, 0, 'scrub')

        assert manager.pg_inconsistent(pool, 0)

        # Additional corruptions including 2 types for file1
        log.info("corrupting more objects")
        dataerr(manager, victim_osd, pool, 'file1')
        mdataerr(manager, victim_osd, pool, 'file2')
        trunc(manager, victim_osd, pool, 'file3')
        omaperr(manager, victim_osd, pool, 'file6')

        # see still inconsistent
        log.info("scrubbing")
        manager.do_pg_scrub(pool, 0, 'deep-scrub')

        assert manager.pg_inconsistent(pool, 0)

        # repair
        log.info("repairing")
        manager.do_pg_scrub(pool, 0, "repair")

        # Let repair clear inconsistent flag
        time.sleep(10)

        # verify consistent
        assert not manager.pg_inconsistent(pool, 0)

        # In the future repair might determine state of
        # inconsistency itself, verify with a deep-scrub
        log.info("scrubbing")
        manager.do_pg_scrub(pool, 0, 'deep-scrub')

        # verify consistent
        assert not manager.pg_inconsistent(pool, 0)

        log.info("done")
|
||||
|
||||
|
||||
def hinfoerr(manager, victim, pool, obj):
    """
    Cause an error in the hinfo_key by removing that attribute from
    *obj*'s copy on osd *victim* with the objectstore tool.
    """
    log.info("remove the hinfo_key")
    manager.objectstore_tool(pool,
                             osd=victim,
                             object_name=obj,
                             options='',
                             args='rm-attr hinfo_key')
|
||||
|
||||
|
||||
def repair_test_erasure_code(manager, corrupter, victim, scrub_type):
    """
    Creates an object in an erasure-coded pool, corrupts it, scrubs it,
    and verifies that the pool is inconsistent.  It then repairs the
    pool and re-scrubs to verify the pool is consistent again.

    :param manager: the CephManager to run commands through
    :param corrupter: function that injects corruption on an object
    :param victim: the osd (or role, e.g. 'primary') to corrupt on
    :param scrub_type: regular scrub or deep-scrub
    """
    pool = "repair_pool_3"
    manager.wait_for_clean()
    with manager.pool(pool_name=pool, pg_num=1,
                      erasure_code_profile_name='default'):

        log.info("starting repair test for erasure code")

        # create object
        log.info("doing put")
        manager.do_put(pool, 'repair_test_obj', '/etc/hosts')

        # corrupt object
        log.info("corrupting object")
        corrupter(manager, victim, pool, 'repair_test_obj')

        # verify inconsistent
        log.info("scrubbing")
        manager.do_pg_scrub(pool, 0, scrub_type)

        assert manager.pg_inconsistent(pool, 0)

        # repair
        log.info("repairing")
        manager.do_pg_scrub(pool, 0, "repair")

        log.info("re-scrubbing")
        manager.do_pg_scrub(pool, 0, scrub_type)

        # verify consistent
        assert not manager.pg_inconsistent(pool, 0)
        log.info("done")
|
||||
|
||||
|
||||
@ -287,27 +286,19 @@ def task(ctx, config):
|
||||
assert isinstance(config, dict), \
|
||||
'repair_test task only accepts a dict for config'
|
||||
|
||||
if not hasattr(ctx, 'manager'):
|
||||
first_mon = teuthology.get_first_mon(ctx, config)
|
||||
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
|
||||
ctx.manager = ceph_manager.CephManager(
|
||||
mon,
|
||||
ctx=ctx,
|
||||
logger=log.getChild('ceph_manager')
|
||||
)
|
||||
manager = ctx.managers['ceph']
|
||||
manager.wait_for_all_up()
|
||||
|
||||
ctx.manager.wait_for_all_up()
|
||||
manager.raw_cluster_cmd('osd', 'set', 'noscrub')
|
||||
manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub')
|
||||
|
||||
ctx.manager.raw_cluster_cmd('osd', 'set', 'noscrub')
|
||||
ctx.manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub')
|
||||
repair_test_1(manager, mdataerr, choose_primary, "scrub")
|
||||
repair_test_1(manager, mdataerr, choose_replica, "scrub")
|
||||
repair_test_1(manager, dataerr, choose_primary, "deep-scrub")
|
||||
repair_test_1(manager, dataerr, choose_replica, "deep-scrub")
|
||||
repair_test_1(manager, trunc, choose_primary, "scrub")
|
||||
repair_test_1(manager, trunc, choose_replica, "scrub")
|
||||
repair_test_2(ctx, manager, config, choose_primary)
|
||||
repair_test_2(ctx, manager, config, choose_replica)
|
||||
|
||||
repair_test_1(ctx, mdataerr, choose_primary, "scrub")
|
||||
repair_test_1(ctx, mdataerr, choose_replica, "scrub")
|
||||
repair_test_1(ctx, dataerr, choose_primary, "deep-scrub")
|
||||
repair_test_1(ctx, dataerr, choose_replica, "deep-scrub")
|
||||
repair_test_1(ctx, trunc, choose_primary, "scrub")
|
||||
repair_test_1(ctx, trunc, choose_replica, "scrub")
|
||||
repair_test_2(ctx, config, choose_primary)
|
||||
repair_test_2(ctx, config, choose_replica)
|
||||
|
||||
repair_test_erasure_code(ctx, hinfoerr, 'primary', "deep-scrub")
|
||||
repair_test_erasure_code(manager, hinfoerr, 'primary', "deep-scrub")
|
||||
|
Loading…
Reference in New Issue
Block a user