"""
Resolve stuck peering
"""
import logging
import time

from teuthology import misc as teuthology
from tasks.util.rados import rados

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Test resolving stuck peering by marking the failed primary OSD as lost

    requires 3 osds on a single test node
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'Resolve stuck peering only accepts a dict for config'

    manager = ctx.managers['ceph']

    while len(manager.get_osd_status()['up']) < 3:
        time.sleep(10)

    manager.wait_for_clean()

    dummyfile = '/etc/fstab'
    dummyfile1 = '/etc/resolv.conf'

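    # A single-PG pool keeps every test object in the same placement group,
    # so that PG's peering state can be inspected directly with get_pgid()
    # and get_single_pg_stats() later on.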
    # create 1 PG pool
    pool = 'foo'
    log.info('creating pool foo')
    manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1')

    # set min_size of the pool to 1
    # so that we can continue with I/O
    # when 2 osds are down
    manager.set_pool_property(pool, "min_size", 1)

    osds = [0, 1, 2]

    primary = manager.get_pg_primary('foo', 0)
    log.info("primary osd is %d", primary)

    others = list(osds)
    others.remove(primary)

    log.info('writing initial objects')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
    # create a few objects
    for i in range(100):
        rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])

    manager.wait_for_clean()

    # kill other osds except primary
    log.info('killing other osds except primary')
    for i in others:
        manager.kill_osd(i)
    for i in others:
        manager.mark_down_osd(i)

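    # With both replicas down and min_size set to 1, these writes are
    # acknowledged by the primary alone, so the new_* objects exist only on
    # the primary's copy of the PG.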
    for i in range(100):
        rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])

    # kill primary osd
    manager.kill_osd(primary)
    manager.mark_down_osd(primary)

    # revive other 2 osds
    for i in others:
        manager.revive_osd(i)

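    # The revived replicas never saw the new_* writes, and the only OSD from
    # the most recent writable interval (the primary) is still down, so
    # peering cannot complete and the PG should be reported as down.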
    # make sure that pg is down
    # assuming pg number for a single pg pool will start from 0
    pgnum = 0
    pgstr = manager.get_pgid(pool, pgnum)
    stats = manager.get_single_pg_stats(pgstr)
    print(stats['state'])

    timeout = 60
    start = time.time()

    while 'down' not in stats['state']:
        assert time.time() - start < timeout, \
            'failed to reach down state before timeout expired'
        stats = manager.get_single_pg_stats(pgstr)

    # mark primary as lost
    manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,
                            '--yes-i-really-mean-it')

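    # Declaring the dead primary lost lets peering stop waiting for it: the
    # PG can go active on the surviving OSDs, at the cost of the new_*
    # objects that were only ever written to the primary.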
    # expect the pg status to be active+undersized+degraded
    # pg should recover and become active+clean within timeout
    stats = manager.get_single_pg_stats(pgstr)
    print(stats['state'])

    timeout = 10
    start = time.time()

    while manager.get_num_down():
        assert time.time() - start < timeout, \
            'failed to recover before timeout expired'

    manager.revive_osd(primary)