2011-10-04 00:49:13 +00:00
|
|
|
from cStringIO import StringIO
|
2011-06-13 23:36:21 +00:00
|
|
|
import random
|
|
|
|
import time
|
|
|
|
import re
|
|
|
|
import gevent
|
2011-10-17 22:32:22 +00:00
|
|
|
import json
|
2011-06-13 23:36:21 +00:00
|
|
|
|
|
|
|
class Thrasher(gevent.Greenlet):
    """
    Greenlet that randomly kills, revives, and marks OSDs in/out to
    exercise cluster recovery.  Runs do_thrash() until do_join() is
    called, then restores every osd to up+in.
    """

    def __init__(self, manager, config, logger=None):
        """
        manager: CephManager used to drive the cluster.
        config: dict of thrashing knobs (chance_down, min_in, min_out,
            min_live, min_dead, max_dead, clean_interval, op_delay,
            timeout); may be None, which means all defaults.
        logger: optional logger; falls back to printing.
        """
        self.ceph_manager = manager
        # Start from a known-good state so the in/out/live/dead
        # bookkeeping below reflects reality.
        self.ceph_manager.wait_till_clean()
        osd_status = self.ceph_manager.get_osd_status()
        self.in_osds = osd_status['in']
        self.live_osds = osd_status['live']
        self.out_osds = osd_status['out']
        self.dead_osds = osd_status['dead']
        self.stopping = False
        self.logger = logger
        self.config = config
        if self.logger is not None:
            self.log = lambda x: self.logger.info(x)
        else:
            def tmp(x):
                print(x)
            self.log = tmp
        if self.config is None:
            self.config = dict()
        # prevent monitor from auto-marking things out while thrasher runs
        manager.raw_cluster_cmd('mon', 'tell', '*', 'injectargs',
                                '--mon-osd-down-out-interval', '0')
        gevent.Greenlet.__init__(self, self.do_thrash)
        self.start()

    def kill_osd(self, osd=None):
        """Stop a random (or the given) live osd daemon."""
        if osd is None:
            osd = random.choice(self.live_osds)
        self.log("Killing osd %s, live_osds are %s"%(str(osd),str(self.live_osds)))
        self.live_osds.remove(osd)
        self.dead_osds.append(osd)
        self.ceph_manager.kill_osd(osd)

    def revive_osd(self, osd=None):
        """Restart a random (or the given) dead osd daemon."""
        if osd is None:
            osd = random.choice(self.dead_osds)
        self.log("Reviving osd %s"%(str(osd),))
        self.live_osds.append(osd)
        self.dead_osds.remove(osd)
        self.ceph_manager.revive_osd(osd)

    def out_osd(self, osd=None):
        """Mark a random (or the given) in osd out."""
        if osd is None:
            osd = random.choice(self.in_osds)
        self.log("Removing osd %s, in_osds are: %s"%(str(osd),str(self.in_osds)))
        self.ceph_manager.mark_out_osd(osd)
        self.in_osds.remove(osd)
        self.out_osds.append(osd)

    def in_osd(self, osd=None):
        """Mark a random (or the given) out osd in, reviving it first if dead."""
        if osd is None:
            osd = random.choice(self.out_osds)
        if osd in self.dead_osds:
            # a dead osd cannot meaningfully be marked in; revive it instead
            return self.revive_osd(osd)
        self.log("Adding osd %s"%(str(osd),))
        self.out_osds.remove(osd)
        self.in_osds.append(osd)
        self.ceph_manager.mark_in_osd(osd)

    def all_up(self):
        """Revive every dead osd and mark every out osd back in."""
        while len(self.dead_osds) > 0:
            self.revive_osd()
        while len(self.out_osds) > 0:
            self.in_osd()

    def do_join(self):
        """Signal do_thrash to stop and wait for the greenlet to finish."""
        self.stopping = True
        self.get()

    def choose_action(self):
        """
        Pick one currently-eligible thrash method, weighted by its
        configured chance.  Returns None when no action is eligible.
        """
        chance_down = self.config.get("chance_down", 0)
        if isinstance(chance_down, int):
            # integer percentages are allowed in config; normalize to [0, 1]
            chance_down = float(chance_down) / 100
        minin = self.config.get("min_in", 2)
        minout = self.config.get("min_out", 0)
        minlive = self.config.get("min_live", 2)
        mindead = self.config.get("min_dead", 1)

        actions = []
        if len(self.in_osds) > minin:
            actions.append((self.out_osd, 1.0,))
        if len(self.live_osds) > minlive and chance_down > 0:
            actions.append((self.kill_osd, chance_down))
        if len(self.out_osds) > minout:
            actions.append((self.in_osd, 1.0,))
        if len(self.dead_osds) > mindead:
            actions.append((self.revive_osd, 1.0))

        # BUG FIX: the old reduce()-based reverse-cumulative walk paired
        # each action with a neighbor's weight (for two actions the
        # selection probabilities were swapped) and raised
        # ZeroDivisionError when no action was eligible.  Do a plain
        # weighted draw instead.
        if not actions:
            return None
        total = sum(weight for (action, weight) in actions)
        val = random.uniform(0, total)
        for (action, weight) in actions:
            if val < weight:
                return action
            val -= weight
        return actions[-1][0]  # guard against float round-off

    def do_thrash(self):
        """Main loop: occasionally wait for clean, then apply one action."""
        cleanint = self.config.get("clean_interval", 60)
        maxdead = self.config.get("max_dead", 1)
        delay = self.config.get("op_delay", 5)
        self.log("starting do_thrash")
        while not self.stopping:
            self.log(" ".join([str(x) for x in ["in_osds: ", self.in_osds, " out_osds: ", self.out_osds,
                                                "dead_osds: ", self.dead_osds, "live_osds: ",
                                                self.live_osds]]))
            # on average, wait for clean roughly once per clean_interval
            if random.uniform(0, 1) < (float(delay) / cleanint):
                # the cluster cannot go clean with too many osds down
                while len(self.dead_osds) > maxdead:
                    self.revive_osd()
                self.ceph_manager.wait_till_clean(
                    timeout=self.config.get('timeout')
                    )
            # BUG FIX: choose_action() can return None; the original
            # called self.choose_action()() unconditionally -> TypeError.
            action = self.choose_action()
            if action is not None:
                action()
            time.sleep(delay)
        self.all_up()
|
2011-06-13 23:36:21 +00:00
|
|
|
|
|
|
|
class CephManager:
    """
    Helper for driving a ceph test cluster: runs the ceph CLI through a
    remote controller and inspects or manipulates osd and mon daemons.
    """

    def __init__(self, controller, ctx=None, logger=None):
        """
        controller: remote object whose run() executes the ceph CLI.
        ctx: teuthology-style context; needed for the daemon helpers
            (kill/revive osd and mon) and get_mon_status.
        logger: optional logger; falls back to printing.
        """
        self.ctx = ctx
        self.controller = controller
        if logger:
            self.log = lambda x: logger.info(x)
        else:
            def tmp(x):
                print(x)
            self.log = tmp

    def raw_cluster_cmd(self, *args):
        """Run 'ceph <args>' on the controller and return captured stdout."""
        ceph_args = [
            'LD_LIBRARY_PRELOAD=/tmp/cephtest/binary/usr/local/lib',
            '/tmp/cephtest/enable-coredump',
            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
            '/tmp/cephtest/archive/coverage',
            '/tmp/cephtest/binary/usr/local/bin/ceph',
            '-k', '/tmp/cephtest/ceph.keyring',
            '-c', '/tmp/cephtest/ceph.conf',
            '--concise',
            ]
        ceph_args.extend(args)
        proc = self.controller.run(
            args=ceph_args,
            stdout=StringIO(),
            )
        return proc.stdout.getvalue()

    def raw_cluster_status(self):
        """Return the raw 'ceph -s' output."""
        return self.raw_cluster_cmd('-s')

    def raw_osd_status(self):
        """Return the raw 'ceph osd dump' output."""
        return self.raw_cluster_cmd('osd', 'dump')

    def get_osd_status(self):
        """
        Parse the osd dump plus local daemon state into a dict with keys
        'in', 'out', 'up', 'down', 'dead', 'live', and 'raw'.
        """
        # Build a real list (not a filter object) because it is scanned
        # several times below and returned to the caller.
        osd_lines = [
            line for line in self.raw_osd_status().split('\n')
            if line.startswith('osd.') and ('up' in line or 'down' in line)
            ]
        self.log(osd_lines)

        def ids_with(token):
            # lines look like "osd.N up in ..."; strip the "osd." prefix
            return [int(line[4:].split()[0]) for line in osd_lines
                    if token in line]

        in_osds = ids_with(' in ')
        out_osds = ids_with(' out ')
        up_osds = ids_with(' up ')
        down_osds = ids_with(' down ')
        dead_osds = [int(d.id_)
                     for d in self.ctx.daemons.iter_daemons_of_role('osd')
                     if not d.running()]
        live_osds = [int(d.id_)
                     for d in self.ctx.daemons.iter_daemons_of_role('osd')
                     if d.running()]
        return {'in': in_osds, 'out': out_osds, 'up': up_osds,
                'down': down_osds, 'dead': dead_osds, 'live': live_osds,
                'raw': osd_lines}

    def get_num_pgs(self):
        """Return the total pg count reported by 'ceph -s'."""
        status = self.raw_cluster_status()
        self.log(status)
        # raw string so \d is a regex digit class, not a string escape
        return int(re.search(
            r"\d* pgs:",
            status).group(0).split()[0])

    def get_pg_stats(self):
        """Return the 'pg_stats' list from 'ceph pg dump --format=json'."""
        out = self.raw_cluster_cmd('--', 'pg', 'dump', '--format=json')
        # the first line of output is a status line, not json
        j = json.loads('\n'.join(out.split('\n')[1:]))
        return j['pg_stats']

    def get_osd_dump(self):
        """Return the 'osds' list from 'ceph osd dump --format=json'."""
        out = self.raw_cluster_cmd('--', 'osd', 'dump', '--format=json')
        # the first line of output is a status line, not json
        j = json.loads('\n'.join(out.split('\n')[1:]))
        return j['osds']

    def get_num_unfound_objects(self):
        """Return the unfound-object count from 'ceph -s', or 0 if none."""
        status = self.raw_cluster_status()
        self.log(status)
        match = re.search(r"\d+/\d+ unfound", status)
        if match is None:
            return 0
        else:
            return int(match.group(0).split('/')[0])

    def get_num_active_clean(self):
        """Count pgs whose state starts with 'active+clean'."""
        pgs = self.get_pg_stats()
        return sum(1 for pg in pgs
                   if pg['state'].startswith('active+clean'))

    def get_num_active(self):
        """Count pgs whose state starts with 'active'."""
        pgs = self.get_pg_stats()
        return sum(1 for pg in pgs
                   if pg['state'].startswith('active'))

    def is_clean(self):
        """True when every pg is active+clean."""
        return self.get_num_active_clean() == self.get_num_pgs()

    def wait_till_clean(self, timeout=None):
        """Poll until the cluster is clean; assert if timeout (s) expires."""
        self.log("waiting till clean")
        start = time.time()
        while not self.is_clean():
            if timeout is not None:
                assert time.time() - start < timeout, \
                    'failed to become clean before timeout expired'
            time.sleep(3)
        self.log("clean!")

    def osd_is_up(self, osd):
        """True when the osd dump reports the given osd as up."""
        osds = self.get_osd_dump()
        return osds[osd]['up'] > 0

    def wait_till_osd_is_up(self, osd, timeout=None):
        """Poll until osd.<osd> is up; assert if timeout (s) expires."""
        self.log('waiting for osd.%d to be up' % osd)
        start = time.time()
        while not self.osd_is_up(osd):
            if timeout is not None:
                assert time.time() - start < timeout, \
                    'osd.%d failed to come up before timeout expired' % osd
            time.sleep(3)
        self.log('osd.%d is up' % osd)

    def is_active(self):
        """True when every pg is active."""
        return self.get_num_active() == self.get_num_pgs()

    def wait_till_active(self, timeout=None):
        """Poll until the cluster is active; assert if timeout (s) expires."""
        self.log("waiting till active")
        start = time.time()
        while not self.is_active():
            if timeout is not None:
                assert time.time() - start < timeout, \
                    'failed to become active before timeout expired'
            time.sleep(3)
        self.log("active!")

    def mark_out_osd(self, osd):
        """Mark the given osd out."""
        self.raw_cluster_cmd('osd', 'out', str(osd))

    def kill_osd(self, osd):
        """Stop the given osd daemon."""
        self.ctx.daemons.get_daemon('osd', osd).stop()

    def revive_osd(self, osd):
        """Restart the given osd daemon."""
        self.ctx.daemons.get_daemon('osd', osd).restart()

    def mark_down_osd(self, osd):
        """Mark the given osd down."""
        self.raw_cluster_cmd('osd', 'down', str(osd))

    def mark_in_osd(self, osd):
        """Mark the given osd in."""
        self.raw_cluster_cmd('osd', 'in', str(osd))

    ## monitors

    def kill_mon(self, mon):
        """Stop the given mon daemon."""
        self.ctx.daemons.get_daemon('mon', mon).stop()

    def revive_mon(self, mon):
        """Restart the given mon daemon."""
        self.ctx.daemons.get_daemon('mon', mon).restart()

    def get_mon_status(self, mon):
        """Return the parsed mon_status json from the given monitor."""
        addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
        out = self.raw_cluster_cmd('-m', addr, 'mon_status')
        return json.loads(out)

    def get_mon_quorum(self):
        """Return the list of monitors currently in quorum."""
        out = self.raw_cluster_cmd('quorum_status')
        j = json.loads(out)
        self.log('quorum_status is %s' % out)
        return j['quorum']

    def wait_for_mon_quorum_size(self, size, timeout=300):
        """Poll until the quorum has the given size; assert on timeout."""
        self.log('waiting for quorum size %d' % size)
        start = time.time()
        while not len(self.get_mon_quorum()) == size:
            if timeout is not None:
                assert time.time() - start < timeout, \
                    'failed to reach quorum size %d before timeout expired' % size
            time.sleep(3)
        self.log("quorum is size %d" % size)
|