import logging
import signal
import time

from gevent import sleep
from gevent.greenlet import Greenlet
from gevent.event import Event

log = logging.getLogger(__name__)


class DaemonWatchdog(Greenlet):
    """
    DaemonWatchdog::

    Watch Ceph daemons for failures. If an extended failure is detected (i.e.
    not intentional), then the watchdog will unmount file systems and send
    SIGTERM to all daemons. The duration of an extended failure is configurable
    with the daemon_timeout option described below.

    ceph:
      watchdog:
        daemon_restart [default: no]: set to "normal" to restart a daemon
                                      after a clean exit (status==0).

        daemon_timeout [default: 300]: number of seconds a daemon
                                       is allowed to be failed before the
                                       watchdog will bark.
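
    For example, a job fragment like the following (values are illustrative)
    enables restarts of cleanly exited daemons and a 10 minute timeout::

        ceph:
          watchdog:
            daemon_restart: normal
            daemon_timeout: 600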
    """

    def __init__(self, ctx, config, thrashers):
        super(DaemonWatchdog, self).__init__()
        self.config = ctx.config.get('watchdog', {})
        self.ctx = ctx
        self.e = None
        self.logger = log.getChild('daemon_watchdog')
        self.cluster = config.get('cluster', 'ceph')
        self.name = 'watchdog'
        self.stopping = Event()
        self.thrashers = thrashers

    def _run(self):
        try:
            self.watch()
        except Exception as e:
            # See _run exception comment for MDSThrasher
            self.e = e
            self.logger.exception("exception:")
            # allow successful completion so gevent doesn't see an exception...

    def log(self, x):
        """Write data to logger"""
        self.logger.info(x)

    def stop(self):
        self.stopping.set()

    def bark(self):
        self.log("BARK! unmounting mounts and killing all daemons")
        for mount in self.ctx.mounts.values():
            try:
                mount.umount_wait(force=True)
            except:
                self.logger.exception("ignoring exception:")
        # gather daemons that are still alive so they can be terminated
        daemons = []
        daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('osd', cluster=self.cluster)))
        daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.cluster)))
        daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.cluster)))
        daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('rgw', cluster=self.cluster)))
        daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mgr', cluster=self.cluster)))

        for daemon in daemons:
            try:
                daemon.signal(signal.SIGTERM)
            except:
                self.logger.exception("ignoring exception:")
def watch(self):
|
|
|
|
self.log("watchdog starting")
|
2021-03-03 03:39:09 +00:00
|
|
|
daemon_timeout = int(self.config.get('daemon_timeout', 300))
|
|
|
|
daemon_restart = self.config.get('daemon_restart', False)
|
2019-05-06 11:27:09 +00:00
|
|
|
daemon_failure_time = {}
|
|
|
|
while not self.stopping.is_set():
|
|
|
|
bark = False
|
|
|
|
now = time.time()
|
|
|
|
|
2019-05-07 08:33:15 +00:00
|
|
|
osds = self.ctx.daemons.iter_daemons_of_role('osd', cluster=self.cluster)
|
|
|
|
mons = self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.cluster)
|
|
|
|
mdss = self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.cluster)
|
|
|
|
rgws = self.ctx.daemons.iter_daemons_of_role('rgw', cluster=self.cluster)
|
|
|
|
mgrs = self.ctx.daemons.iter_daemons_of_role('mgr', cluster=self.cluster)
|
2019-05-06 11:27:09 +00:00
|
|
|
|
|
|
|
daemon_failures = []
|
2019-05-07 08:33:15 +00:00
|
|
|
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, osds))
|
2019-05-06 11:27:09 +00:00
|
|
|
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mons))
|
|
|
|
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mdss))
|
2019-05-07 08:33:15 +00:00
|
|
|
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, rgws))
|
|
|
|
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mgrs))
|
|
|
|
|

            for daemon in daemon_failures:
                name = daemon.role + '.' + daemon.id_
                dt = daemon_failure_time.setdefault(name, (daemon, now))
                assert dt[0] is daemon
                delta = now - dt[1]
                self.log("daemon {name} is failed for ~{t:.0f}s".format(name=name, t=delta))
                if delta > daemon_timeout:
                    bark = True
                if daemon_restart == 'normal' and daemon.proc.exitstatus == 0:
                    self.log(f"attempting to restart daemon {name}")
                    daemon.restart()

            # If a daemon is no longer failed, remove it from tracking:
            for name in list(daemon_failure_time.keys()):
                if name not in [d.role + '.' + d.id_ for d in daemon_failures]:
                    self.log("daemon {name} has been restored".format(name=name))
                    del daemon_failure_time[name]

            for thrasher in self.thrashers:
                if thrasher.exception is not None:
                    self.log("{name} failed".format(name=thrasher.name))
                    bark = True

            if bark:
                self.bark()
                return

            sleep(5)

        self.log("watchdog finished")
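
# A minimal usage sketch (assumed caller, not part of this module): a task
# would typically construct the watchdog with the test context, its own
# config, and a (possibly empty) list of thrashers, then start it and stop
# it again during teardown, e.g.:
#
#   watchdog = DaemonWatchdog(ctx, config, thrashers=[])
#   watchdog.start()
#   ...
#   watchdog.stop()
#   watchdog.join()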