import argparse
import yaml
import textwrap

from argparse import RawTextHelpFormatter


def parse_args():
    from teuthology.run import config_file
    from teuthology.run import MergeConfig

    parser = argparse.ArgumentParser(
        description='Reset test machines',
        epilog=textwrap.dedent('''
            Examples:
            teuthology-nuke -t target.yaml --unlock --owner user@host
            teuthology-nuke -t target.yaml --pid 1234 --unlock --owner user@host
            '''),
        formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        '-v', '--verbose',
        action='store_true', default=None,
        help='be more verbose',
    )
    parser.add_argument(
        '-t', '--targets',
        nargs='+',
        type=config_file,
        action=MergeConfig,
        default={},
        dest='config',
        help='yaml config containing machines to nuke',
    )
    parser.add_argument(
        '-a', '--archive',
        metavar='DIR',
        help='archive path for a job to kill and nuke',
    )
    parser.add_argument(
        '--owner',
        help='job owner',
    )
    parser.add_argument(
        '-p', '--pid',
        type=int,
        default=False,
        help='pid of the process to be killed',
    )
    parser.add_argument(
        '-r', '--reboot-all',
        action='store_true',
        default=False,
        help='reboot all machines',
    )
    parser.add_argument(
        '-s', '--synch-clocks',
        action='store_true',
        default=False,
        help='synchronize clocks on all machines',
    )
    parser.add_argument(
        '-u', '--unlock',
        action='store_true',
        default=False,
        help='Unlock each successfully nuked machine, and output targets '
             'that could not be nuked.',
    )
    parser.add_argument(
        '-n', '--name',
        metavar='NAME',
        help='Name of run to cleanup',
    )
    parser.add_argument(
        '-i', '--noipmi',
        action='store_true', default=False,
        help='Skip ipmi checking',
    )
    args = parser.parse_args()
    return args


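# A minimal targets file for -t/--targets might look like the sketch below.
# This is an illustrative assumption, not taken from the original module;
# the hostname and key are placeholders. Multiple files may be passed, and
# the MergeConfig action folds them into ctx.config.
#
#   targets:
#     ubuntu@host1.example.com: ssh-rsa AAAA...placeholder-host-key...

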
def shutdown_daemons(ctx, log):
    """
    Unmount any ceph-fuse mounts and kill all Ceph-related processes
    on every remote in the cluster.
    """
    from .orchestra import run
    nodes = {}
    for remote in ctx.cluster.remotes.iterkeys():
        proc = remote.run(
            args=[
                'if', 'grep', '-q', 'ceph-fuse', '/etc/mtab', run.Raw(';'),
                'then',
                'grep', 'ceph-fuse', '/etc/mtab', run.Raw('|'),
                'grep', '-o', " /.* fuse", run.Raw('|'),
                'grep', '-o', "/.* ", run.Raw('|'),
                'xargs', 'sudo', 'fusermount', '-u', run.Raw(';'),
                'fi',
                run.Raw(';'),
                'sudo',
                'killall',
                '--quiet',
                'ceph-mon',
                'ceph-osd',
                'ceph-mds',
                'ceph-fuse',
                'ceph-disk',
                'radosgw',
                'ceph_test_rados',
                'rados',
                'apache2',
                run.Raw('||'),
                'true',  # ignore errors from ceph binaries not being found
            ],
            wait=False,
        )
        nodes[remote.name] = proc

    for name, proc in nodes.iteritems():
        log.info('Waiting for %s to finish shutdowns...', name)
        proc.exitstatus.get()


def find_kernel_mounts(ctx, log):
    """
    Return the list of remotes that have a kernel ceph or rbd mount.
    """
    from .orchestra import run
    nodes = {}
    log.info('Looking for kernel mounts to handle...')
    for remote in ctx.cluster.remotes.iterkeys():
        proc = remote.run(
            args=[
                'grep', '-q', ' ceph ', '/etc/mtab',
                run.Raw('||'),
                'grep', '-q', '^/dev/rbd', '/etc/mtab',
            ],
            wait=False,
        )
        nodes[remote] = proc
    kernel_mounts = list()
    for remote, proc in nodes.iteritems():
        try:
            proc.exitstatus.get()
            log.debug('kernel mount exists on %s', remote.name)
            kernel_mounts.append(remote)
        except run.CommandFailedError:  # no mounts!
            log.debug('no kernel mount on %s', remote.name)

    return kernel_mounts


def remove_kernel_mounts(ctx, kernel_mounts, log):
    """
    Properly we should be able to just do a forced unmount,
    but that doesn't seem to be working, so you should reboot instead.
    """
    from .orchestra import run
    nodes = {}
    for remote in kernel_mounts:
        log.info('clearing kernel mount from %s', remote.name)
        proc = remote.run(
            args=[
                'grep', 'ceph', '/etc/mtab', run.Raw('|'),
                'grep', '-o', "on /.* type", run.Raw('|'),
                'grep', '-o', "/.* ", run.Raw('|'),
                'xargs', '-r',
                'sudo', 'umount', '-f',
            ],
            wait=False
        )
        nodes[remote] = proc

    for remote, proc in nodes.iteritems():
        proc.exitstatus.get()


def remove_osd_mounts(ctx, log):
    """
    unmount any osd data mounts (scratch disks)
    """
    from .orchestra import run
    ctx.cluster.run(
        args=[
            'grep',
            '/var/lib/ceph/osd/',
            '/etc/mtab',
            run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', run.Raw(';'),
            'true'
        ],
    )


def remove_osd_tmpfs(ctx, log):
    """
    unmount tmpfs mounts
    """
    from .orchestra import run
    ctx.cluster.run(
        args=[
            'egrep', 'tmpfs\s+/mnt', '/etc/mtab', run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', run.Raw(';'),
            'true'
        ],
    )


def reboot(ctx, remotes, log):
    """
    Force an immediate reboot of the given remotes and wait for them
    to come back up.
    """
    from .orchestra import run
    import time
    nodes = {}
    for remote in remotes:
        log.info('rebooting %s', remote.name)
        proc = remote.run(  # note use of -n to force a no-sync reboot
            args=[
                'timeout', '5', 'sync',
                run.Raw(';'),
                'sudo', 'reboot', '-f', '-n'
            ],
            wait=False
        )
        nodes[remote] = proc
    # we just ignore these procs because reboot -f doesn't actually
    # send anything back to the ssh client!
    #for remote, proc in nodes.iteritems():
    #    proc.exitstatus.get()
    from teuthology.misc import reconnect
    if remotes:
        log.info('waiting for nodes to reboot')
        time.sleep(5)  # if we try and reconnect too quickly, it succeeds!
        reconnect(ctx, 480)  # allow 8 minutes for the reboots


def reset_syslog_dir(ctx, log):
    """
    Remove the teuthology rsyslog configuration and restart rsyslog.
    """
    from .orchestra import run
    nodes = {}
    for remote in ctx.cluster.remotes.iterkeys():
        proc = remote.run(
            args=[
                'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw(';'),
                'then',
                'sudo', 'rm', '-f', '--', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw('&&'),
                'sudo', 'service', 'rsyslog', 'restart',
                run.Raw(';'),
                'fi',
                run.Raw(';'),
            ],
            wait=False,
        )
        nodes[remote.name] = proc

    for name, proc in nodes.iteritems():
        log.info('Waiting for %s to restart syslog...', name)
        proc.exitstatus.get()


def dpkg_configure(ctx, log):
    """
    Finish any interrupted dpkg/apt runs so that package removal can proceed.
    """
    from .orchestra import run
    nodes = {}
    for remote in ctx.cluster.remotes.iterkeys():
        proc = remote.run(
            args=[
                'sudo', 'dpkg', '--configure', '-a',
                run.Raw('&&'),
                'sudo', 'apt-get', '-f', 'install',
                run.Raw('||'),
                ':',
            ],
            wait=False,
        )
        nodes[remote.name] = proc

    for name, proc in nodes.iteritems():
        log.info(
            'Waiting for %s to dpkg --configure -a and apt-get -f install...',
            name)
        proc.exitstatus.get()


def remove_installed_packages(ctx, log):
    from teuthology.task import install as install_task

    dpkg_configure(ctx, log)
    config = {'project': 'ceph'}
    install_task.remove_packages(
        ctx,
        config,
        {"deb": install_task.deb_packages['ceph'],
         "rpm": install_task.rpm_packages['ceph']})
    install_task.remove_sources(ctx, config)
    install_task.purge_data(ctx)


def remove_testing_tree(ctx, log):
    from teuthology.misc import get_testdir
    from .orchestra import run
    nodes = {}
    for remote in ctx.cluster.remotes.iterkeys():
        proc = remote.run(
            args=[
                'sudo', 'rm', '-rf', get_testdir(ctx),
                # just for old time's sake
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '/tmp/cephtest',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '/home/ubuntu/cephtest',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '/etc/ceph',
            ],
            wait=False,
        )
        nodes[remote.name] = proc

    for name, proc in nodes.iteritems():
        log.info('Waiting for %s to clear filesystem...', name)
        proc.exitstatus.get()


def synch_clocks(remotes, log):
    from .orchestra import run
    nodes = {}
    for remote in remotes:
        proc = remote.run(
            args=[
                'sudo', 'service', 'ntp', 'stop',
                run.Raw('&&'),
                'sudo', 'ntpdate-debian',
                run.Raw('&&'),
                'sudo', 'hwclock', '--systohc', '--utc',
                run.Raw('&&'),
                'sudo', 'service', 'ntp', 'start',
                run.Raw('||'),
                'true',  # ignore errors; we may be racing with ntpd startup
            ],
            wait=False,
        )
        nodes[remote.name] = proc
    for name, proc in nodes.iteritems():
        log.info('Waiting for clock to synchronize on %s...', name)
        proc.exitstatus.get()


def main():
    import gevent.monkey
    gevent.monkey.patch_all(dns=False)
    from .orchestra import monkey
    monkey.patch_all()
    from teuthology.run import config_file
    import os

    import logging

    log = logging.getLogger(__name__)

    ctx = parse_args()

    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
    )

    info = {}
    if ctx.archive:
        ctx.config = config_file(ctx.archive + '/config.yaml')
        ifn = os.path.join(ctx.archive, 'info.yaml')
        if os.path.exists(ifn):
            with file(ifn, 'r') as fd:
                info = yaml.load(fd.read())
        if not ctx.pid:
            ctx.pid = info.get('pid')
            if not ctx.pid:
                ctx.pid = int(open(ctx.archive + '/pid').read().rstrip('\n'))
        if not ctx.owner:
            ctx.owner = info.get('owner')
            if not ctx.owner:
                ctx.owner = open(ctx.archive + '/owner').read().rstrip('\n')
        # matches the -n/--name argument; used later to filter locked targets
        ctx.name = info.get('name')

    from teuthology.misc import read_config
    read_config(ctx)

    log.info(
        '\n '.join(
            ['targets:', ] + yaml.safe_dump(
                ctx.config['targets'],
                default_flow_style=False).splitlines()))

    if ctx.owner is None:
        from teuthology.misc import get_user
        ctx.owner = get_user()

    if ctx.pid:
        if ctx.archive:
            log.info('Killing teuthology process at pid %d', ctx.pid)
            os.system('grep -q %s /proc/%d/cmdline && sudo kill %d' % (
                ctx.archive,
                ctx.pid,
                ctx.pid))
        else:
            import subprocess
            subprocess.check_call(["kill", "-9", str(ctx.pid)])

    nuke(ctx, log, ctx.unlock, ctx.synch_clocks, ctx.reboot_all, ctx.noipmi)


def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True,
         noipmi=False):
    from teuthology.parallel import parallel
    from teuthology.lock import list_locks
    total_unnuked = {}
    targets = dict(ctx.config['targets'])
    if ctx.name:
        log.info('Checking targets against current locks')
        locks = list_locks(ctx)
        # Remove targets whose description doesn't match the archive name.
        for lock in locks:
            for target in targets:
                if target == lock['name']:
                    if ctx.name not in lock['description']:
                        del ctx.config['targets'][lock['name']]
                        log.info(
                            "Not nuking %s because description doesn't match",
                            lock['name'])
    with parallel() as p:
        for target, hostkey in ctx.config['targets'].iteritems():
            p.spawn(
                nuke_one,
                ctx,
                {target: hostkey},
                log,
                should_unlock,
                sync_clocks,
                reboot_all,
                ctx.config.get('check-locks', True),
                noipmi,
            )
        for unnuked in p:
            if unnuked:
                total_unnuked.update(unnuked)
    if total_unnuked:
        log.error('Could not nuke the following targets:\n' +
                  '\n '.join(['targets:', ] +
                             yaml.safe_dump(
                                 total_unnuked,
                                 default_flow_style=False).splitlines()))


def nuke_one(ctx, targets, log, should_unlock, synch_clocks, reboot_all,
             check_locks, noipmi):
    from teuthology.lock import unlock
    ret = None
    ctx = argparse.Namespace(
        config=dict(targets=targets),
        owner=ctx.owner,
        check_locks=check_locks,
        synch_clocks=synch_clocks,
        reboot_all=reboot_all,
        teuthology_config=ctx.teuthology_config,
        name=ctx.name,
        noipmi=noipmi,
    )
    try:
        nuke_helper(ctx, log)
    except Exception:
        log.exception('Could not nuke all targets in %s' % targets)
        # not re-raising the exception so that parallel calls aren't killed
        ret = targets
    else:
        if should_unlock:
            for target in targets.keys():
                unlock(ctx, target, ctx.owner)
    return ret


def nuke_helper(ctx, log):
    # ensure node is up with ipmi
    from teuthology.orchestra import remote

    (target,) = ctx.config['targets'].keys()
    host = target.split('@')[-1]
    shortname = host.split('.')[0]
    if 'vpm' in shortname:
        return
    log.debug('shortname: %s' % shortname)
    log.debug('{ctx}'.format(ctx=ctx))
    if not ctx.noipmi and 'ipmi_user' in ctx.teuthology_config:
        console = remote.getRemoteConsole(
            name=host,
            ipmiuser=ctx.teuthology_config['ipmi_user'],
            ipmipass=ctx.teuthology_config['ipmi_password'],
            ipmidomain=ctx.teuthology_config['ipmi_domain'])
        cname = '{host}.{domain}'.format(
            host=shortname,
            domain=ctx.teuthology_config['ipmi_domain'])
        log.info('checking console status of %s' % cname)
        if not console.check_status():
            # not powered on or can't get IPMI status. Try to power on
            console.power_on()
            # try to get status again, waiting for login prompt this time
            log.info('checking console status of %s' % cname)
            if not console.check_status(100):
                log.error('Failed to get console status for %s, '
                          'disabling console...' % cname)
            log.info('console ready on %s' % cname)
        else:
            log.info('console ready on %s' % cname)

    from teuthology.task.internal import check_lock, connect
    if ctx.check_locks:
        check_lock(ctx, None)
    connect(ctx, None)

    log.info('Unmount ceph-fuse and killing daemons...')
    shutdown_daemons(ctx, log)
    log.info('All daemons killed.')

    need_reboot = find_kernel_mounts(ctx, log)

    # no need to unmount anything if we're rebooting
    if ctx.reboot_all:
        need_reboot = ctx.cluster.remotes.keys()
    else:
        log.info('Unmount any osd data directories...')
        remove_osd_mounts(ctx, log)
        log.info('Unmount any osd tmpfs dirs...')
        remove_osd_tmpfs(ctx, log)
        #log.info('Dealing with any kernel mounts...')
        #remove_kernel_mounts(ctx, need_reboot, log)

    if need_reboot:
        reboot(ctx, need_reboot, log)
    log.info('All kernel mounts gone.')

    log.info('Synchronizing clocks...')
    if ctx.synch_clocks:
        need_reboot = ctx.cluster.remotes.keys()
        synch_clocks(need_reboot, log)

    log.info('Making sure firmware.git is not locked...')
    ctx.cluster.run(args=['sudo', 'rm', '-f',
                          '/lib/firmware/updates/.git/index.lock', ])

    log.info('Resetting syslog output locations...')
    reset_syslog_dir(ctx, log)
    log.info('Clearing filesystem of test data...')
    remove_testing_tree(ctx, log)
    log.info('Filesystem Cleared.')
    remove_installed_packages(ctx, log)
    log.info('Installed packages removed.')
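

# The teuthology-nuke console script normally calls main() through a
# setuptools entry point; the guard below is only a convenience sketch
# (an assumption, not part of the original module) so the file can also
# be invoked directly during development.
if __name__ == '__main__':
    main()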