qa: use skipTest method instead of exception

This is the recommended method to skip a test according to [1]. It also lets us
avoid an unnecessary import.

[1] https://docs.python.org/2/library/unittest.html#unittest.TestCase.skipTest
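
For illustration (a minimal sketch, not from the commit itself; the names below are
hypothetical): skipTest() raises unittest.SkipTest internally, so a test module needs
neither the import nor an explicit raise:

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_requires_feature(self):
            feature_available = False  # hypothetical precondition for this sketch
            if not feature_available:
                # Equivalent to `raise unittest.SkipTest(...)`, but with no import
                # and the intent expressed through the TestCase API.
                self.skipTest("feature not available")
            self.assertTrue(feature_available)

    if __name__ == "__main__":
        unittest.main()

Calling skipTest() marks only the current test as skipped; unittest reports it in the
result summary rather than as a failure.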

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Patrick Donnelly, 2019-04-24 09:35:46 -07:00
commit 1071f73c76, parent dd93973894
12 changed files with 20 additions and 34 deletions

@@ -1,6 +1,5 @@
 import unittest
-from unittest import case
 import time
 import logging

@@ -40,7 +39,7 @@ class CephTestCase(unittest.TestCase):
         if objectstore != "memstore":
             # You certainly *could* run this on a real OSD, but you don't want to sit
             # here for hours waiting for the test to fill up a 1TB drive!
-            raise case.SkipTest("Require `memstore` OSD backend (test " \
+            raise self.skipTest("Require `memstore` OSD backend (test " \
                     "would take too long on full sized OSDs")

@@ -1,6 +1,5 @@
 import json
 import logging
-from unittest import case
 from tasks.ceph_test_case import CephTestCase
 import os
 import re

@@ -64,12 +63,12 @@ class CephFSTestCase(CephTestCase):
         super(CephFSTestCase, self).setUp()
         if len(self.mds_cluster.mds_ids) < self.MDSS_REQUIRED:
-            raise case.SkipTest("Only have {0} MDSs, require {1}".format(
+            self.skipTest("Only have {0} MDSs, require {1}".format(
                 len(self.mds_cluster.mds_ids), self.MDSS_REQUIRED
             ))
         if len(self.mounts) < self.CLIENTS_REQUIRED:
-            raise case.SkipTest("Only have {0} clients, require {1}".format(
+            self.skipTest("Only have {0} clients, require {1}".format(
                 len(self.mounts), self.CLIENTS_REQUIRED
             ))

@@ -78,11 +77,11 @@ class CephFSTestCase(CephTestCase):
             # kclient kill() power cycles nodes, so requires clients to each be on
             # their own node
             if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname:
-                raise case.SkipTest("kclient clients must be on separate nodes")
+                self.skipTest("kclient clients must be on separate nodes")
         if self.REQUIRE_ONE_CLIENT_REMOTE:
             if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
-                raise case.SkipTest("Require first client to be on separate server from MDSs")
+                self.skipTest("Require first client to be on separate server from MDSs")
         # Create friendly mount_a, mount_b attrs
         for i in range(0, self.CLIENTS_REQUIRED):

@@ -150,7 +149,7 @@ class CephFSTestCase(CephTestCase):
         if self.REQUIRE_RECOVERY_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
-                raise case.SkipTest("Recovery filesystem requires a primary filesystem as well")
+                self.skipTest("Recovery filesystem requires a primary filesystem as well")
             self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
                 'enable_multiple', 'true',
                 '--yes-i-really-mean-it')

@@ -2,7 +2,6 @@
 import os
 import time
 from textwrap import dedent
-from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology

@@ -17,7 +16,7 @@ class TestCapFlush(CephFSTestCase):
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
         dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
         py_script = dedent("""

@@ -6,7 +6,6 @@ exceed the limits of how many caps/inodes they should hold.
 import logging
 from textwrap import dedent
-from unittest import SkipTest
 from teuthology.orchestra.run import CommandFailedError
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
 from tasks.cephfs.fuse_mount import FuseMount

@@ -116,7 +115,7 @@ class TestClientLimits(CephFSTestCase):
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
         self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
         self.mount_a.teardown()

@@ -158,7 +157,7 @@ class TestClientLimits(CephFSTestCase):
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
         self.set_conf('client', 'client inject fixed oldest tid', 'true')
         self.mount_a.teardown()

@@ -183,7 +182,7 @@ class TestClientLimits(CephFSTestCase):
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
         if mount_subdir:
             # fuse assigns a fix inode number (1) to root inode. But in mounting into

@@ -14,8 +14,6 @@ from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.packaging import get_package_version
-from unittest import SkipTest
-
 log = logging.getLogger(__name__)

@@ -488,7 +486,7 @@ class TestClientRecovery(CephFSTestCase):
     def test_stale_renew(self):
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to handle signal STOP/CONT")
+            self.skipTest("Require FUSE client to handle signal STOP/CONT")
         session_timeout = self.fs.get_var("session_timeout")

@@ -525,7 +523,7 @@ class TestClientRecovery(CephFSTestCase):
         Check that abort_conn() skips closing mds sessions.
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Testing libcephfs function")
+            self.skipTest("Testing libcephfs function")
         session_timeout = self.fs.get_var("session_timeout")

@@ -1,5 +1,4 @@
-from unittest import case
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from tasks.cephfs.fuse_mount import FuseMount

@@ -21,7 +20,7 @@ class TestConfigCommands(CephFSTestCase):
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise case.SkipTest("Test only applies to FUSE clients")
+            self.skipTest("Test only applies to FUSE clients")
         test_key = "client_cache_size"
         test_val = "123"

@@ -2,7 +2,6 @@ import time
 import signal
 import json
 import logging
-from unittest import case, SkipTest
 from random import randint
 from cephfs_test_case import CephFSTestCase

@@ -242,11 +241,11 @@ class TestFailover(CephFSTestCase):
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Requires FUSE client to inject client metadata")
+            self.skipTest("Requires FUSE client to inject client metadata")
         require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
         if not require_active:
-            raise case.SkipTest("fuse_require_active_mds is not set")
+            self.skipTest("fuse_require_active_mds is not set")
         grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

@@ -1,5 +1,4 @@
-from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError

@@ -21,7 +20,7 @@ class TestMisc(CephFSTestCase):
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client")
+            self.skipTest("Require FUSE client")
         # Enable debug. Client will requests CEPH_CAP_XATTR_SHARED
         # on lookup/open

@@ -1,7 +1,6 @@
 from StringIO import StringIO
 import json
 import logging
-from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from teuthology.exceptions import CommandFailedError

@@ -193,7 +192,7 @@ class TestSessionMap(CephFSTestCase):
     def test_session_reject(self):
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Requires FUSE client to inject client metadata")
+            self.skipTest("Requires FUSE client to inject client metadata")
         self.mount_a.run_shell(["mkdir", "foo"])
         self.mount_a.run_shell(["mkdir", "foo/bar"])

@@ -5,7 +5,6 @@ from textwrap import dedent
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.orchestra.run import CommandFailedError, Raw
-from unittest import SkipTest
 log = logging.getLogger(__name__)

@@ -44,7 +43,7 @@ class TestSnapshots(CephFSTestCase):
         check snaptable transcation
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to forcibly kill mount")
+            self.skipTest("Require FUSE client to forcibly kill mount")
         self.fs.set_allow_new_snaps(True);
         self.fs.set_max_mds(2)

@@ -1,5 +1,3 @@
-from unittest import case
-
 import json
 import logging

@@ -101,7 +99,7 @@ class MgrTestCase(CephTestCase):
         assert cls.mgr_cluster is not None
         if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
-            raise case.SkipTest("Only have {0} manager daemons, "
+            self.skipTest("Only have {0} manager daemons, "
                 "{1} are required".format(
                     len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
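
Aside on the hunk above (an editor's sketch with hypothetical names, not part of the
commit): skipTest is an instance method, and this block references cls, suggesting a
class-level hook. Where no TestCase instance exists, such as setUpClass, raising
unittest.SkipTest directly remains the working pattern:

    import unittest

    class ClusterTest(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            # No `self` exists in a classmethod, so skipTest() is unavailable here;
            # raising SkipTest makes unittest report every test in the class as skipped.
            mgr_count = 0  # hypothetical probe of available manager daemons
            if mgr_count < 1:
                raise unittest.SkipTest("Only have {0} manager daemons".format(mgr_count))

        def test_placeholder(self):
            self.assertTrue(True)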

@@ -2,7 +2,6 @@
 import json
 import logging
 import time
-from unittest import SkipTest
 from mgr_test_case import MgrTestCase

@@ -63,7 +62,7 @@ class TestProgress(MgrTestCase):
     def setUp(self):
         # Ensure we have at least four OSDs
         if self._osd_count() < 4:
-            raise SkipTest("Not enough OSDS!")
+            self.skipTest("Not enough OSDS!")
         # Remove any filesystems so that we can remove their pools
         if self.mds_cluster: