From 7ef18559cbf68c7a09d73c528db600a3c959ac2c Mon Sep 17 00:00:00 2001
From: Deepika Upadhyay
Date: Thu, 15 Oct 2020 17:32:06 +0530
Subject: [PATCH] qa: drop hammer branch qa tests

fixes: https://tracker.ceph.com/issues/47731
Signed-off-by: Deepika Upadhyay
---
 qa/erasure-code/ec-feature-plugins-v3.yaml | 98 -------------------
 qa/machine_types/schedule_rados_ovh.sh     |  3 -
 qa/machine_types/schedule_subset.sh        |  3 -
 .../buildpackages/tasks/branch.yaml         | 10 --
 qa/tasks/ceph.py                            | 26 ++---
 5 files changed, 8 insertions(+), 132 deletions(-)
 delete mode 100644 qa/erasure-code/ec-feature-plugins-v3.yaml
 delete mode 100644 qa/suites/teuthology/buildpackages/tasks/branch.yaml

diff --git a/qa/erasure-code/ec-feature-plugins-v3.yaml b/qa/erasure-code/ec-feature-plugins-v3.yaml
deleted file mode 100644
index 332b9440728..00000000000
--- a/qa/erasure-code/ec-feature-plugins-v3.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Test the expected behavior of the
-#
-# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3
-#
-# feature.
-#
-roles:
-- - mon.a
-  - mon.b
-  - osd.0
-  - osd.1
-- - osd.2
-  - mon.c
-  - mgr.x
-tasks:
-#
-# Install hammer
-#
-- install:
-    branch: hammer
-- ceph:
-    fs: xfs
-#
-# We don't need mon.c for now: it will be used later to make sure an old
-# mon cannot join the quorum once the feature has been activated
-#
-- ceph.stop:
-    daemons: [mon.c]
-- exec:
-    mon.a:
-      - |-
-        ceph osd erasure-code-profile set WRONG plugin=WRONG
-        ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
-#
-# Partial upgrade, osd.2 is not upgraded
-#
-- install.upgrade:
-    osd.0:
-#
-# a is the leader
-#
-- ceph.restart:
-    daemons: [mon.a]
-    wait-for-healthy: false
-- exec:
-    mon.a:
-      - |-
-        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster"
-- ceph.restart:
-    daemons: [mon.b, osd.1, osd.0]
-    wait-for-healthy: false
-    wait-for-osds-up: true
-#
-# The shec plugin cannot be used because osd.2 is not upgraded yet
-# and would crash.
-#
-- exec:
-    mon.a:
-      - |-
-        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2"
-#
-# Taking osd.2 out, the rest of the cluster is upgraded
-#
-- ceph.stop:
-    daemons: [osd.2]
-- sleep:
-    duration: 60
-#
-# Creating an erasure code profile using the shec plugin now works
-#
-- exec:
-    mon.a:
-      - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec"
-#
-# osd.2 won't be able to join the because is does not support the feature
-#
-- ceph.restart:
-    daemons: [osd.2]
-    wait-for-healthy: false
-- sleep:
-    duration: 60
-- exec:
-    osd.2:
-      - |-
-        grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log
-#
-# mon.c won't be able to join the because it does not support the feature
-#
-- ceph.restart:
-    daemons: [mon.c]
-    wait-for-healthy: false
-- sleep:
-    duration: 60
-- exec:
-    mon.c:
-      - |-
-        grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
diff --git a/qa/machine_types/schedule_rados_ovh.sh b/qa/machine_types/schedule_rados_ovh.sh
index cefa98b6a95..aeb37162e3f 100755
--- a/qa/machine_types/schedule_rados_ovh.sh
+++ b/qa/machine_types/schedule_rados_ovh.sh
@@ -24,9 +24,6 @@ echo "Scheduling " $2 " branch"
 if [ $2 = "master" ] ; then
 	# run master branch with --newest option looking for good sha1 7 builds back
 	teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5
-elif [ $2 = "hammer" ] ; then
-	# run hammer branch with less jobs
-	teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $4 ~/vps.yaml $5
 elif [ $2 = "jewel" ] ; then
 	# run jewel branch with /40 jobs
 	teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5
diff --git a/qa/machine_types/schedule_subset.sh b/qa/machine_types/schedule_subset.sh
index 32fd2e34189..0970b294844 100755
--- a/qa/machine_types/schedule_subset.sh
+++ b/qa/machine_types/schedule_subset.sh
@@ -29,9 +29,6 @@ if [ $2 = "master" ] ; then
 	# run master branch with --newest option looking for good sha1 7 builds back with /100000 jobs
 	# using '-p 80 --force-priority' as an execption ATM
 	teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/100000 --newest 7 -e $5 $7 -p 80 --force-priority
-elif [ $2 = "hammer" ] ; then
-	# run hammer branch with less jobs
-	teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $5 $7
 elif [ $2 = "jewel" ] ; then
 	# run jewel branch with /40 jobs
 	teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $5 $7
diff --git a/qa/suites/teuthology/buildpackages/tasks/branch.yaml b/qa/suites/teuthology/buildpackages/tasks/branch.yaml
deleted file mode 100644
index 1dad96f3a3a..00000000000
--- a/qa/suites/teuthology/buildpackages/tasks/branch.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-roles:
-    - [mon.a, mgr.x, client.0]
-tasks:
-    - install:
-        # branch has precedence over sha1
-        branch: hammer
-        sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
-    - exec:
-        client.0:
-            - ceph --version | grep 'version 0.94'
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index 798fe7c50f2..ceed1d5705c 100644
--- a/qa/tasks/ceph.py
+++ b/qa/tasks/ceph.py
@@ -974,14 +974,9 @@ def cluster(ctx, config):
                 )
             mnt_point = DATA_PATH.format(
                 type_='osd', cluster=cluster_name, id_=id_)
-            try:
-                remote.run(args=[
-                    'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
-                ])
-            except run.CommandFailedError as e:
-                # hammer does not have ceph user, so ignore this error
-                log.info('ignoring error when chown ceph:ceph,'
-                         'probably installing hammer: %s', e)
+            remote.run(args=[
+                'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+            ])
 
     log.info('Reading keys from all nodes...')
     keys_fp = BytesIO()
@@ -1071,14 +1066,9 @@ def cluster(ctx, config):
                     '--keyring', keyring_path,
                 ],
             )
-            try:
-                remote.run(args=[
-                    'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
-                ])
-            except run.CommandFailedError as e:
-                # hammer does not have ceph user, so ignore this error
-                log.info('ignoring error when chown ceph:ceph,'
-                         'probably installing hammer: %s', e)
+            remote.run(args=[
+                'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+            ])
 
     run.wait(
         mons.run(
@@ -1321,7 +1311,7 @@ def run_daemon(ctx, config, type_):
         daemon_signal = 'term'
 
     # create osds in order. (this only matters for pre-luminous, which might
-    # be hammer, which doesn't take an id_ argument to legacy 'osd create').
+    # be jewel/hammer, which doesn't take an id_ argument to legacy 'osd create').
     osd_uuids = {}
     for remote, roles_for_host in daemons.remotes.items():
         is_type_ = teuthology.is_type(type_, cluster_name)
@@ -1348,7 +1338,7 @@ def run_daemon(ctx, config, type_):
                     ]
                 )
             except:
-                # fallback to pre-luminous (hammer or jewel)
+                # fallback to pre-luminous (jewel)
                 remote.run(
                     args=[
                         'sudo', 'ceph', '--cluster', cluster_name,