Merge pull request #37678 from ideepika/wip-drop-hammer-from-qa

qa: drop hammer branch qa tests

Reviewed-by: Josh Durgin <jdurgin@redhat.com>
Reviewed-by: Neha Ojha <nojha@redhat.com>
Neha Ojha 2020-10-23 08:35:02 -07:00 committed by GitHub
commit 27871caa7f
5 changed files with 8 additions and 132 deletions

View File

@@ -1,98 +0,0 @@
#
# Test the expected behavior of the
#
# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3
#
# feature.
#
roles:
- - mon.a
  - mon.b
  - osd.0
  - osd.1
- - osd.2
  - mon.c
  - mgr.x
tasks:
#
# Install hammer
#
- install:
    branch: hammer
- ceph:
    fs: xfs
#
# We don't need mon.c for now: it will be used later to make sure an old
# mon cannot join the quorum once the feature has been activated
#
- ceph.stop:
    daemons: [mon.c]
- exec:
    mon.a:
      - |-
        ceph osd erasure-code-profile set WRONG plugin=WRONG
        ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
#
# Partial upgrade, osd.2 is not upgraded
#
- install.upgrade:
    osd.0:
#
# a is the leader
#
- ceph.restart:
    daemons: [mon.a]
    wait-for-healthy: false
- exec:
    mon.a:
      - |-
        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster"
- ceph.restart:
    daemons: [mon.b, osd.1, osd.0]
    wait-for-healthy: false
    wait-for-osds-up: true
#
# The shec plugin cannot be used because osd.2 is not upgraded yet
# and would crash.
#
- exec:
    mon.a:
      - |-
        ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2"
#
# Taking osd.2 out, the rest of the cluster is upgraded
#
- ceph.stop:
    daemons: [osd.2]
- sleep:
    duration: 60
#
# Creating an erasure code profile using the shec plugin now works
#
- exec:
    mon.a:
      - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec"
#
# osd.2 won't be able to join the cluster because it does not support the feature
#
- ceph.restart:
    daemons: [osd.2]
    wait-for-healthy: false
- sleep:
    duration: 60
- exec:
    osd.2:
      - |-
        grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log
#
# mon.c won't be able to join the quorum because it does not support the feature
#
- ceph.restart:
    daemons: [mon.c]
    wait-for-healthy: false
- sleep:
    duration: 60
- exec:
    mon.c:
      - |-
        grep "missing.*feature" /var/log/ceph/ceph-mon.c.log

View File

@@ -24,9 +24,6 @@ echo "Scheduling " $2 " branch"
 if [ $2 = "master" ] ; then
     # run master branch with --newest option looking for good sha1 7 builds back
     teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5
-elif [ $2 = "hammer" ] ; then
-    # run hammer branch with less jobs
-    teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $4 ~/vps.yaml $5
 elif [ $2 = "jewel" ] ; then
     # run jewel branch with /40 jobs
     teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5
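For context on the --subset arithmetic kept above: the week of the year from date +%U, taken mod 4, rotates through four weekly windows of seven slots, and the script's first argument picks a slot inside the current window, while the divisor (/28 for master, /56 for the dropped hammer runs, /40 for jewel) decides how small a fraction of the suite gets scheduled. A short Python sketch of the index calculation; the function name is mine, not the script's:

import datetime

def subset_index(offset, today=None):
    # Mirror the shell expression (($(date +%U) % 4) * 7) + $1:
    # the week number (Sunday-based, like date +%U) mod 4 selects one of four
    # weekly windows of seven slots, and the caller-supplied offset selects a
    # slot within that window.
    today = today or datetime.date.today()
    week = int(today.strftime("%U"))
    return (week % 4) * 7 + offset

# Example: in week 42 with offset 3, (42 % 4) * 7 + 3 == 17, so the master
# run above would schedule --subset 17/28 and the removed hammer run 17/56.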

View File

@@ -29,9 +29,6 @@ if [ $2 = "master" ] ; then
     # run master branch with --newest option looking for good sha1 7 builds back with /100000 jobs
     # using '-p 80 --force-priority' as an exception ATM
     teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/100000 --newest 7 -e $5 $7 -p 80 --force-priority
-elif [ $2 = "hammer" ] ; then
-    # run hammer branch with less jobs
-    teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $5 $7
 elif [ $2 = "jewel" ] ; then
     # run jewel branch with /40 jobs
     teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $5 $7

View File

@@ -1,10 +0,0 @@
roles:
- [mon.a, mgr.x, client.0]
tasks:
- install:
    # branch has precedence over sha1
    branch: hammer
    sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
- exec:
    client.0:
      - ceph --version | grep 'version 0.94'
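
The deleted job above pins both a branch and a sha1 on purpose: branch takes precedence, so the install should yield hammer (a 0.94.x build) rather than the dumpling sha1, and the final exec greps the version string to prove it. A rough Python sketch of that final check, assuming a locally installed ceph binary; the helper is illustrative, not teuthology code:

import re
import subprocess

def installed_version_is_hammer():
    # Hammer releases carry 0.94.x version strings, which is what the removed
    # job's check (ceph --version | grep 'version 0.94') keyed on.
    out = subprocess.run(["ceph", "--version"],
                         capture_output=True, text=True).stdout
    return re.search(r"version 0\.94\.", out) is not None

if __name__ == "__main__":
    print("hammer installed:", installed_version_is_hammer())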

View File

@@ -974,14 +974,9 @@ def cluster(ctx, config):
 )
 mnt_point = DATA_PATH.format(
     type_='osd', cluster=cluster_name, id_=id_)
-try:
-    remote.run(args=[
-        'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
-    ])
-except run.CommandFailedError as e:
-    # hammer does not have ceph user, so ignore this error
-    log.info('ignoring error when chown ceph:ceph,'
-             'probably installing hammer: %s', e)
+remote.run(args=[
+    'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+])
 log.info('Reading keys from all nodes...')
 keys_fp = BytesIO()
@@ -1071,14 +1066,9 @@ def cluster(ctx, config):
     '--keyring', keyring_path,
 ],
 )
-try:
-    remote.run(args=[
-        'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
-    ])
-except run.CommandFailedError as e:
-    # hammer does not have ceph user, so ignore this error
-    log.info('ignoring error when chown ceph:ceph,'
-             'probably installing hammer: %s', e)
+remote.run(args=[
+    'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
+])
 run.wait(
     mons.run(
@@ -1321,7 +1311,7 @@ def run_daemon(ctx, config, type_):
 daemon_signal = 'term'
 # create osds in order. (this only matters for pre-luminous, which might
-# be hammer, which doesn't take an id_ argument to legacy 'osd create').
+# be jewel/hammer, which doesn't take an id_ argument to legacy 'osd create').
 osd_uuids = {}
 for remote, roles_for_host in daemons.remotes.items():
     is_type_ = teuthology.is_type(type_, cluster_name)
@@ -1348,7 +1338,7 @@
         ]
     )
 except:
-    # fallback to pre-luminous (hammer or jewel)
+    # fallback to pre-luminous (jewel)
     remote.run(
         args=[
             'sudo', 'ceph', '--cluster', cluster_name,
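
The hunk above only touches the comment; the surrounding logic keeps its try-first, fall-back shape: attempt the luminous-era 'osd new' command with an explicit id, and on failure retry with the legacy 'osd create' form that pre-luminous (now just jewel) monitors understood. A condensed Python sketch of that pattern; the helper name and broad exception handling are simplifications, not teuthology's actual code:

def create_osd(remote, cluster_name, osd_uuid, osd_id):
    # Luminous and later accept an explicit id via 'osd new'; older
    # (pre-luminous, i.e. jewel) monitors only understood the legacy
    # 'osd create <uuid>' form, so any failure falls through to that.
    try:
        remote.run(args=[
            'sudo', 'ceph', '--cluster', cluster_name,
            'osd', 'new', osd_uuid, osd_id,
        ])
    except Exception:
        # fallback to pre-luminous (jewel)
        remote.run(args=[
            'sudo', 'ceph', '--cluster', cluster_name,
            'osd', 'create', osd_uuid,
        ])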