qa: bring qa/suites/upgrade/reef-x

Based on quincy-x.

```
$ cp -R qa/suites/upgrade/quincy-x/ qa/suites/upgrade/reef-x
$ git add qa/suites/upgrade/reef-x
$ git mv qa/suites/upgrade/reef-x/filestore-remove-check/1-ceph-install/quincy.yaml qa/suites/upgrade/reef-x/filestore-remove-check/1-ceph-install/reef.yaml
$ find qa/suites/upgrade/reef-x/ -type f -exec sed -i 's/quincy/reef/g' {} +
```
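
A quick sanity check (illustrative only, not part of the commit) is to confirm the sed pass left no stale release names behind:

```
$ grep -rni quincy qa/suites/upgrade/reef-x/ || echo "no stale quincy references"
```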

A note from the rebase: changes from 05e24270a2
have been pulled in.

Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>

@@ -107,7 +107,7 @@ QA suite
- [x] create qa/workunits/test_telemetry_(X-1).sh
- [x] create qa/workunits/test_telemetry_(X-1)_x.sh
-- [ ] create qa/suites/upgrade/(X-1)-x
+- [x] create qa/suites/upgrade/(X-1)-x
- [x] remove qa/suites/upgrade/(X-3)-x-*
- [x] create qa/releases/X.yaml
- [x] create qa/suites/rados/thrash-old-clients/1-install/(X-1).yaml

@@ -0,0 +1 @@
../.qa

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,6 @@
openstack:
  - machine:
      disk: 100 # GB
  - volumes: # attached to each instance
      count: 4
      size: 30 # GB

@@ -0,0 +1,33 @@
meta:
- desc: |
    Run ceph on one node,
    use xfs beneath the osds. The upgrade to reef
    should fail to start the osds with filestore.
overrides:
  ceph:
    mon_bind_msgr2: false
    mon_bind_addrvec: false
    mon-health-to-clog: false
    wait-for-healthy: false
    wait-for-osds-up: false
    wait-for-scrub: false
    skip_stop_pg_num_changes: true
    fs: xfs
    log-ignorelist:
      - overall HEALTH_
      - \(MON_DOWN\)
      - \(MGR_DOWN\)
      - slow request
      - \(MON_MSGR2_NOT_ENABLED\)
    conf:
      global:
        enable experimental unrecoverable data corrupting features: "*"
        mon warn on msgr2 not enabled: false
      mon:
        mon warn on osd down out interval zero: false
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,32 @@
meta:
- desc: install ceph/reef latest
tasks:
- install:
    exclude_packages:
      - ceph-mgr-cephadm
      - cephadm
      - libcephfs-dev
    branch: reef
- print: "**** done install reef"
- ceph:
    create_rbd_pool: false
    conf:
      global:
        bluestore_warn_on_legacy_statfs: false
        bluestore warn on no per pool omap: false
        mon pg warn min per osd: 0
      mon:
        mon_warn_on_insecure_global_id_reclaim: false
        mon_warn_on_insecure_global_id_reclaim_allowed: false
    log-ignorelist:
      - Not found or unloadable
      - evicting unresponsive client
- exec:
    osd.0:
      - ceph osd require-osd-release reef
- print: "**** done ceph"
overrides:
  ceph:
    conf:
      mon:
        mon warn on osd down out interval zero: false
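
Hypothetical follow-up, not part of the suite: after the `ceph osd require-osd-release reef` step above, the flag can be double-checked on a live cluster with:

```
$ ceph osd dump | grep require_osd_release
```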

@@ -0,0 +1,20 @@
meta:
- desc: |
    install upgrade ceph/-x on cluster
    restart: mons, osd.*
tasks:
- install.upgrade:
    mon.a:
- exec:
    osd.0:
      - ceph osd require-osd-release reef
- print: "**** done install.upgrade of nodes"
- ceph.restart:
    daemons: [mon.a,mgr.x,osd.0,osd.1,osd.2]
    mon-health-to-clog: false
    wait-for-healthy: false
    wait-for-osds-up: false
    wait-for-scrub: false
    skip_stop_pg_num_changes: true
    expected-failure: "FileStore has been deprecated and is no longer supported"
- print: "**** done ceph.restart of all mons and osds"

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,14 @@
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true
  ceph-deploy:
    fs: xfs
    filestore: True
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true

@@ -0,0 +1,9 @@
os_type: ubuntu
os_version: "20.04"
# the normal ubuntu 20.04 kernel (5.4.0-88-generic currently) has a bug that prevents the nvme_loop
# from behaving. I think it is this:
# https://lkml.org/lkml/2020/9/21/1456
# (at least, that is the symptom: nvme nvme1: Connect command failed, error wo/DNR bit: 880)
overrides:
  kernel:
    hwe: true

@@ -9,4 +9,6 @@ workload:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"
- print: "**** done end test_rbd_api.yaml"

@@ -14,5 +14,7 @@ workload:
    clients:
      client.0:
        - rbd/test_librbd_python.sh
    env:
      RBD_FEATURES: "61"
- print: "**** done end test_rbd_python.yaml"

@@ -7,4 +7,6 @@ first-half-tasks:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"
- print: "**** done rbd/test_librbd.sh 7-workload"

@@ -7,4 +7,6 @@ stress-tasks:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"
- print: "**** done rbd/test_librbd.sh 7-workload"
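
For reference (not part of this change): once merged, a suite like this is typically scheduled with teuthology-suite; the branch and machine type below are placeholders:

```
$ teuthology-suite --suite upgrade/reef-x --ceph main --machine-type smithi
```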