qa: bring qa/suites/upgrade/reef-x
Based on quincy-x.
```
$ cp -R qa/suites/upgrade/quincy-x/ qa/suites/upgrade/reef-x
$ git add qa/suites/upgrade/reef-x
$ git mv qa/suites/upgrade/reef-x/filestore-remove-check/1-ceph-install/quincy.yaml qa/suites/upgrade/reef-x/filestore-remove-check/1-ceph-install/reef.yaml
$ find qa/suites/upgrade/reef-x/ -type f -exec sed -i 's/quincy/reef/g' {} +
```
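A quick sanity check (not part of the commit itself) that the substitution above left no stale references behind:

```
$ grep -rn quincy qa/suites/upgrade/reef-x/   # should produce no output
```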
A note from rebase: changes from 05e24270a2
have been pulled in.
Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
parent b35871b920
commit 7af05d0944

@@ -107,7 +107,7 @@ QA suite
 - [x] create qa/workunits/test_telemetry_(X-1).sh
 - [x] create qa/workunits/test_telemetry_(X-1)_x.sh
-- [ ] create qa/suites/upgrade/(X-1)-x
+- [x] create qa/suites/upgrade/(X-1)-x
 - [x] remove qa/suites/upgrade/(X-3)-x-*
 - [x] create qa/releases/X.yaml
 - [x] create qa/suites/rados/thrash-old-clients/1-install/(X-1).yaml

qa/suites/upgrade/reef-x/filestore-remove-check/% (new, empty file)

qa/suites/upgrade/reef-x/filestore-remove-check/.qa (new symbolic link)
@@ -0,0 +1 @@
../.qa

qa/suites/upgrade/reef-x/filestore-remove-check/0-cluster/.qa (new symbolic link)
@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,6 @@
openstack:
  - machine:
      disk: 100 # GB
  - volumes: # attached to each instance
      count: 4
      size: 30 # GB

@@ -0,0 +1,33 @@
meta:
- desc: |
    Run ceph on one nodes,
    Use xfs beneath the osds. upgrade to reef
    should fail to start the osds with filestore
overrides:
  ceph:
    mon_bind_msgr2: false
    mon_bind_addrvec: false
    mon-health-to-clog: false
    wait-for-healthy: false
    wait-for-osds-up: false
    wait-for-scrub: false
    skip_stop_pg_num_changes: true
    fs: xfs
    log-ignorelist:
      - overall HEALTH_
      - \(MON_DOWN\)
      - \(MGR_DOWN\)
      - slow request
      - \(MON_MSGR2_NOT_ENABLED\)
    conf:
      global:
        enable experimental unrecoverable data corrupting features: "*"
        mon warn on msgr2 not enabled: false
      mon:
        mon warn on osd down out interval zero: false
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,32 @@
meta:
- desc: install ceph/reef latest
tasks:
- install:
    exclude_packages:
      - ceph-mgr-cephadm
      - cephadm
      - libcephfs-dev
    branch: reef
- print: "**** done install reef"
- ceph:
    create_rbd_pool: false
    conf:
      global:
        bluestore_warn_on_legacy_statfs: false
        bluestore warn on no per pool omap: false
        mon pg warn min per osd: 0
      mon:
        mon_warn_on_insecure_global_id_reclaim: false
        mon_warn_on_insecure_global_id_reclaim_allowed: false
    log-ignorelist:
      - Not found or unloadable
      - evicting unresponsive client
- exec:
    osd.0:
      - ceph osd require-osd-release reef
- print: "**** done ceph"
overrides:
  ceph:
    conf:
      mon:
        mon warn on osd down out interval zero: false
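
As an aside (not part of this diff): the `ceph osd require-osd-release reef` step above raises the cluster's minimum required OSD release; on a running cluster the resulting setting can be read back from the OSD map, e.g.:

```
$ ceph osd dump | grep require_osd_release
require_osd_release reef
```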

@@ -0,0 +1,20 @@
meta:
- desc: |
    install upgrade ceph/-x on cluster
    restart : mons, osd.*
tasks:
- install.upgrade:
    mon.a:
- exec:
    osd.0:
      - ceph osd require-osd-release reef
- print: "**** done install.upgrade of nodes"
- ceph.restart:
    daemons: [mon.a,mgr.x,osd.0,osd.1,osd.2]
    mon-health-to-clog: false
    wait-for-healthy: false
    wait-for-osds-up: false
    wait-for-scrub: false
    skip_stop_pg_num_changes: true
    expected-failure: "FileStore has been deprecated and is no longer supported"
- print: "**** done ceph.restart of all mons and osds"

qa/suites/upgrade/reef-x/filestore-remove-check/objectstore/.qa (new symbolic link)
@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,14 @@
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true
  ceph-deploy:
    fs: xfs
    filestore: True
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true

@@ -0,0 +1,9 @@
os_type: ubuntu
os_version: "20.04"
# the normal ubuntu 20.04 kernel (5.4.0-88-generic currently) have a bug that prevents the nvme_loop
# from behaving. I think it is this:
# https://lkml.org/lkml/2020/9/21/1456
# (at least, that is the symptom: nvme nvme1: Connect command failed, error wo/DNR bit: 880)
overrides:
  kernel:
    hwe: true

@@ -9,4 +9,6 @@ workload:
     clients:
       client.0:
         - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
 - print: "**** done end test_rbd_api.yaml"
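
An aside on the value added here and in the following hunks: RBD_FEATURES is a bitmask of librbd feature flags. Assuming the usual bit assignments (layering=1, exclusive-lock=4, object-map=8, fast-diff=16, deep-flatten=32), "61" works out to:

```
$ echo $((1 + 4 + 8 + 16 + 32))   # layering + exclusive-lock + object-map + fast-diff + deep-flatten
61
```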

@@ -14,5 +14,7 @@ workload:
     clients:
       client.0:
         - rbd/test_librbd_python.sh
+    env:
+      RBD_FEATURES: "61"
 - print: "**** done end test_rbd_python.yaml"

@@ -7,4 +7,6 @@ first-half-tasks:
     clients:
       client.0:
         - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
 - print: "**** done rbd/test_librbd.sh 7-workload"

@@ -7,4 +7,6 @@ stress-tasks:
     clients:
       client.0:
         - rbd/test_librbd.sh
+    env:
+      RBD_FEATURES: "61"
 - print: "**** done rbd/test_librbd.sh 7-workload"