Merge PR #27291 into master

* refs/pull/27291/head:
	qa/suites/upgrade/nautilus-x-singleton: upgrade mgrs early
	qa/suites/upgrade/mimic-x-singleton: upgrade mgrs later
	qa/suites: add upgrade/nautilus-x-singleton and symlink from rados/upgrade
	qa/suites: move rados/upgrade/mimic-x-singleton to upgrade/ and symlink

Reviewed-by: Sage Weil <sage@redhat.com>
Sage Weil 2019-04-03 14:07:27 -05:00
commit 1e7629983f
64 changed files with 299 additions and 1 deletion

@@ -0,0 +1 @@
../../upgrade/mimic-x-singleton/

@@ -0,0 +1 @@
../../upgrade/nautilus-x-singleton
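(The two one-line files above are the contents of the new symlinks under qa/suites/rados/upgrade/, per the commit messages in the merge; the link names are assumed to match the suite names:

    qa/suites/rados/upgrade/mimic-x-singleton    -> ../../upgrade/mimic-x-singleton/
    qa/suites/rados/upgrade/nautilus-x-singleton -> ../../upgrade/nautilus-x-singleton

so scheduling rados/upgrade keeps picking up both suites from their new home under qa/suites/upgrade/.)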

@@ -17,7 +17,6 @@ overrides:
         debug mds: 20
 roles:
 - - mon.a
-  - mgr.x
   - mds.a
   - osd.0
   - osd.1
@@ -29,6 +28,7 @@ roles:
   - osd.6
   - osd.7
 - - mon.c
+  - mgr.x
   - mgr.y
   - osd.8
   - osd.9

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,6 @@
openstack:
  - machine:
      disk: 100 # GB
  - volumes: # attached to each instance
      count: 3
      size: 30 # GB

@@ -0,0 +1,37 @@
meta:
- desc: |
   Run ceph on three nodes,
   with a separate client-only node.
   Use xfs beneath the osds.
overrides:
  ceph:
    mon_bind_addrvec: false
    mon_bind_msgr2: false
    fs: xfs
    conf:
      global:
        ms dump corrupt message level: 0
        ms bind msgr2: false
      mds:
        debug ms: 1
        debug mds: 20
roles:
- - mon.a
  - mgr.x
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
- - mon.b
  - mgr.y
  - osd.4
  - osd.5
  - osd.6
  - osd.7
- - mon.c
  - osd.8
  - osd.9
  - osd.10
  - osd.11
- - client.0
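(For orientation: the roles list above pins the cluster layout that the staged restarts below rely on. A reading of that list, not part of the change:

    node 1: mon.a  mgr.x  mds.a  osd.0-3    restarted in the first batch
    node 2: mon.b  mgr.y         osd.4-7    restarted in the first batch
    node 3: mon.c                osd.8-11   upgraded last
    node 4: client.0                        drives the workloads

With one mon per node, mon.c stays up while mon.a and mon.b restart, which is why MON_DOWN is whitelisted in the install fragment rather than treated as a failure.)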

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,22 @@
overrides:
  ceph:
    log-whitelist:
    - \(MON_DOWN\)
    - \(MGR_DOWN\)
    - slow request
meta:
- desc: install ceph/nautilus latest
tasks:
- install:
    branch: nautilus
    exclude_packages:
    - librados3
    - ceph-mgr-dashboard
    - ceph-mgr-diskprediction-local
    - ceph-mgr-diskprediction-cloud
    - ceph-mgr-rook
    - ceph-mgr-ssh
    extra_packages: ['librados2']
- print: "**** done install nautilus"
- ceph:
- print: "**** done ceph"

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,20 @@
meta:
- desc: |
   install upgrade ceph/-x on the first two nodes (mon.a and mon.b)
   1st half:
   restart: mgr.x/y, mon.a/b, osd.0-7
tasks:
- install.upgrade:
    mon.a:
    mon.b:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons: [mgr.x, mgr.y]
- ceph.restart:
    daemons: [mon.a, mon.b]
    wait-for-healthy: false
    mon-health-to-clog: false
- ceph.restart:
    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7]
    wait-for-healthy: false
- print: "**** done ceph.restart 1st 2/3s"
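(This fragment is where the "upgrade mgrs early" subject of the merged branch lands: for nautilus-x the mgrs restart before the mons and osds. The overall restart order across this fragment and the finish fragment later in the diff, summarized:

    1. mgr.x, mgr.y                       mgrs first
    2. mon.a, mon.b                       two of the three mons
    3. osd.0-7                            the first two nodes' osds ("1st 2/3s")
       ... thrash and workloads run on the mixed-version cluster ...
    4. mon.c (and mgrs again), osd.8-11, mds.a, then ceph mon enable-msgr2

wait-for-healthy: false and mon-health-to-clog: false are set because a half-upgraded cluster legitimately reports unhealthy, and waiting for HEALTH_OK here would hang the job.)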

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,22 @@
meta:
- desc: |
   randomly kill and revive osd
   small chance to increase the number of pgs
overrides:
  ceph:
    log-whitelist:
    - but it is still running
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- parallel:
  - split_tasks
split_tasks:
  sequential:
  - thrashosds:
      disable_objectstore_tool_tests: true
      timeout: 1200
      chance_pgnum_grow: 1
      chance_pgpnum_fix: 1
      aggressive_pg_num_changes: false
  - print: "**** done thrashosds 3-thrash"
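(A note on the parallel/split_tasks indirection, since only this fragment defines tasks while the workload fragments below only extend split_tasks: when teuthology merges the suite's yaml fragments into one job, each workload appends to the same split_tasks.sequential list, and the single parallel task runs the combined sequence. A hand-merged sketch of the effective job, assuming teuthology's usual fragment merging -- illustrative, not a file in this change:

    tasks:
    - parallel:
      - split_tasks            # everything queued under split_tasks
    split_tasks:
      sequential:
      - thrashosds: {...}      # from this fragment; kills/revives osds
      - workunit: {...}        # cls/test_cls_rbd.sh (5-workload)
      - workunit: {...}        # rbd/import_export.sh (5-workload)
      - rados: {...}           # readwrite, snaps-few-objects (5-workload)
      - radosbench: {...}      # 7-workload
      - print: "**** done ..."

The whole sequence executes between the partial-upgrade and finish-upgrade steps, i.e. against the mixed-version cluster.)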

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,11 @@
meta:
- desc: |
   run basic cls tests for rbd
split_tasks:
  sequential:
  - workunit:
      branch: nautilus
      clients:
        client.0:
        - cls/test_cls_rbd.sh
  - print: "**** done cls/test_cls_rbd.sh 5-workload"

@@ -0,0 +1,13 @@
meta:
- desc: |
   run basic import/export cli tests for rbd
split_tasks:
  sequential:
  - workunit:
      branch: nautilus
      clients:
        client.0:
        - rbd/import_export.sh
      env:
        RBD_CREATE_ARGS: --new-format
  - print: "**** done rbd/import_export.sh 5-workload"

@@ -0,0 +1,17 @@
meta:
- desc: |
   randomized correctness test for rados operations on a replicated pool,
   using only reads, writes, and deletes
split_tasks:
  sequential:
  - full_sequential:
    - rados:
        clients: [client.0]
        ops: 4000
        objects: 500
        write_append_excl: false
        op_weights:
          read: 45
          write: 45
          delete: 10
  - print: "**** done rados/readwrite 5-workload"

@@ -0,0 +1,19 @@
meta:
- desc: |
   randomized correctness test for rados operations on a replicated pool with snapshot operations
split_tasks:
  sequential:
  - full_sequential:
    - rados:
        clients: [client.0]
        ops: 4000
        objects: 50
        write_append_excl: false
        op_weights:
          read: 100
          write: 100
          delete: 50
          snap_create: 50
          snap_remove: 50
          rollback: 50
  - print: "**** done rados/snaps-few-objects 5-workload"

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,41 @@
meta:
- desc: |
   run randomized correctness test for rados operations
   generate write load with rados bench
split_tasks:
  sequential:
  - full_sequential:
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
    - radosbench:
        clients: [client.0]
        time: 150
  - print: "**** done radosbench 7-workload"
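(Eleven identical radosbench entries are the idiom here because these fragments have no loop construct; run back to back they give 11 x 150 s = 1650 s, roughly 28 minutes of sustained bench writes against the half-upgraded cluster.)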

@@ -0,0 +1,11 @@
meta:
- desc: |
   librbd C and C++ api tests
split_tasks:
  sequential:
  - workunit:
      branch: nautilus
      clients:
        client.0:
        - rbd/test_librbd.sh
  - print: "**** done rbd/test_librbd.sh 7-workload"

@@ -0,0 +1,30 @@
meta:
- desc: |
   install upgrade on the remaining node
   restarting the remaining osds
overrides:
  ceph:
    log-whitelist:
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_
tasks:
- install.upgrade:
    mon.c:
- ceph.restart:
    daemons: [mon.c, mgr.x, mgr.y]
    wait-for-up: true
    wait-for-healthy: false
- ceph.restart:
    daemons: [osd.8, osd.9, osd.10, osd.11]
    wait-for-up: true
    wait-for-healthy: false
- ceph.restart:
    daemons: [mds.a]
    wait-for-up: true
    wait-for-healthy: false
- exec:
    mon.a:
    - ceph mon enable-msgr2
- install.upgrade:
    client.0:
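(ceph mon enable-msgr2 is deliberately the last daemon step: per the nautilus upgrade procedure it may only be run once every mon is on nautilus or later, and it makes the mons bind the msgr2 port 3300 alongside the legacy port 6789. That is also why the cluster fragment sets mon_bind_msgr2: false and ms bind msgr2: false up front, keeping the pre-upgrade daemons off msgr2 until this point.)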

@@ -0,0 +1 @@
../../../releases/octopus.yaml

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,9 @@
meta:
- desc: |
   librbd python api tests
tasks:
- workunit:
    clients:
      client.0:
      - rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

@@ -0,0 +1,11 @@
meta:
- desc: |
   swift api tests for rgw
tasks:
- rgw:
    client.0:
- print: "**** done rgw 9-workload"
- swift:
    client.0:
      rgw_server: client.0
- print: "**** done swift 9-workload"

@@ -0,0 +1,16 @@
meta:
- desc: |
   randomized correctness test for rados operations on a replicated pool with snapshot operations
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_append_excl: false
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

@@ -0,0 +1 @@
.qa/distros/supported-random-distro$

@@ -0,0 +1 @@
.qa/tasks/thrashosds-health.yaml