Merge pull request #38806 from yuriw/wip-yuriw-octopus-x-master

qa/tests: added rbd_import_export workload

Reviewed-by: Josh Durgin <jdurgin@redhat.com>
Reviewed-by: Neha Ojha <nojha@redhat.com>
Yuri Weinstein 2021-01-12 13:25:59 -08:00 committed by GitHub
commit dd0ad25540
38 changed files with 341 additions and 0 deletions

@@ -0,0 +1,14 @@
meta:
- desc: |
run basic import/export cli tests for rbd
workload:
full_sequential:
- print: "**** done start rbd_import_export.yaml"
- workunit:
branch: octopus
clients:
client.1:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done end rbd_import_export.yaml"

@@ -0,0 +1 @@
../.qa

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,6 @@
openstack:
- machine:
disk: 100 # GB
- volumes: # attached to each instance
count: 4
size: 30 # GB

@@ -0,0 +1,40 @@
meta:
- desc: |
Run ceph mons, mgr, and osds across four nodes,
with a separate client-only node.
Use xfs beneath the osds.
overrides:
ceph:
mon_bind_msgr2: false
mon_bind_addrvec: false
fs: xfs
log-ignorelist:
- overall HEALTH_
- \(MON_DOWN\)
- \(MGR_DOWN\)
- slow request
- \(MON_MSGR2_NOT_ENABLED\)
conf:
global:
enable experimental unrecoverable data corrupting features: "*"
mon warn on msgr2 not enabled: false
mon:
mon warn on osd down out interval zero: false
roles:
- - mon.a
- mgr.x
- osd.0
- osd.1
- osd.2
- osd.3
- - mon.b
- osd.4
- osd.5
- osd.6
- osd.7
- - mon.c
- - osd.8
- osd.9
- osd.10
- osd.11
- - client.0

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,32 @@
meta:
- desc: install ceph/octopus latest
tasks:
- install:
exclude_packages:
- ceph-mgr-cephadm
- cephadm
- libcephfs-dev
branch: octopus
- print: "**** done install octopus"
- ceph:
conf:
global:
bluestore_warn_on_legacy_statfs: false
bluestore warn on no per pool omap: false
mon pg warn min per osd: 0
log-ignorelist:
- Not found or unloadable
- evicting unresponsive client
- exec:
osd.0:
- ceph osd require-osd-release octopus
- ceph osd set-require-min-compat-client octopus
- print: "**** done ceph"
- rgw:
- client.0
- print: "**** done => started rgw client.0"
overrides:
ceph:
conf:
mon:
mon warn on osd down out interval zero: false
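
The two exec commands persist release gates in the OSD map; one illustrative way to confirm they took effect (not part of this suite):

ceph osd dump | grep require_osd_release    # expect: require_osd_release octopus
ceph osd dump | grep min_compat_client      # expect the octopus client gate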

@@ -0,0 +1,6 @@
overrides:
ceph:
conf:
osd:
osd min pg log entries: 1
osd max pg log entries: 2
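
Capping the pg log at one or two entries pushes recovery after thrashing down the backfill path rather than log-based recovery. A hedged way to verify the override on a live daemon (assumes admin-socket access on the test node):

sudo ceph daemon osd.0 config get osd_min_pg_log_entries
sudo ceph daemon osd.0 config get osd_max_pg_log_entries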

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,14 @@
meta:
- desc: |
install upgrade of ceph/-x on 2/3 of the cluster
restart: mon.a, mon.b, mgr.x, osd.0-7
tasks:
- install.upgrade:
mon.a:
mon.b:
mon.c:
- print: "**** done install.upgrade of first 3 nodes"
- ceph.restart:
daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7]
mon-health-to-clog: false
- print: "**** done ceph.restart of all mons and 2/3 of osds"

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,26 @@
meta:
- desc: |
randomly kill and revive osd
small chance to increase the number of pgs
overrides:
ceph:
log-ignorelist:
- but it is still running
- wrongly marked me down
- objects unfound and apparently lost
- log bound mismatch
tasks:
- parallel:
- stress-tasks
stress-tasks:
- thrashosds:
timeout: 1200
chance_pgnum_grow: 1
chance_pgpnum_fix: 1
chance_thrash_cluster_full: 0
chance_thrash_pg_upmap: 0
chance_thrash_pg_upmap_items: 0
disable_objectstore_tool_tests: true
chance_force_recovery: 0
aggressive_pg_num_changes: false
- print: "**** done thrashosds 3-thrash"

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,52 @@
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
stress-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done radosbench 7-workload"

@@ -0,0 +1,10 @@
meta:
- desc: |
run basic cls tests for rbd
stress-tasks:
- workunit:
branch: octopus
clients:
client.0:
- cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"

@@ -0,0 +1,12 @@
meta:
- desc: |
run basic import/export cli tests for rbd
stress-tasks:
- workunit:
branch: octopus
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"

@@ -0,0 +1,10 @@
meta:
- desc: |
librbd C and C++ api tests
stress-tasks:
- workunit:
branch: octopus
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"

@@ -0,0 +1,16 @@
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool,
using only reads, writes, and deletes
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 45
write: 45
delete: 10
- print: "**** done rados/readwrite 5-workload"

@@ -0,0 +1,12 @@
meta:
- desc: |
rgw ragweed prepare before upgrade
stress-tasks:
- full_sequential:
- sequential:
- ragweed:
client.0:
default-branch: ceph-octopus
rgw_server: client.0
stages: prepare
- print: "**** done rgw ragweed prepare 4-workload"

@@ -0,0 +1,18 @@
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"

@@ -0,0 +1,21 @@
tasks:
- install.upgrade:
osd.8:
client.0:
extra_packages:
- python3-rados
- python3-rgw
- python3-rbd
- python3-cephfs
- ceph.restart:
daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*]
wait-for-healthy: false
wait-for-osds-up: true
- print: "**** restarted/upgrated => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
- exec:
osd.0:
- ceph osd set pglog_hardlimit
- ceph osd dump --format=json-pretty | grep "flags"
- ceph config set global mon_warn_on_msgr2_not_enabled false
- print: "**** try to set pglog_hardlimit again, should succeed"

@@ -0,0 +1 @@
.qa/releases/pacific.yaml

@@ -0,0 +1,6 @@
tasks:
- exec:
mon.a:
- ceph mon enable-msgr2
- ceph config rm global mon_warn_on_msgr2_not_enabled
- ceph.healthy:
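
Once msgr2 is enabled, each mon should advertise both a v2 and a v1 endpoint; an illustrative check:

ceph mon dump | grep v2:    # expect [v2:<addr>:3300/0,v1:<addr>:6789/0] per mon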

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1,14 @@
meta:
- desc: |
librbd python api tests
overrides:
ceph:
conf:
client:
rbd default clone format: 1
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

@@ -0,0 +1,16 @@
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50

@@ -0,0 +1 @@
.qa/mon_election

@@ -0,0 +1 @@
../.qa/

@@ -0,0 +1 @@
.qa/objectstore/bluestore-bitmap.yaml

@@ -0,0 +1 @@
.qa/objectstore/filestore-xfs.yaml

@@ -0,0 +1 @@
.qa/tasks/thrashosds-health.yaml

@@ -0,0 +1 @@
.qa/distros/all/ubuntu_18.04.yaml