Copied giant-x suite to next branch

Signed-off-by: Yuri Weinstein <yuri.weinstein@inktank.com>
Yuri Weinstein 2014-11-17 08:42:31 -08:00
parent 5f19ef7116
commit 2c442c93ec
63 changed files with 383 additions and 0 deletions

View File

View File

@@ -0,0 +1,19 @@
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
- - mon.b
  - mon.c
  - osd.2
  - osd.3
- - client.0
  - client.1
overrides:
  ceph:
    log-whitelist:
    - scrub mismatch
    - ScrubResult
    conf:
      mon:
        mon warn on legacy crush tunables: false

View File

@@ -0,0 +1,11 @@
tasks:
- install:
    branch: giant
- print: "**** done installing giant"
- ceph:
    fs: xfs
- print: "**** done ceph"
- parallel:
  - workload
  - upgrade-sequence
- print: "**** done parallel"

View File

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml

View File

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - cls
  - print: "**** done cls 2-workload"

View File

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rados/load-gen-big.sh
  - print: "**** done rados/load-gen-big.sh 2-workload"

View File

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rbd/test_librbd.sh
  - print: "**** done rbd/test_librbd.sh 2-workload"

View File

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rbd/test_librbd_python.sh
  - print: "**** done rbd/test_librbd_python.sh 2-workload"

View File

@@ -0,0 +1,8 @@
upgrade-sequence:
  sequential:
  - install.upgrade:
      mon.a:
      mon.b:
  - print: "**** done install.upgrade mon.a and mon.b"
  - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
  - print: "**** done ceph.restart all"

View File

@@ -0,0 +1,37 @@
upgrade-sequence:
  sequential:
  - install.upgrade:
      mon.a:
  - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
  - ceph.restart:
      daemons: [mon.a]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.0, osd.1]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - print: "**** running mixed versions of osds and mons"
# do we need to use "ceph osd crush tunables giant"?
  - exec:
      mon.b:
      - ceph osd crush tunables firefly
  - print: "**** done ceph osd crush tunables firefly"
  - install.upgrade:
      mon.b:
  - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
  - ceph.restart:
      daemons: [mon.b, mon.c]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.2, osd.3]
      wait-for-healthy: true
  - sleep:
      duration: 60

View File

@@ -0,0 +1,13 @@
tasks:
- rados:
    clients: [client.1]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados 4-final-workload"

View File

@@ -0,0 +1,6 @@
tasks:
- workunit:
    clients:
      client.1:
      - rados/load-gen-mix.sh
- print: "**** done rados/load-gen-mix.sh 4-final-workload"

View File

@@ -0,0 +1,10 @@
tasks:
- mon_thrash:
    revive_delay: 20
    thrash_delay: 1
- print: "**** done mon_thrash 4-final-workload"
- workunit:
    clients:
      client.1:
      - rados/test.sh
- print: "**** done rados/test.sh 4-final-workload"

View File

@@ -0,0 +1,6 @@
tasks:
- workunit:
    clients:
      client.1:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 4-final-workload"

View File

@@ -0,0 +1,8 @@
tasks:
- workunit:
    clients:
      client.1:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 4-final-workload"

View File

@@ -0,0 +1,7 @@
tasks:
- rgw: [client.1]
- print: "**** done rgw 4-final-workload"
- swift:
    client.1:
      rgw_server: client.1
- print: "**** done swift 4-final-workload"

View File

@@ -0,0 +1 @@
../../../../distros/supported

View File

@@ -0,0 +1 @@
../stress-split/0-cluster

View File

@@ -0,0 +1 @@
../stress-split/1-giant-install/

View File

@@ -0,0 +1 @@
../stress-split/2-partial-upgrade

View File

@@ -0,0 +1,13 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
    min_in: 4
- print: "**** done thrashosds 3-thrash"

View File

@@ -0,0 +1 @@
../stress-split/4-mon

View File

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml

View File

@@ -0,0 +1 @@
../stress-split/6-next-mon

View File

@@ -0,0 +1 @@
../stress-split/8-next-mon

View File

@@ -0,0 +1,25 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    erasure_code_profile:
      name: isaprofile
      plugin: isa
      k: 2
      m: 1
      technique: reed_sol_van
      ruleset-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
- print: "**** done ec-rados-plugin=isa-k=2-m=1 9-workload"

View File

@@ -0,0 +1,3 @@
os_type: rhel
os_version: "7.0"
arch: x86_64

View File

@@ -0,0 +1,3 @@
os_type: ubuntu
os_version: "14.04"
arch: x86_64

View File

@@ -0,0 +1 @@
../stress-split/0-cluster

View File

@@ -0,0 +1 @@
../stress-split/1-giant-install/

View File

@@ -0,0 +1 @@
../stress-split/2-partial-upgrade

View File

@@ -0,0 +1,13 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
    min_in: 4
- print: "**** done thrashosds 3-thrash"

View File

@@ -0,0 +1 @@
../stress-split/4-mon

View File

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml

View File

@@ -0,0 +1 @@
../stress-split/6-next-mon

View File

@@ -0,0 +1 @@
../stress-split/8-next-mon

View File

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml

View File

@@ -0,0 +1 @@
../stress-split/distros

View File

View File

@@ -0,0 +1,25 @@
overrides:
  ceph:
    conf:
      mon:
        mon warn on legacy crush tunables: false
roles:
- - mon.a
  - mon.b
  - mon.c
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - osd.4
  - osd.5
  - osd.6
- - osd.7
  - osd.8
  - osd.9
  - osd.10
  - osd.11
  - osd.12
  - osd.13
- - client.0

View File

@@ -0,0 +1,7 @@
tasks:
- install:
    branch: giant
- print: "**** done install giant"
- ceph:
    fs: xfs
- print: "**** done ceph"

View File

@@ -0,0 +1,7 @@
tasks:
- install.upgrade:
    osd.0:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6]
- print: "**** done ceph.restart 1st half"

View File

@@ -0,0 +1,12 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
- print: "**** done thrashosds 3-thrash"

View File

@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.a]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.a"

View File

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"

View File

@@ -0,0 +1,9 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"

View File

@@ -0,0 +1,10 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 45
      write: 45
      delete: 10
- print: "**** done rados/readwrite 5-workload"

View File

@@ -0,0 +1,13 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"

View File

@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.b]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.b 6-next-mon"

View File

@@ -0,0 +1,5 @@
tasks:
- radosbench:
    clients: [client.0]
    time: 1800
- print: "**** done radosbench 7-workload"

View File

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"

View File

@@ -0,0 +1,8 @@
tasks:
- ceph.restart:
    daemons: [mon.c]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.c 8-next-mon"
- ceph.wait_for_mon_quorum: [a, b, c]
- print: "**** done wait_for_mon_quorum 8-next-mon"

View File

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

View File

@@ -0,0 +1,9 @@
tasks:
- rgw:
    client.0:
      default_idle_timeout: 300
- print: "**** done rgw 9-workload"
- swift:
    client.0:
      rgw_server: client.0
- print: "**** done swift 9-workload"

View File

@@ -0,0 +1,12 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

View File

@@ -0,0 +1 @@
../../../../distros/supported