Copied giant-x suite to next branch
Signed-off-by: Yuri Weinstein <yuri.weinstein@inktank.com>
Parent 5f19ef7116, commit 2c442c93ec
suites/upgrade/giant-x/parallel/% (new file, 0 lines)
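(Note on how these fragments combine: in teuthology's suite generation, a "%" file marks a directory whose subdirectories are combined as a cross product, a "+" file marks a directory whose fragments are all concatenated into every job, and symbolic links reuse fragments from other suites. The sketch below is illustrative only, not a file from this commit: it shows roughly what one generated job looks like after one fragment from each numbered subdirectory, all fragments from "+" directories, and one distro fragment have been merged.)

# illustrative merged job, assembled from fragments shown later in this diff
roles:                      # from 0-cluster/start.yaml
- - mon.a
  - mds.a
  - osd.0
  - osd.1
tasks:                      # from 1-giant-install/giant.yaml
- install:
    branch: giant
- ceph:
    fs: xfs
- parallel:
  - workload
  - upgrade-sequence
workload:                   # from one of the 2-workload fragments
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - cls
upgrade-sequence:           # from one 3-upgrade-sequence fragment
  sequential:
  - install.upgrade:
      mon.a:
      mon.b:
  - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
os_type: ubuntu             # from a distros fragment
os_version: "14.04"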
suites/upgrade/giant-x/parallel/0-cluster/start.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
- - mon.b
  - mon.c
  - osd.2
  - osd.3
- - client.0
  - client.1
overrides:
  ceph:
    log-whitelist:
    - scrub mismatch
    - ScrubResult
    conf:
      mon:
        mon warn on legacy crush tunables: false
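(Each top-level "- -" group under roles is placed on its own test node, so this cluster uses three machines: mon.a/mds.a/osd.0/osd.1, mon.b/mon.c/osd.2/osd.3, and the two clients. Purely as a hypothetical illustration, not part of this commit, the same daemons packed onto two nodes would read:)

roles:
- - mon.a
  - mon.b
  - mon.c
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
- - client.0
  - client.1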
suites/upgrade/giant-x/parallel/1-giant-install/giant.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
tasks:
- install:
    branch: giant
- print: "**** done installing giant"
- ceph:
    fs: xfs
- print: "**** done ceph"
- parallel:
  - workload
  - upgrade-sequence
- print: "**** done parallel"
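(The parallel task above runs its entries concurrently; each entry names a top-level key supplied by another fragment, which is how the 2-workload and 3-upgrade-sequence fragments plug in. A minimal sketch of the mechanism, not taken from this suite:)

tasks:
- parallel:
  - workload
  - upgrade-sequence
workload:             # keeps running against the old (giant) code
  sequential:
  - print: "workload running"
upgrade-sequence:     # restarts daemons onto the new version meanwhile
  sequential:
  - print: "upgrading"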
suites/upgrade/giant-x/parallel/2-workload/+ (new file, 0 lines)

suites/upgrade/giant-x/parallel/2-workload/ec-rados-default.yaml (new symbolic link)
@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml
@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - cls
  - print: "**** done cls 2-workload"

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rados/load-gen-big.sh
  - print: "**** done rados/load-gen-big.sh 2-workload"

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rbd/test_librbd.sh
  - print: "**** done rbd/test_librbd.sh 2-workload"

@@ -0,0 +1,8 @@
workload:
  sequential:
  - workunit:
      branch: giant
      clients:
        client.0:
        - rbd/test_librbd_python.sh
  - print: "**** done rbd/test_librbd_python.sh 2-workload"

@@ -0,0 +1,8 @@
upgrade-sequence:
  sequential:
  - install.upgrade:
      mon.a:
      mon.b:
  - print: "**** done install.upgrade mon.a and mon.b"
  - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
  - print: "**** done ceph.restart all"

@@ -0,0 +1,37 @@
upgrade-sequence:
  sequential:
  - install.upgrade:
      mon.a:
  - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
  - ceph.restart:
      daemons: [mon.a]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.0, osd.1]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - print: "**** running mixed versions of osds and mons"
  #do we need to use "ceph osd crush tunables giant" ?
  - exec:
      mon.b:
      - ceph osd crush tunables firefly
  - print: "**** done ceph osd crush tunables firefly"
  - install.upgrade:
      mon.b:
  - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
  - ceph.restart:
      daemons: [mon.b, mon.c]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.2, osd.3]
      wait-for-healthy: true
  - sleep:
      duration: 60
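(This second upgrade-sequence variant upgrades the cluster one half at a time, sleeping between restarts so the parallel workload keeps running against a deliberately mixed-version cluster; the exec step pins the CRUSH tunables to the firefly profile in the meantime, and the 0-cluster override silences the legacy-tunables warning. The in-file comment asks whether tunables should be raised again afterwards; a hypothetical follow-up step, not part of this commit, might look like:)

- exec:
    mon.a:
    - ceph osd crush tunables optimal   # hypothetical: restore a newer tunables profile once all daemons run the new version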
suites/upgrade/giant-x/parallel/4-final-workload/+ (new file, 0 lines)
@@ -0,0 +1,13 @@
tasks:
- rados:
    clients: [client.1]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados 4-final-workload"
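(In the rados task above, op_weights are relative weights for the random operation mix the rados stress tool issues from the listed clients; ops caps the total number of operations and objects sets the working-set size. As an illustrative variant, not part of this commit, the same task skewed toward writes would read:)

- rados:
    clients: [client.1]
    ops: 1000
    objects: 50
    op_weights:
      write: 100
      delete: 25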
@@ -0,0 +1,6 @@
tasks:
- workunit:
    clients:
      client.1:
      - rados/load-gen-mix.sh
- print: "**** done rados/load-gen-mix.sh 4-final-workload"

@@ -0,0 +1,10 @@
tasks:
- mon_thrash:
    revive_delay: 20
    thrash_delay: 1
- print: "**** done mon_thrash 4-final-workload"
- workunit:
    clients:
      client.1:
      - rados/test.sh
- print: "**** done rados/test.sh 4-final-workload"

@@ -0,0 +1,6 @@
tasks:
- workunit:
    clients:
      client.1:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 4-final-workload"

@@ -0,0 +1,8 @@
tasks:
- workunit:
    clients:
      client.1:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 4-final-workload"

@@ -0,0 +1,7 @@
tasks:
- rgw: [client.1]
- print: "**** done rgw 4-final-workload"
- swift:
    client.1:
      rgw_server: client.1
- print: "**** done swift 4-final-workload"
suites/upgrade/giant-x/parallel/distros (new symbolic link)
@@ -0,0 +1 @@
../../../../distros/supported

@@ -0,0 +1 @@
../stress-split/0-cluster

@@ -0,0 +1 @@
../stress-split/1-giant-install/

@@ -0,0 +1 @@
../stress-split/2-partial-upgrade

@@ -0,0 +1,13 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
    min_in: 4
- print: "**** done thrashosds 3-thrash"
suites/upgrade/giant-x/stress-split-erasure-code-x86_64/4-mon (new symbolic link)
@@ -0,0 +1 @@
../stress-split/4-mon

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml

@@ -0,0 +1 @@
../stress-split/6-next-mon

@@ -0,0 +1 @@
../stress-split/8-next-mon

@@ -0,0 +1,25 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    erasure_code_profile:
      name: isaprofile
      plugin: isa
      k: 2
      m: 1
      technique: reed_sol_van
      ruleset-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
- print: "**** done ec-rados-plugin=isa-k=2-m=1 9-workload"
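(The isa plugin is the Intel ISA-L accelerated erasure-code plugin and is built only for x86_64, which is why this stress-split-erasure-code-x86_64 variant pins arch: x86_64 in the distro fragments that follow. For reference, a roughly equivalent profile could be created by hand with an exec step; the sketch below is hypothetical and not part of this commit:)

- exec:
    mon.a:
    - ceph osd erasure-code-profile set isaprofile plugin=isa k=2 m=1 technique=reed_sol_van ruleset-failure-domain=osd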
@@ -0,0 +1,3 @@
os_type: rhel
os_version: "7.0"
arch: x86_64

@@ -0,0 +1,3 @@
os_type: ubuntu
os_version: "14.04"
arch: x86_64
suites/upgrade/giant-x/stress-split-erasure-code/% (new file, 0 lines)

suites/upgrade/giant-x/stress-split-erasure-code/0-cluster (new symbolic link)
@@ -0,0 +1 @@
../stress-split/0-cluster

suites/upgrade/giant-x/stress-split-erasure-code/1-giant-install (new symbolic link)
@@ -0,0 +1 @@
../stress-split/1-giant-install/
@@ -0,0 +1 @@
../stress-split/2-partial-upgrade

@@ -0,0 +1,13 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
    min_in: 4
- print: "**** done thrashosds 3-thrash"
suites/upgrade/giant-x/stress-split-erasure-code/4-mon (new symbolic link)
@@ -0,0 +1 @@
../stress-split/4-mon

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-default.yaml

suites/upgrade/giant-x/stress-split-erasure-code/6-next-mon (new symbolic link)
@@ -0,0 +1 @@
../stress-split/6-next-mon

suites/upgrade/giant-x/stress-split-erasure-code/8-next-mon (new symbolic link)
@@ -0,0 +1 @@
../stress-split/8-next-mon

@@ -0,0 +1 @@
../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml

suites/upgrade/giant-x/stress-split-erasure-code/distros (new symbolic link)
@@ -0,0 +1 @@
../stress-split/distros

suites/upgrade/giant-x/stress-split/% (new file, 0 lines)

suites/upgrade/giant-x/stress-split/0-cluster/start.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
overrides:
  ceph:
    conf:
      mon:
        mon warn on legacy crush tunables: false
roles:
- - mon.a
  - mon.b
  - mon.c
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - osd.4
  - osd.5
  - osd.6
- - osd.7
  - osd.8
  - osd.9
  - osd.10
  - osd.11
  - osd.12
  - osd.13
- - client.0
@@ -0,0 +1,7 @@
tasks:
- install:
    branch: giant
- print: "**** done install giant"
- ceph:
    fs: xfs
- print: "**** done ceph"

@@ -0,0 +1,7 @@
tasks:
- install.upgrade:
    osd.0:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6]
- print: "**** done ceph.restart 1st half"
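(install.upgrade with an "osd.0:" target upgrades the packages on the node hosting osd.0, i.e. the first roles group above, and the ceph.restart that follows brings only osd.0 through osd.6 up on the new version; the second half of the OSDs stays on giant for the rest of the run, so every later workload exercises a mixed-version cluster. A hypothetical counterpart for the second half, which this suite deliberately never runs, would look like:)

- install.upgrade:
    osd.7:
- ceph.restart:
    daemons: [osd.7, osd.8, osd.9, osd.10, osd.11, osd.12, osd.13]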
suites/upgrade/giant-x/stress-split/3-thrash/default.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
- print: "**** done thrashosds 3-thrash"

suites/upgrade/giant-x/stress-split/4-mon/mona.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.a]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.a"

suites/upgrade/giant-x/stress-split/5-workload/+ (new file, 0 lines)
@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"

@@ -0,0 +1,9 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"

@@ -0,0 +1,10 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 45
      write: 45
      delete: 10
- print: "**** done rados/readwrite 5-workload"

@@ -0,0 +1,13 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"
suites/upgrade/giant-x/stress-split/6-next-mon/monb.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.b]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.b 6-next-mon"

suites/upgrade/giant-x/stress-split/7-workload/+ (new file, 0 lines)
@@ -0,0 +1,5 @@
tasks:
- radosbench:
    clients: [client.0]
    time: 1800
- print: "**** done radosbench 7-workload"

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"
suites/upgrade/giant-x/stress-split/8-next-mon/monc.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
tasks:
- ceph.restart:
    daemons: [mon.c]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.c 8-next-mon"
- ceph.wait_for_mon_quorum: [a, b, c]
- print: "**** done wait_for_mon_quorum 8-next-mon"

suites/upgrade/giant-x/stress-split/9-workload/+ (new file, 0 lines)
@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: giant
    clients:
      client.0:
      - rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

@@ -0,0 +1,9 @@
tasks:
- rgw:
    client.0:
      default_idle_timeout: 300
- print: "**** done rgw 9-workload"
- swift:
    client.0:
      rgw_server: client.0
- print: "**** done swift 9-workload"

@@ -0,0 +1,12 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
suites/upgrade/giant-x/stress-split/distros (new symbolic link)
@@ -0,0 +1 @@
../../../../distros/supported
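(The distros symlink pulls in the shared distros/supported fragments, so every job in the stress-split suite is additionally fanned out across each supported distro, one fragment per job. The actual fragments live outside this diff; an illustrative example of their shape, matching the ubuntu fragment shown earlier:)

os_type: ubuntu
os_version: "14.04"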