Added singleton hammer-x test to address #12625

Signed-off-by: Yuri Weinstein <yweinste@redhat.com>
Author: Yuri Weinstein 2015-08-07 11:02:01 -07:00
parent 978828537e
commit 317559df65
22 changed files with 212 additions and 0 deletions

@@ -0,0 +1,17 @@
overrides:
  ceph:
    conf:
      mon:
        mon warn on legacy crush tunables: false
roles:
- - mon.a
  - mon.b
  - mon.c
  - mds.a
  - osd.0
  - osd.1
  - osd.2
- - osd.3
  - osd.4
  - osd.5
- - client.0

@@ -0,0 +1,7 @@
tasks:
- install:
    branch: hammer
- print: "**** done install hammer"
- ceph:
    fs: xfs
- print: "**** done ceph"

@@ -0,0 +1,7 @@
tasks:
- install.upgrade:
    osd.0:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons: [osd.0, osd.1, osd.2, osd.3]
- print: "**** done ceph.restart 1st half"

@@ -0,0 +1,12 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
- print: "**** done thrashosds 3-thrash"

@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.a]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.a"

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"

@@ -0,0 +1,9 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"

@@ -0,0 +1,11 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_append_excl: false
    op_weights:
      read: 45
      write: 45
      delete: 10
- print: "**** done rados/readwrite 5-workload"

@@ -0,0 +1,14 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    write_append_excl: false
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"

@@ -0,0 +1,6 @@
tasks:
- ceph.restart:
    daemons: [mon.b]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.b 6-next-mon"

@@ -0,0 +1,5 @@
tasks:
- radosbench:
    clients: [client.0]
    time: 1800
- print: "**** done radosbench 7-workload"

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"

@@ -0,0 +1,8 @@
tasks:
- ceph.restart:
    daemons: [mon.c]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.c 8-next-mon"
- ceph.wait_for_mon_quorum: [a, b, c]
- print: "**** done wait_for_mon_quorum 8-next-mon"

@@ -0,0 +1,31 @@
#
# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
# the default value of 4096. Nor is 1024*1024 a multiple of 4128, which
# creates situations where rounding rules during recovery become
# necessary.
#
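# (A rough check of that arithmetic, assuming jerasure pads each chunk
# to a 16-byte alignment: ceil(4096 / 3) = 1366 bytes per chunk, padded
# up to 1376, hence a stripe_width of 1376 * 3 = 4128.)
#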
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: jerasure31profile
      plugin: jerasure
      k: 3
      m: 1
      technique: reed_sol_van
      ruleset-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25

@@ -0,0 +1,7 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

@@ -0,0 +1,9 @@
tasks:
- rgw:
    client.0:
      default_idle_timeout: 300
- print: "**** done rgw 9-workload"
- swift:
    client.0:
      rgw_server: client.0
- print: "**** done swift 9-workload"

@@ -0,0 +1,13 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_append_excl: false
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

@@ -0,0 +1,36 @@
overrides:
  ceph:
    log-whitelist:
    - must scrub before tier agent can activate
tasks:
  workunit:
    sequential:
    - exec:
        client.0:
        - ceph osd pool create base 4
        - ceph osd pool create cache 4
        - ceph osd tier add base cache
        - ceph osd tier cache-mode cache writeback
        - ceph osd tier set-overlay base cache
        - ceph osd pool set cache hit_set_type bloom
        - ceph osd pool set cache hit_set_count 8
        - ceph osd pool set cache hit_set_period 3600
        - ceph osd pool set cache target_max_objects 250
    - rados:
        clients: [client.0]
        pools: [base]
        ops: 4000
        objects: 500
        pool_snaps: true
        op_weights:
          read: 100
          write: 100
          delete: 50
          copy_from: 50
          flush: 50
          try_flush: 50
          evict: 50
          snap_create: 50
          snap_remove: 50
          rollback: 50
    - print: "**** done test_cache-pool-snaps 9-workload"