Merge pull request #1209 from ceph/wip-rados-upgrade

rados/upgrade: remove hammer-x-singleton
Yuri Weinstein 2016-10-17 08:37:08 -07:00 committed by GitHub
commit a53e0b211d
24 changed files with 0 additions and 229 deletions

@@ -1,4 +0,0 @@
openstack:
- volumes: # attached to each instance
    count: 3
    size: 10 # GB

@@ -1,16 +0,0 @@
overrides:
  ceph:
    conf:
      mon:
        mon warn on legacy crush tunables: false
roles:
- - mon.a
  - mon.b
  - mon.c
  - osd.0
  - osd.1
  - osd.2
- - osd.3
  - osd.4
  - osd.5
- - client.0

@@ -1,7 +0,0 @@
tasks:
- install:
    branch: hammer
- print: "**** done install hammer"
- ceph:
    fs: xfs
- print: "**** done ceph"

@@ -1,7 +0,0 @@
tasks:
- install.upgrade:
    osd.0:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons: [osd.0, osd.1, osd.2, osd.3]
- print: "**** done ceph.restart 1st half"

@@ -1,13 +0,0 @@
overrides:
  ceph:
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
    - failed to encode
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
- print: "**** done thrashosds 3-thrash"

@@ -1,6 +0,0 @@
tasks:
- ceph.restart:
    daemons: [mon.a]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.a"

@@ -1,7 +0,0 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"

@@ -1,9 +0,0 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/import_export.sh
    env:
      RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"

@@ -1,11 +0,0 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_append_excl: false
    op_weights:
      read: 45
      write: 45
      delete: 10
- print: "**** done rados/readwrite 5-workload"

@@ -1,14 +0,0 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    write_append_excl: false
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"

@@ -1,6 +0,0 @@
tasks:
- ceph.restart:
    daemons: [mon.b]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.b 6-next-mon"

@@ -1,18 +0,0 @@
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      time: 150
  - radosbench:
      clients: [client.0]
      time: 150
  - radosbench:
      clients: [client.0]
      time: 150
  - radosbench:
      clients: [client.0]
      time: 150
  - radosbench:
      clients: [client.0]
      time: 150
- print: "**** done radosbench 7-workload"

@@ -1,7 +0,0 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"

@@ -1,8 +0,0 @@
tasks:
- ceph.restart:
    daemons: [mon.c]
    wait-for-healthy: false
    wait-for-osds-up: true
- print: "**** done ceph.restart mon.c 8-next-mon"
- ceph.wait_for_mon_quorum: [a, b, c]
- print: "**** done wait_for_mon_quorum 8-next-mon"

@@ -1,31 +0,0 @@
#
# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
# the default value of 4096. It is also not a multiple of 1024*1024 and
# creates situations where rounding rules during recovery become
# necessary.
#
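# A plausible derivation of the 1376 figure above (an assumption, not
# stated in this file): the default stripe_width of 4096 bytes split
# across k=3 data chunks gives 4096/3 = 1365.33 bytes per chunk, which
# the jerasure plugin would pad up to an aligned 1376 bytes, making the
# effective stripe_width 1376*3 = 4128 bytes.
#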
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: jerasure31profile
      plugin: jerasure
      k: 3
      m: 1
      technique: reed_sol_van
      ruleset-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25

@@ -1,7 +0,0 @@
tasks:
- workunit:
    branch: hammer
    clients:
      client.0:
      - rbd/test_librbd_python.sh
- print: "**** done rbd/test_librbd_python.sh 9-workload"

@@ -1,9 +0,0 @@
tasks:
- rgw:
    client.0:
      default_idle_timeout: 300
- print: "**** done rgw 9-workload"
- swift:
    client.0:
      rgw_server: client.0
- print: "**** done swift 9-workload"

@@ -1,13 +0,0 @@
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_append_excl: false
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

@@ -1,36 +0,0 @@
overrides:
  ceph:
    log-whitelist:
    - must scrub before tier agent can activate
tasks:
workunit:
sequential:
- exec:
    client.0:
    - sudo ceph osd pool create base 4
    - sudo ceph osd pool create cache 4
    - sudo ceph osd tier add base cache
    - sudo ceph osd tier cache-mode cache writeback
    - sudo ceph osd tier set-overlay base cache
    - sudo ceph osd pool set cache hit_set_type bloom
    - sudo ceph osd pool set cache hit_set_count 8
    - sudo ceph osd pool set cache hit_set_period 3600
    - sudo ceph osd pool set cache target_max_objects 250
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      flush: 50
      try_flush: 50
      evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
- print: "**** done test_cache-pool-snaps 9-workload"