Merge remote-tracking branch 'gh/wip-infernalis'

This commit is contained in:
Sage Weil 2015-09-28 10:32:04 -04:00
commit 9eea6ea6f9
18 changed files with 163 additions and 0 deletions

5
releases/infernalis.yaml Normal file
View File

@ -0,0 +1,5 @@
# Post-upgrade steps once every daemon runs Infernalis: enable the new
# bitwise object sort order, then switch every pool's cache hit-set
# tracking to GMT-based timestamps.
tasks:
- exec:
    osd.0:
    - ceph osd set sortbitwise
    - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done

View File

@ -0,0 +1 @@
../../../../releases/infernalis.yaml

View File

View File

@ -0,0 +1,19 @@
# Cluster layout: node 1 carries all three MONs, the MDS and two OSDs;
# node 2 carries two more OSDs; node 3 is the client.
roles:
- - mon.a
  - mon.b
  - mon.c
  - mds.a
  - osd.0
  - osd.1
- - osd.2
  - osd.3
- - client.0
overrides:
  ceph:
    # Log messages that are expected during a rolling upgrade and must
    # not fail the run.
    log-whitelist:
    - scrub mismatch
    - ScrubResult
    - failed to encode map
    conf:
      mon:
        # A mixed-version cluster may legitimately run with legacy
        # CRUSH tunables; silence the warning.
        mon warn on legacy crush tunables: false

View File

@ -0,0 +1,7 @@
# Start from a hammer-release install and bring the cluster up on xfs.
tasks:
- install:
    branch: hammer
- print: "**** done install hammer"
- ceph:
    fs: xfs
- print: "**** done ceph"

View File

@ -0,0 +1,5 @@
# Erasure-coded variant of base-pool: k=2, m=1, with the failure domain
# relaxed to osd so the profile fits on this small test cluster.
tasks:
- exec:
    client.0:
    - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd k=2 m=1
    - ceph osd pool create base-pool 4 4 erasure teuthologyprofile

View File

@ -0,0 +1,4 @@
# Replicated variant of base-pool (4 PGs).
tasks:
- exec:
    client.0:
    - ceph osd pool create base-pool 4

View File

@ -0,0 +1,10 @@
# Put a writeback cache tier in front of base-pool and give it a bloom
# hit set — the hit set is what use_gmt_hitset later operates on.
tasks:
- exec:
    client.0:
    - ceph osd pool create cache-pool 4
    - ceph osd tier add base-pool cache-pool
    - ceph osd tier cache-mode cache-pool writeback
    - ceph osd tier set-overlay base-pool cache-pool
    - ceph osd pool set cache-pool hit_set_type bloom
    - ceph osd pool set cache-pool hit_set_count 8
    - ceph osd pool set cache-pool hit_set_period 5

View File

@ -0,0 +1,84 @@
# Upgrade the cluster in two halves while a rados workload runs in
# parallel. Between the halves, enabling sortbitwise must FAIL, because
# not every OSD has the OSD_BITWISE_HOBJ_SORT feature yet.
tasks:
- parallel:
  - workload-when-upgrading
  - upgrade-sequence
- print: "**** done upgrade"
workload-when-upgrading:
  sequential:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 50
      pools: [base-pool]
      write_append_excl: false
      op_weights:
        read: 100
        write: 0
        append: 100
        delete: 50
        copy_from: 50
        setattr: 25
        rmattr: 25
  - print: "**** done rados when upgrading"
upgrade-sequence:
  sequential:
  - upgrade-first-half
  - flip-but-fail
  - upgrade-second-half
# First half: upgrade packages on the mon.a node, then restart mon.a and
# its two OSDs one at a time, waiting for health between restarts.
upgrade-first-half:
  sequential:
  - install.upgrade:
      mon.a:
  - print: "**** done install.upgrade mon.{a,b,c} and osd.{0,1}"
  - ceph.restart:
      daemons: [mon.a]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.0]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.1]
      wait-for-healthy: true
  - sleep:
      duration: 60
# Second half: upgrade the remaining node, then restart the remaining
# mons and OSDs the same way.
upgrade-second-half:
  sequential:
  - install.upgrade:
      mon.b:
  - print: "**** done install.upgrade osd.{2,3}"
  - ceph.restart:
      daemons: [mon.b]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [mon.c]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.2]
      wait-for-healthy: true
  - sleep:
      duration: 60
  - ceph.restart:
      daemons: [osd.3]
      wait-for-healthy: true
  - sleep:
      duration: 60
# Mid-upgrade negative test: the sortbitwise flip must be refused and
# print the feature error; grep makes the step fail if it is not.
flip-but-fail:
  sequential:
  - exec:
      mon.a:
      - |-
        ceph osd set sortbitwise 2>&1 | grep "not all up OSDs have OSD_BITWISE_HOBJ_SORT feature"
  - print: "**** done flip-but-fail"

View File

@ -0,0 +1,28 @@
# With the whole cluster on the new release, the sortbitwise flip must
# now succeed and the cache pool can switch to GMT hit sets, while a
# snapshot-heavy rados workload runs in parallel.
tasks:
- parallel:
  - workload-2
  - flip-and-success
workload-2:
  sequential:
  - rados:
      clients: [client.0]
      ops: 1000
      objects: 50
      pools: [base-pool]
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 50
        rollback: 50
  - print: "**** done rados after upgrading"
flip-and-success:
  sequential:
  - exec:
      client.0:
      - ceph osd set sortbitwise
      - ceph osd pool set cache-pool use_gmt_hitset true
  - print: "**** done flip-and-success"