# ceph/suites/rados/singleton-nomsgr/all/11429.yaml
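# Reproducer for http://tracker.ceph.com/issues/11429 (leftover "zombie" PGs
# from a deleted pool): install firefly, create and delete a large pool while
# osd.0 blackholes its filestore, churn the osdmap, then upgrade through
# hammer to the branch under test and check the OSDs handle the leftover PGs.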
# we don't have el7 packages for old releases
# http://tracker.ceph.com/issues/15139
os_type: ubuntu
overrides:
  ceph:
    conf:
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
        mon warn on legacy crush tunables: false
        mon min osdmap epochs: 3
      osd:
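        # unusually small map cache / advance settings, presumably so the OSDs
        # trim and fall behind on osdmaps quickly enough to hit the bug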
        osd map cache size: 2
        osd map max advance: 1
        debug filestore: 20
        debug journal: 20
        debug ms: 1
        debug osd: 20
    log-whitelist:
    - osd_map_cache_size
    - slow request
    - scrub mismatch
    - ScrubResult
    - failed to encode
roles:
- - mon.a
  - osd.0
  - osd.1
  - mon.b
  - mon.c
  - osd.2
  - client.0
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
tasks:
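# start on firefly (v0.80.8); the suite upgrades firefly -> hammer -> the
# branch under test further down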
- install:
    branch: v0.80.8
- print: '**** done installing firefly'
- ceph:
    fs: xfs
- print: '**** done ceph'
- full_sequential:
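  # create a pool with many PGs, write into it, then delete it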
  - ceph_manager.create_pool:
      args: ['toremove']
      kwargs:
        pg_num: 4096
  - sleep:
      duration: 30
  - ceph_manager.wait_for_clean: null
  - radosbench:
      clients: [client.0]
      time: 120
      size: 1
      pool: toremove
      create_pool: false
      cleanup: false
  - ceph_manager.remove_pool:
      args: ['toremove']
  - exec:
      client.0:
        - rados -p rbd ls -
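  # make osd.0 silently drop all filestore writes so the PG removals from the
  # pool deletion never hit disk, then bounce the OSDs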
  - exec:
      osd.0:
        - ceph daemon osd.0 config set filestore_blackhole true
  - ceph.restart:
      daemons:
        - osd.0
        - osd.1
        - osd.2
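  # after the restart osd.0 should skip loading PGs that belong to the
  # now-deleted pool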
  - exec:
      osd.0:
        - sudo grep -c 'load_pgs. skipping PG' /var/log/ceph/ceph-osd.0.log
  - ceph_manager.wait_for_clean: null
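  # flap min_size on a fresh pool to generate lots of osdmap epochs; once the
  # maps that contained the deleted pool are trimmed, the skipped PGs on osd.0
  # become the "zombie" PGs this test is about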
  - ceph_manager.create_pool:
      args: ['newpool']
  - loop:
      count: 100
      body:
        - ceph_manager.set_pool_property:
            args: ['newpool', 'min_size', 2]
        - ceph_manager.set_pool_property:
            args: ['newpool', 'min_size', 1]
  - ceph_manager.wait_for_clean: null
  - loop:
      count: 100
      body:
        - ceph_manager.set_pool_property:
            args: ['newpool', 'min_size', 2]
        - ceph_manager.set_pool_property:
            args: ['newpool', 'min_size', 1]
        - sleep:
            duration: 10
  - ceph_manager.wait_for_clean: null
  - print: '**** done creating zombie pgs'
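  # upgrade firefly -> hammer; the hammer OSDs are expected to skip the
  # leftover PGs on startup ('Skipping the pg for now')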
  - install.upgrade:
      mon.a:
        branch: hammer
  - ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
  - ceph_manager.wait_for_clean: null
  - ceph.restart: [osd.0, osd.1, osd.2]
  - ceph_manager.wait_for_clean: null
  - exec:
      osd.0:
        - sudo grep -c 'Skipping the pg for now' /var/log/ceph/ceph-osd.0.log
  - print: '**** done verifying hammer upgrade'
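  # finally upgrade to the branch under test (no explicit branch); the OSDs
  # should come up and stay clean despite the leftover PGs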
  - install.upgrade:
      mon.a: null
  - ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
  - ceph_manager.wait_for_clean: null
  - ceph.restart: [osd.0, osd.1, osd.2]
  - exec:
      osd.0:
        - sudo grep -c 'unable to peek at' /var/log/ceph/ceph-osd.0.log
  - radosbench:
      clients: [client.0]
      time: 30
      size: 1
  - ceph_manager.wait_for_clean: null
  - ceph.restart: [osd.0, osd.1, osd.2]
  - ceph_manager.wait_for_clean: null
  - print: '**** done verifying final upgrade'