mirror of https://github.com/ceph/ceph
763f1877ec
The teuthology machines are periodically running out of space due to the aggressive log settings.

Fixes: http://tracker.ceph.com/issues/21251
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
25 lines · 873 B · YAML
tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
      - sudo ceph osd pool set datapool allow_ec_overwrites true
      - rbd pool init datapool
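# (Annotation, not part of the original suite file.) The profile above uses
# k=2 data chunks plus m=1 coding chunk, so each object is split across three
# OSDs (crush-failure-domain=osd) and the pool tolerates the loss of any one
# OSD. A quick sanity check after the exec step, assuming the pool was created
# as above:
#   ceph osd pool get datapool allow_ec_overwrites   # expect: allow_ec_overwrites: true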
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      client:
        rbd default data pool: datapool
      osd: # force bluestore since it's required for ec overwrites
        osd objectstore: bluestore
        bluestore block size: 96636764160
        enable experimental unrecoverable data corrupting features: "*"
        osd debug randomize hobject sort order: false
        # this doesn't work with failures bc the log writes are not atomic across the two backends
        # bluestore bluefs env mirror: true
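With "rbd default data pool: datapool" set in the client section, images created by the test workloads keep their metadata in a replicated RBD pool while their data objects are written to the erasure-coded pool. The same effect can be requested per image rather than via the config override; a minimal sketch, assuming a replicated pool named "rbd" already exists for image metadata (the image name ec-image is illustrative):

    # place the image's data objects in the erasure-coded pool; metadata stays in 'rbd'
    rbd create --size 1G --data-pool datapool rbd/ec-image
    rbd info rbd/ec-image   # the data_pool field should report 'datapool'

When the client-side default shown above is in effect, the --data-pool flag can be omitted and new images will use datapool automatically.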