83880580aa
With cache tiering facets gone, "pool" facets are strictly about the
--data-pool option now. Rename to "data-pool" and create symlinks to a
common directory.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>

tasks:
- exec:
    client.0:
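      # k=2 data chunks + m=1 coding chunk; crush-failure-domain=osd places
      # chunks on distinct OSDs rather than distinct hosts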
      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
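      # create an erasure-coded pool with 4 PGs and 4 PGPs using the profile above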
      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
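      # RBD needs partial overwrites, which EC pools don't allow by default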
      - sudo ceph osd pool set datapool allow_ec_overwrites true
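      # initialize the pool for RBD use (registers the "rbd" application)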
      - rbd pool init datapool
overrides:
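  # thrasher settings: occasionally inject simulated crashes at the BlueStore
  # block-device layer to exercise recovery (assumed semantics of bdev_inject_crash)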
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    log-ignorelist:
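      # expected health warnings; matching cluster log lines won't fail the run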
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
    conf:
      client:
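        # new images store their data objects in datapool; image metadata
        # stays in the pool the image was created in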
        rbd default data pool: datapool
      osd: # force bluestore since it's required for ec overwrites
        osd objectstore: bluestore
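        # 96636764160 bytes = 90 GiB for the BlueStore block device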
        bluestore block size: 96636764160
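        # opt in to all experimental features for this test run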
        enable experimental unrecoverable data corrupting features: "*"
        osd debug randomize hobject sort order: false
        # this doesn't work with failures because the log writes are not atomic across the two backends
        # bluestore bluefs env mirror: true