When we are doing cache tiering, we are more sensitive to short PG logs
because the dup op entries are not perfectly promoted from the base to
the cache.

See: http://tracker.ceph.com/issues/38358
     http://tracker.ceph.com/issues/24320

This works around the problem by not testing short PG logs in
combination with cache tiering. It works because the short_pg_log.yaml
fragment sets the short log in the [global] section, but the cache
workloads override it (back to a large/default value) in the [osd]
section.

Signed-off-by: Sage Weil <sage@redhat.com>
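For context, the short_pg_log.yaml fragment referenced above sets these options under [global]. The values in this sketch are illustrative rather than quoted from that file; the point is the scoping, since a per-daemon [osd] setting takes precedence over a [global] one, which is what lets the workload below restore a large log:

overrides:
  ceph:
    conf:
      global:
        # illustrative short values; what matters is the [global] scope,
        # which the [osd] section in the workload below overrides
        osd_min_pg_log_entries: 1
        osd_max_pg_log_entries: 2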
37 lines | 1.1 KiB | YAML
overrides:
  ceph:
    log-whitelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
      - sudo ceph osd pool create base 4 4 erasure myprofile
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool set base min_size 2
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 5000
- rados:
    clients: [client.0]
    pools: [base]
    ops: 10000
    objects: 6600
    max_seconds: 1200
    size: 1024
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
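To confirm at runtime that the [osd] override wins, a hypothetical extra exec step (not part of the original fragment, and assuming a release where `ceph config show` is available) could query the effective value:

- exec:
    client.0:
      # expected to report 3000 (the [osd] override), not the short
      # [global] value from short_pg_log_entries.yaml
      - sudo ceph config show osd.0 osd_min_pg_log_entries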