Mirror of https://github.com/ceph/ceph
When we are doing cache tiering, we are more sensitive to short PG logs because the dup op entries are not perfectly promoted from the base tier to the cache tier. See:

http://tracker.ceph.com/issues/38358
http://tracker.ceph.com/issues/24320

This works around the problem by not testing short PG logs in combination with cache tiering. It works because the short_pg_log.yaml fragment sets the short log in the [global] section, but the cache workloads override it (back to a large/default value) in the [osd] section.

Signed-off-by: Sage Weil <sage@redhat.com>
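For context, a rough sketch of the kind of [global] fragment this commit works around; the exact contents of the suite's short_pg_log.yaml may differ, and the values here are illustrative only:

overrides:
  ceph:
    conf:
      global:
        # illustrative values: a deliberately tiny PG log to exercise trimming
        osd_min_pg_log_entries: 1
        osd_max_pg_log_entries: 2

Because per-daemon [osd] settings take precedence over [global] ones, the fragment below restores a large PG log for the cache-tiering workload: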
overrides:
  ceph:
    log-whitelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 2
      - sudo ceph osd pool set cache min_write_recency_for_promote 2
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
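As a sanity check, the effective values could be read back over the OSD's admin socket with an extra exec step; this is a hypothetical addition for illustration, not part of the original file (ceph daemon ... config get is the standard CLI, and the exec task is assumed to run on the node hosting osd.0):

- exec:
    osd.0:
      # both should report 3000 once the [osd] override has taken effect
      - sudo ceph daemon osd.0 config get osd_min_pg_log_entries
      - sudo ceph daemon osd.0 config get osd_max_pg_log_entries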