Mirror of https://github.com/ceph/ceph
Commit afd0b508c2
When we are doing cache tiering, we are more sensitive to short PG logs
because the dup op entries are not perfectly promoted from the base to
the cache. See:

http://tracker.ceph.com/issues/38358
http://tracker.ceph.com/issues/24320

This works around the problem by not testing short PG logs in combination
with cache tiering. This works because the short_pg_log.yaml fragment sets
the short log in the [global] section, but the cache workloads override it
(back to a large/default value) in the [osd] section.

Signed-off-by: Sage Weil <sage@redhat.com>
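For reference, a short_pg_log.yaml fragment like the one mentioned above shortens the PG log for all daemons via the [global] conf section. A minimal sketch of such a fragment (the exact entry counts here are assumptions for illustration, not taken from this commit):

    overrides:
      ceph:
        conf:
          global:
            # deliberately tiny PG log to exercise trimming paths (values assumed)
            osd_min_pg_log_entries: 1
            osd_max_pg_log_entries: 2

Because daemon-specific sections take precedence over [global] in Ceph configuration, the [osd] values in the workload below win whenever both fragments are combined, restoring a long PG log for cache-tiering runs.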
45 lines
1.3 KiB
YAML
overrides:
  ceph:
    log-whitelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
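# The exec task below builds a writeback cache tier: pool "cache" is layered
# over pool "base" and tuned with a bloom hit set and a small object target
# so that flush/evict activity is triggered during the run.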
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 0
      - sudo ceph osd pool set cache min_write_recency_for_promote 0
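# Mixed read/write workload with pool snapshots and explicit cache
# flush/try-flush/evict ops, driven against the base pool through the
# cache tier overlay configured above.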
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
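# Resource hints for runs on OpenStack-provisioned test nodes.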
openstack:
  - machine:
      ram: 15000 # MB