mirror of https://github.com/ceph/ceph
commit d6c66f3fa6
This is mostly for testing: a lot of tests assume that there are no existing pools. These tests relied on a config to turn off creating the "device_health_metrics" pool, which generally exists for any new Ceph cluster. It would be better to make these tests tolerant of the new .mgr pool, but there are clearly a lot of them, so just convert the config to make it work.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
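For context, a minimal sketch (an illustration under assumptions, not part of the test) of the condition the config change is meant to guarantee: with the mgr's automatic pool creation turned off early enough, a new cluster comes up with an empty pool list.

    # The same config command the YAML below issues via pre-mgr-commands,
    # i.e. before the mgr starts and gets a chance to create its pool.
    sudo ceph config set mgr mgr_pool false --force
    # On a fresh cluster this should then print no pools at all.
    sudo ceph osd lspools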
41 lines | 1.2 KiB | YAML
# verify #13098 fix
openstack:
- volumes: # attached to each instance
    count: 3
    size: 10 # GB
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
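# Cluster log lines matching these patterns are expected while the pools fill
# up against their quotas, so tell teuthology not to treat them as failures.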
overrides:
  ceph:
    log-ignorelist:
      - is full
      - overall HEALTH_
      - \(POOL_FULL\)
      - \(POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(CACHE_POOL_NEAR_FULL\)
tasks:
- install:
- ceph:
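    # Turn off automatic creation of the mgr's pool (see the commit message
    # above) so the test starts with no pre-existing pools.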
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
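    # Assumption: the reduced object name/namespace length limits keep object
    # names within what the test nodes' local filesystems can store.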
    conf:
      global:
        osd max object name len: 460
        osd max object namespace len: 64
- exec:
    client.0:
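      # Build a readproxy cache tier (ec-ca) in front of an erasure-coded
      # pool (ec), with a bloom hit set on the cache.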
      - ceph osd pool create ec-ca 1 1
      - ceph osd pool create ec 1 1 erasure default
      - ceph osd pool application enable ec rados
      - ceph osd tier add ec ec-ca
      - ceph osd tier cache-mode ec-ca readproxy
      - ceph osd tier set-overlay ec ec-ca
      - ceph osd pool set ec-ca hit_set_type bloom
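      # Set ~20 MB byte quotas on both pools and write with rados bench until
      # the cache pool fills up; the write is expected to stall or error once
      # full, hence the timeout and "|| true". Quotas are cleared afterwards.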
      - ceph osd pool set-quota ec-ca max_bytes 20480000
      - ceph osd pool set-quota ec max_bytes 20480000
      - ceph osd pool set ec-ca target_max_bytes 20480000
      - timeout 30 rados -p ec-ca bench 30 write || true
      - ceph osd pool set-quota ec-ca max_bytes 0
      - ceph osd pool set-quota ec max_bytes 0