mirror of
https://github.com/ceph/ceph
synced 2025-01-23 19:46:56 +00:00
d6c66f3fa6
This is mostly for testing: a lot of tests assume that there are no existing pools. These tests relied on a config to turn off creating the "device_health_metrics" pool which generally exists for any new Ceph cluster. It would be better to make these tests tolerant of the new .mgr pool but clearly there's a lot of these. So just convert the config to make it work. Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
35 lines | 641 B | YAML
# Teuthology suite fragment: exercise the max-pg-per-osd limit from the
# primary OSD's side, on a minimal 1-mon/1-mgr/4-osd cluster with no
# pre-created pools.
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
openstack:
- volumes:  # attached to each instance
    count: 4
    size: 10  # GB
overrides:
  ceph:
    # Start with an empty cluster: no rbd pool, and disable the mgr's
    # automatic .mgr pool so pool-creation limits can be tested from zero.
    create_rbd_pool: false
    pre-mgr-commands:
    - sudo ceph config set mgr mgr_pool false --force
    conf:
      mon:
        osd pool default size: 2
      osd:
        # Force the per-OSD PG limit to its floor so a single pool with
        # pg_num 1 is enough to trip it.
        mon max pg per osd: 1
        osd max pg per osd hard ratio: 1
    # Health warnings expected while PGs are deliberately over the limit.
    log-ignorelist:
    - \(TOO_FEW_PGS\)
    - \(PG_
    - \(PENDING_CREATING_PGS\)
tasks:
- install:
- ceph:
- osd_max_pg_per_osd:
    test_create_from_mon: false
    pg_num: 1
    pool_size: 2
    from_primary: true