Mirror of https://github.com/ceph/ceph (synced 2024-12-23 11:54:11 +00:00)

Commit d6c66f3fa6
This is mostly for testing: a lot of tests assume that there are no existing pools. These tests relied on a config to turn off creating the "device_health_metrics" pool, which generally exists for any new Ceph cluster. It would be better to make these tests tolerant of the new .mgr pool, but clearly there are a lot of them. So just convert the config to make it work.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
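The converted setting, as this suite now applies it (see the pre-mgr-commands in the file below), forces off the mgr's automatic pool creation before the mgr daemons start, so the no-pre-existing-pools assumption still holds:

    sudo ceph config set mgr mgr_pool false --force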
51 lines · 1.0 KiB · YAML
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - client.0
- - osd.3
  - osd.4
  - osd.5
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    log-ignorelist:
      - overall HEALTH_
      - \(PG_
      - \(OSD_
      - \(OBJECT_
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd min pg log entries: 25
        osd max pg log entries: 100
        osd max object name len: 460
        osd max object namespace len: 64
- exec:
    client.0:
      - sudo ceph osd pool create foo 64
      - sudo ceph osd pool application enable foo rados
      - rados -p foo bench 60 write -b 1024 --no-cleanup
      - sudo ceph osd pool set foo size 3
      - sudo ceph osd out 0 1
- sleep:
    duration: 60
- exec:
    client.0:
      - sudo ceph osd in 0 1
- sleep:
    duration: 60
- exec:
    client.0:
      - sudo ceph osd pool set foo size 2
- sleep:
    duration: 300
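For readability, the exec and sleep tasks above flatten to this client-side sequence (a transcription for reference, not part of the file; it assumes a shell on client.0 once the install and ceph tasks have brought the cluster up):

    sudo ceph osd pool create foo 64                  # pool "foo" with 64 PGs
    sudo ceph osd pool application enable foo rados
    rados -p foo bench 60 write -b 1024 --no-cleanup  # 60s of 1 KiB writes, objects kept
    sudo ceph osd pool set foo size 3
    sudo ceph osd out 0 1                             # marking OSDs out triggers backfill
    sleep 60
    sudo ceph osd in 0 1                              # bring them back in
    sleep 60
    sudo ceph osd pool set foo size 2
    sleep 300                                         # let the cluster settle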