Mirror of https://github.com/ceph/ceph (synced 2025-01-11 05:29:51 +00:00)
Commit d6c66f3fa6
This is mostly for testing: a lot of tests assume that there are no existing pools. These tests relied on a config to turn off creating the "device_health_metrics" pool which generally exists for any new Ceph cluster. It would be better to make these tests tolerant of the new .mgr pool, but clearly there are a lot of them. So just convert the config to make it work.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
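The converted config corresponds to the pre-mgr-commands entry in the YAML below; it is applied via pre-mgr-commands so that it takes effect before any mgr daemon starts, preventing the pool from being created in the first place:

    sudo ceph config set mgr mgr_pool false --force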
61 lines · 1.2 KiB · YAML
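# Cluster layout: three monitors, one mgr, and the two OSDs the test restarts.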
roles:
- - mon.a
  - mon.b
  - mon.c
  - mgr.x
  - osd.0
  - osd.1
openstack:
- volumes: # attached to each instance
    count: 2
    size: 20 # GB
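# The tasks below write data, then bounce each OSD with the noup flag set so
# it boots without being marked up; "osd recovery sleep" throttles recovery
# once the flag is cleared.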
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    fs: xfs
    conf:
      osd:
        osd recovery sleep: .1
        osd objectstore: filestore
    log-ignorelist:
      - \(POOL_APP_NOT_ENABLED\)
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(OBJECT_
      - \(PG_
      - overall HEALTH
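# Create a pool, write 4 KiB objects for 30 seconds (--no-cleanup keeps them),
# then set noup so the next OSD to restart boots but is not marked up.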
- exec:
    osd.0:
      - ceph osd pool create foo 32
      - ceph osd pool application enable foo foo
      - rados -p foo bench 30 write -b 4096 --no-cleanup
      - ceph osd set noup
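# Restart osd.0; with noup set it stays down, so do not wait for up/healthy.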
- ceph.restart:
    daemons: [osd.0]
    wait-for-up: false
    wait-for-healthy: false
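# Write more data while osd.0 is down so it has recovery to do, let it come
# up, then set noup again before restarting osd.1.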
- exec:
    osd.0:
      - sleep 5
      - rados -p foo bench 3 write -b 4096 --no-cleanup
      - ceph osd unset noup
      - sleep 10
      - ceph osd set noup
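# Repeat for osd.1: restart it while noup is set.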
- ceph.restart:
    daemons: [osd.1]
    wait-for-up: false
    wait-for-healthy: false
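# Mark osd.0 out so its PGs remap, then clear noup to let both OSDs rejoin
# and recovery proceed.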
- exec:
    osd.0:
      - ceph osd out 0
      - sleep 10
      - ceph osd unset noup
- ceph.healthy:
    wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
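# Bring osd.0 back in, and this time wait for full health.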
- exec:
    osd.0:
      - ceph osd in 0
- ceph.healthy: