mirror of
https://github.com/ceph/ceph
synced 2025-01-10 05:00:59 +00:00
ce654c5133
The scrub_pgs command also waits for healthy for a while, but fails silently if it times out, which means the subsequent scrubs will also fail to clean up. This forces an earlier failure that does not obscure the root cause. Signed-off-by: Sage Weil <sage@redhat.com>
25 lines · 706 B · YAML
# do not require luminous osds at mkfs time; only set flag at
# the end of the test run, then do a final scrub (to convert any
# legacy snapsets), and verify we are healthy.
tasks:
- full_sequential_finally:
  - exec:
      mon.a:
        - ceph osd set require_luminous_osds
        # make sure osds have latest map
        - rados -p rbd bench 5 write -b 4096
  # fail loudly (rather than silently timing out) before scrubbing,
  # so a scrub failure does not obscure the real root cause
  - ceph.healthy:
  - ceph.osd_scrub_pgs:
      cluster: ceph
  - exec:
      mon.a:
        - ceph pg dump -f json-pretty
        # quoted: the embedded ": 0" would otherwise start a YAML mapping
        - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
overrides:
  ceph:
    conf:
      global:
        mon debug no require luminous: true
  thrashosds:
    chance_thrash_cluster_full: 0