ceph/qa/workunits/rados/test_health_warnings.sh
commit aecaebe087 (Sage Weil, 2017-05-22 10:09:19 -04:00)
qa/workunits/rados/test_health_warning: misc fixes

- use 'noup' flag
- wait for healthy between each test
- check counts for each type

Fixes: http://tracker.ceph.com/issues/19990
Signed-off-by: Sage Weil <sage@redhat.com>

#!/bin/bash -ex

set -u

# number of osds = 10
crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
ceph osd setcrushmap -i crushmap
ceph osd tree
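
# A sketch of the hierarchy the --build arguments above are expected to
# produce (exact bucket names are an assumption; the counts match what the
# tests below assert): 10 OSDs grouped 2 per host, 2 hosts per rack,
# 2 racks per row, under a single root:
#
#   row0: rack0 (host0: osd.0 osd.1 | host1: osd.2 osd.3)
#         rack1 (host2: osd.4 osd.5 | host3: osd.6 osd.7)
#   row1: rack2 (host4: osd.8 osd.9)
#
# i.e. 2 rows, 3 racks, 5 hosts, 10 osds, with osd.8 and osd.9 alone on the
# only host in the only rack of the second row.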

# poll 'ceph health' until it stops reporting down OSDs
wait_for_healthy() {
  while ceph health | grep down
  do
    sleep 1
  done
}

# marking two OSDs on the same host down should produce a warning that
# counts 1 host and 2 osds, and names both OSDs in the detail output
test_mark_two_osds_same_host_down() {
  ceph osd set noup
  ceph osd down osd.0 osd.1
  ceph health detail
  ceph health | grep "1 host"
  ceph health | grep "2 osds"
  ceph health detail | grep "osd.0"
  ceph health detail | grep "osd.1"
  ceph osd unset noup
  wait_for_healthy
}

# osd.8 and osd.9 sit on the only host in their rack (and the only rack in
# their row), so the warning should also count 1 rack and 1 row
test_mark_two_osds_same_rack_down() {
  ceph osd set noup
  ceph osd down osd.8 osd.9
  ceph health detail
  ceph health | grep "1 host"
  ceph health | grep "1 rack"
  ceph health | grep "1 row"
  ceph health | grep "2 osds"
  ceph health detail | grep "osd.8"
  ceph health detail | grep "osd.9"
  ceph osd unset noup
  wait_for_healthy
}

# marking every OSD down should roll the warning up to the whole tree:
# 2 rows, 3 racks, 5 hosts, 10 osds
test_mark_all_osds_down() {
  ceph osd set noup
  ceph osd down `ceph osd ls`
  ceph health detail
  ceph health | grep "2 rows"
  ceph health | grep "3 racks"
  ceph health | grep "5 hosts"
  ceph health | grep "10 osds"
  ceph osd unset noup
  wait_for_healthy
}

test_mark_two_osds_same_host_down
test_mark_two_osds_same_rack_down
test_mark_all_osds_down

exit 0