mirror of
https://github.com/ceph/ceph
synced 2024-12-19 01:46:00 +00:00
e4e83a0dd7
A class can be considered as in-use only if it is referenced by any of the existing crush rules. The patch also makes the output more human readable. For example: ./bin/ceph osd crush rule create-replicated myrule default host ssd ./bin/ceph osd crush class rm ssd Error EBUSY: class 'ssd' still referenced by crush_rule 'myrule' Signed-off-by: xie xingguo <xie.xingguo@zte.com.cn>
196 lines
6.4 KiB
Bash
Executable File
#!/bin/bash
#
# Copyright (C) 2017 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
|
|
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
|
|
|
|
#######################################
# Test driver: configure CEPH_ARGS for a throw-away mon, then run every
# TEST_* function (or only those named on the command line), each inside
# its own setup/teardown cycle.
# Globals:   CEPH_MON, CEPH_ARGS (written, exported)
# Arguments: $1 - test directory; remaining args - optional TEST_* names
# Returns:   0 on success, 1 as soon as any setup/test/teardown fails
#######################################
function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    #
    # Disable auto-class, so we can inject device class manually below
    #
    CEPH_ARGS+="--osd-class-update-on-start=false "

    # Default to every TEST_* function currently defined in this shell.
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    # $funcs is intentionally unquoted: it is a space-separated word list.
    for func in $funcs ; do
        setup "$dir" || return 1
        $func "$dir" || return 1
        teardown "$dir" || return 1
    done
}
|
|
|
|
#######################################
# Write a small fixed payload as an object into the rbd pool.
# Arguments: $1 - test directory (payload staged in $1/ORIGINAL)
#            $2 - object name (default: SOMETHING)
# Returns:   0 on success, 1 if staging or the rados put fails
#######################################
function add_something() {
    local dir=$1
    local obj=${2:-SOMETHING}

    local payload=ABCDEF
    printf '%s\n' "$payload" > "$dir/ORIGINAL" || return 1
    rados --pool rbd put "$obj" "$dir/ORIGINAL" || return 1
}
|
|
|
|
#######################################
# Print the space-separated list of up OSDs an object maps to.
# Arguments: $1 - pool name
#            $2 - object name
# Outputs:   up-set OSD ids on stdout, e.g. "1 2 0"
# Returns:   status of the ceph|xmlstarlet pipeline
#######################################
function get_osds_up() {
    local poolname=$1
    local objectname=$2

    # Declaration split from assignment so the pipeline's exit
    # status is not masked by 'local'.
    local osds
    osds=$(ceph --format xml osd map "$poolname" "$objectname" 2>/dev/null | \
        $XMLSTARLET sel -t -m "//up/osd" -v . -o ' ') || return 1
    # Intentionally unquoted: word-splitting gets rid of the trailing space.
    echo $osds
}
|
|
|
|
# Verify that editing the crush map by hand (device class on osd.0 plus
# 'class ssd' on the rule's take step) restricts mapping to the ssd device.
function TEST_classes() {
    local dir=$1

    # One mon, three OSDs, and the default rbd pool.
    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1

    # Baseline mapping before any class is injected.
    # NOTE(review): assumes the up-set order "1 2 0" is deterministic for
    # this cluster layout — TODO confirm.
    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    #
    # osd.0 has class ssd and the rule is modified
    # to only take ssd devices.
    #
    ceph osd getcrushmap > $dir/map || return 1
    crushtool -d $dir/map -o $dir/map.txt || return 1
    ${SED} -i \
        -e '/device 0 osd.0/s/$/ class ssd/' \
        -e '/step take default/s/$/ class ssd/' \
        $dir/map.txt || return 1
    crushtool -c $dir/map.txt -o $dir/map-new || return 1
    ceph osd setcrushmap -i $dir/map-new || return 1

    #
    # There can only be one mapping since there only is
    # one device with ssd class.
    #
    # Retry with exponential backoff: the new crush map takes a while
    # to propagate and be reflected in 'osd map'.
    ok=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
            ok=true
            break
        fi
        sleep $delay
        ceph osd dump # for debugging purposes
        ceph pg dump # for debugging purposes
    done
    $ok || return 1
    #
    # Writing keeps working because the pool is min_size 1 by
    # default.
    #
    add_something $dir SOMETHING_ELSE || return 1

    #
    # Sanity check that the rule indeed has ssd
    # generated bucket with a name including ~ssd.
    #
    ceph osd crush dump | grep -q '~ssd' || return 1
}
|
|
|
|
# Verify 'ceph osd crush set-device-class': after TEST_classes has left a
# rule that only takes ssd devices, tagging osd.0 and osd.1 as ssd should
# widen the mapping to both of them.
function TEST_set_device_class() {
    local dir=$1

    # Reuse TEST_classes to build the cluster and the ssd-only rule.
    TEST_classes $dir || return 1

    ceph osd crush set-device-class ssd osd.0 || return 1
    ceph osd crush class ls-osd ssd | grep 0 || return 1
    ceph osd crush set-device-class ssd osd.1 || return 1
    ceph osd crush class ls-osd ssd | grep 1 || return 1
    ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent

    # Retry with exponential backoff until the new classes are reflected
    # in the object's up-set.
    ok=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
            ok=true
            break
        fi
        sleep $delay
        ceph osd crush dump
        ceph osd dump # for debugging purposes
        ceph pg dump # for debugging purposes
    done
    $ok || return 1
}
|
|
|
|
# Exercise the mon-side class commands: create/rename/rm, ls-osd, and the
# class_is_in_use guard — a class referenced by any crush rule must refuse
# both 'class rm' and 'class rename' with EBUSY.
function TEST_mon_classes() {
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1

    # Baseline mapping sanity check.
    # NOTE(review): assumes the up-set order "1 2 0" is deterministic for
    # this cluster layout — TODO confirm.
    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    # Basic lifecycle of an unused class.
    ceph osd crush class create CLASS || return 1
    ceph osd crush class create CLASS || return 1 # idempotent
    ceph osd crush class ls | grep CLASS || return 1
    ceph osd crush class rename CLASS TEMP || return 1
    ceph osd crush class ls | grep TEMP || return 1
    ceph osd crush class rename TEMP CLASS || return 1
    ceph osd crush class ls | grep CLASS || return 1
    ceph osd crush class rm CLASS || return 1
    expect_failure $dir ENOENT ceph osd crush class rm CLASS || return 1

    # The device class must survive moving the OSD in the crush tree.
    ceph osd crush set-device-class abc osd.2 || return 1
    ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
    # awk: row for osd id 2 must still show class 'abc' in column 2.
    out=`ceph osd tree |awk '$1 == 2 && $2 == "abc" {print $0}'`
    if [ "$out" == "" ]; then
        return 1
    fi

    ceph osd crush rule create-replicated foo-rule foo host abc || return 1

    # test class_is_in_use
    ceph osd crush set-device-class hdd osd.0 || return 1
    ceph osd crush set-device-class ssd osd.1 || return 1
    ceph osd crush rule create-replicated foo-hdd1 default host hdd || return 1
    ceph osd crush rule create-replicated foo-hdd2 default host hdd || return 1
    ceph osd crush rule create-replicated foo-ssd default host ssd || return 1
    expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1
    expect_failure $dir EBUSY ceph osd crush class rm ssd || return 1
    expect_failure $dir EBUSY ceph osd crush class rename hdd asdf || return 1
    expect_failure $dir EBUSY ceph osd crush class rename ssd qwer || return 1
    ceph osd crush rule rm foo-hdd1 || return 1
    expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1 # still referenced by foo-hdd2
    ceph osd crush rule rm foo-hdd2 || return 1
    ceph osd crush rule rm foo-ssd || return 1
    # Once no rule references them, rename and rm must succeed.
    ceph osd crush class rename hdd asdf || return 1
    ceph osd crush class rename ssd qwer || return 1
    ceph osd crush class rm asdf || return 1
    ceph osd crush class rm qwer || return 1
    expect_failure $dir EBUSY ceph osd crush class rm abc || return 1 # still referenced by foo-rule
    ceph osd crush rule rm foo-rule || return 1
    ceph osd crush class rm abc || return 1
}
|
|
|
|
# Entry point: main comes from the sourced ceph-helpers.sh — presumably it
# dispatches to run() above with the test name; verify against the helper.
main crush-classes "$@"

# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
# End:
|