#!/bin/bash -x

source $(dirname $0)/../ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}

function check_no_osd_down()
{
! ceph osd dump | grep ' down '
}

function wait_no_osd_down()
{
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_no_osd_down ; then
echo "waiting for osd(s) to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
check_no_osd_down
}
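
# Usage sketch (illustrative, not executed at this point): after forcing an
# OSD down in a test, block until "ceph osd dump" no longer reports any OSD
# as down:
#   ceph osd down 0
#   wait_no_osd_down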

function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
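
# Usage sketch (illustrative): expect_false inverts the exit status of the
# wrapped command, so an assertion like the following passes only if the
# command fails:
#   expect_false ceph osd set bogus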

TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
local max=$1
shift
local status
local tmpfile=$TEMP_DIR/retry_eagain.$$
local count
for count in $(seq 1 $max) ; do
status=0
"$@" > $tmpfile 2>&1 || status=$?
if test $status = 0 ||
! grep --quiet EAGAIN $tmpfile ; then
break
fi
sleep 1
done
if test $count = $max ; then
echo retried with non zero exit status, $max times: "$@" >&2
fi
cat $tmpfile
rm $tmpfile
return $status
}
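
# Usage sketch (mirrors the call used further below): retry a command up to
# 5 times while it keeps failing with EAGAIN in its output:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version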

#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
local status=0
local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
"$@" > $tmpfile 2>&1 || status=$?
if test $status != 0 &&
grep --quiet ENXIO $tmpfile ; then
echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
fi
cat $tmpfile
rm $tmpfile
return $status
}
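
# Usage sketch (illustrative): on its own, map_enxio_to_eagain just relays the
# command's output and exit status, adding an EAGAIN marker when ENXIO shows up:
#   map_enxio_to_eagain ceph tell osd.0 version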

function check_response()
{
expected_string=$1
retcode=$2
expected_retcode=$3
if [ "$expected_retcode" -a $retcode != $expected_retcode ]; then
echo "return code invalid: got $retcode, expected $expected_retcode" >&2
exit 1
fi

if ! grep --quiet -- "$expected_string" $TMPFILE ; then
echo "Didn't find $expected_string in output" >&2
cat $TMPFILE >&2
exit 1
fi
}
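
# Usage sketch (taken from the tests below): capture a command's stderr in
# $TMPFILE and assert on the error message, optionally checking the return
# code as well:
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
#   set +e
#   ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
#   check_response 'erasure-code' $? 22
#   set -e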

function get_config_value_or_die()
{
local target config_opt raw val
target=$1
config_opt=$2
raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
if [[ $? -ne 0 ]]; then
echo "error obtaining config opt '$config_opt' from '$target': $raw"
exit 1
fi
raw=`echo $raw | sed -e 's/[{} "]//g'`
val=`echo $raw | cut -f2 -d:`
echo "$val"
return 0
}

function expect_config_value()
{
local target config_opt expected_val val
target=$1
config_opt=$2
expected_val=$3
val=$(get_config_value_or_die $target $config_opt)
if [[ "$val" != "$expected_val" ]]; then
echo "expected '$expected_val', got '$val'"
exit 1
fi
}
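
# Usage sketch (mirrors the SI-unit tests below): read a daemon option over
# the admin socket and assert on its value:
#   get_config_value_or_die "mon.a" "mon_pg_warn_min_objects"
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10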

function ceph_watch_start()
{
local whatch_opt=--watch
if [ -n "$1" ]; then
whatch_opt=--watch-$1
fi
CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
ceph $whatch_opt > $CEPH_WATCH_FILE &
CEPH_WATCH_PID=$!
# wait until the "ceph" client is connected and receiving
# log messages from monitor
for i in `seq 3`; do
grep -q "cluster" $CEPH_WATCH_FILE && break
sleep 1
done
}

function ceph_watch_wait()
{
local regexp=$1
local timeout=30
if [ -n "$2" ]; then
timeout=$2
fi
for i in `seq ${timeout}`; do
grep -q "$regexp" $CEPH_WATCH_FILE && break
sleep 1
done
kill $CEPH_WATCH_PID
if ! grep "$regexp" $CEPH_WATCH_FILE; then
echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
cat $CEPH_WATCH_FILE >&2
return 1
fi
}
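
# Usage sketch (taken from test_mon_misc below): start a background
# "ceph --watch*" client, emit a marker, then wait for it to appear in the
# watch output (the second argument is an optional timeout in seconds):
#   ceph_watch_start
#   ceph log "my marker message"
#   ceph_watch_wait "my marker message" 60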

function test_mon_injectargs()
{
CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
check_response "osd_enable_op_tracker = 'false'"
! grep "the.dump" $TMPFILE || return 1
ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
check_response "osd_enable_op_tracker = 'false'"
ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
check_response "osd_enable_op_tracker = 'true'"
ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
    ceph tell osd.0 injectargs -- '--osd_op_history_duration'
ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
check_response "mon_lease = '6' (unchangeable)"
# osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1
}

function test_mon_injectargs_SI()
{
# Test SI units during injectargs and 'config set'
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect SI units to be passed.
# Keep in mind that all integer based options (i.e., INT,
# LONG, U32, U64) will accept SI unit modifiers.
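# For example, 10K is parsed as 10*1024 = 10240 and 1G as 1073741824; those
# exact values are what the expect_config_value checks below assert.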
initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
check_response "'10F': (22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}

function test_tiering_agent()
{
local slow=slow_eviction
local fast=fast_eviction
ceph osd pool create $slow 1 1
ceph osd pool create $fast 1 1
ceph osd tier add $slow $fast
ceph osd tier cache-mode $fast writeback
ceph osd tier set-overlay $slow $fast
ceph osd pool set $fast hit_set_type bloom
rados -p $slow put obj1 /etc/group
ceph osd pool set $fast target_max_objects 1
ceph osd pool set $fast hit_set_count 1
ceph osd pool set $fast hit_set_period 5
# wait for the object to be evicted from the cache
local evicted
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
# the object is proxy read and promoted to the cache
rados -p $slow get obj1 - >/dev/null
# wait for the promoted object to be evicted again
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
ceph osd tier remove-overlay $slow
ceph osd tier remove $slow $fast
ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}

function test_tiering()
{
# tiering
ceph osd pool create slow 2
ceph osd pool create slow2 2
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add slow cache
ceph osd tier add slow cache2
expect_false ceph osd tier add slow2 cache
# test some state transitions
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache forward
ceph osd tier cache-mode cache forward --yes-i-really-mean-it
expect_false ceph osd tier cache-mode cache readonly
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
expect_false ceph osd tier cache-mode cache forward
ceph osd tier cache-mode cache forward --yes-i-really-mean-it
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache writeback
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
# test with dirty objects in the tier pool
# tier pool currently set to 'writeback'
rados -p cache put /etc/passwd /etc/passwd
ceph tell osd.\* flush_pg_stats || true
# 1 dirty object in pool 'cache'
ceph osd tier cache-mode cache proxy
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache writeback
# remove object from tier pool
rados -p cache rm /etc/passwd
rados -p cache cache-flush-evict-all
ceph tell osd.\* flush_pg_stats || true
# no dirty objects in pool 'cache'
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
TRIES=0
while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
do
grep 'currently creating pgs' $TMPFILE
TRIES=$(( $TRIES + 1 ))
test $TRIES -ne 60
sleep 3
done
expect_false ceph osd pool set cache pg_num 4
ceph osd tier cache-mode cache none
ceph osd tier set-overlay slow cache
expect_false ceph osd tier set-overlay slow cache2
expect_false ceph osd tier remove slow cache
ceph osd tier remove-overlay slow
ceph osd tier set-overlay slow cache2
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache
ceph osd tier add slow2 cache
expect_false ceph osd tier set-overlay slow cache
ceph osd tier set-overlay slow2 cache
ceph osd tier remove-overlay slow2
ceph osd tier remove slow2 cache
ceph osd tier remove slow cache2
# make sure a non-empty pool fails
rados -p cache2 put /etc/passwd /etc/passwd
while ! ceph df | grep cache2 | grep ' 1 ' ; do
echo waiting for pg stats to flush
sleep 2
done
expect_false ceph osd tier add slow cache2
ceph osd tier add slow cache2 --force-nonempty
ceph osd tier remove slow cache2

ceph osd pool ls | grep cache2
ceph osd pool ls -f json-pretty | grep cache2
ceph osd pool ls detail | grep cache2
ceph osd pool ls detail -f json-pretty | grep cache2

ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it

# make sure we can't clobber snapshot state
ceph osd pool create snap_base 2
ceph osd pool create snap_cache 2
ceph osd pool mksnap snap_cache snapname
expect_false ceph osd tier add snap_base snap_cache
ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it

# make sure we can't create an ec pool tier
ceph osd pool create eccache 2 2 erasure
ceph osd pool create repbase 2
expect_false ceph osd tier add repbase eccache
ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
ceph osd pool delete eccache eccache --yes-i-really-really-mean-it

# convenient add-cache command
ceph osd pool create cache3 2
ceph osd tier add-cache slow cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
ceph osd tier remove slow cache3 2> $TMPFILE || true
check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache3
ceph osd pool ls | grep cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
! ceph osd pool ls | grep cache3 || exit 1

ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
ceph osd pool delete slow slow --yes-i-really-really-mean-it

# check whether add-cache works
ceph osd pool create datapool 2
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd tier cache-mode cachepool writeback
rados -p datapool put object /etc/passwd
rados -p cachepool stat object
rados -p cachepool cache-flush object
rados -p datapool stat object
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it

# protection against pool removal when used as tiers
ceph osd pool create datapool 2
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'datapool' has tiers cachepool"
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it

## check health check
ceph osd set notieragent
ceph osd pool create datapool 2
ceph osd pool create cache4 2
ceph osd tier add-cache datapool cache4 1024000
ceph osd tier cache-mode cache4 writeback
tmpfile=$(mktemp|grep tmp)
dd if=/dev/zero of=$tmpfile bs=4K count=1
ceph osd pool set cache4 target_max_objects 200
ceph osd pool set cache4 target_max_bytes 1000000
rados -p cache4 put foo1 $tmpfile
rados -p cache4 put foo2 $tmpfile
rm -f $tmpfile
ceph tell osd.\* flush_pg_stats || true
ceph df | grep datapool | grep ' 2 '
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cache4
ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
ceph osd unset notieragent

# make sure 'tier remove' behaves as we expect
# i.e., removing a tier from a pool that's not its base pool only
# results in a 'pool foo is now (or already was) not a tier of bar'
#
ceph osd pool create basepoolA 2
ceph osd pool create basepoolB 2
poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
ceph osd pool create cache5 2
ceph osd pool create cache6 2
ceph osd tier add basepoolA cache5
ceph osd tier add basepoolB cache6
ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}

function test_auth()
{
ceph auth add client.xx mon allow osd "allow *"
ceph auth export client.xx >client.xx.keyring
ceph auth add client.xx -i client.xx.keyring
rm -f client.xx.keyring
ceph auth list | grep client.xx
ceph auth get client.xx | grep caps | grep mon
ceph auth get client.xx | grep caps | grep osd
ceph auth get-key client.xx
ceph auth print-key client.xx
ceph auth print_key client.xx
ceph auth caps client.xx osd "allow rw"
expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
ceph auth get client.xx | grep osd | grep "allow rw"
ceph auth export | grep client.xx
ceph auth export -o authfile
ceph auth import -i authfile
ceph auth export -o authfile2
diff authfile authfile2
rm authfile authfile2
ceph auth del client.xx
expect_false ceph auth get client.xx

# (almost) interactive mode
echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
ceph auth get client.xx
# script mode
echo 'auth del client.xx' | ceph
expect_false ceph auth get client.xx

#
# get / set auid
#
local auid=444
ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
ceph auth import --in-file TEST-keyring
rm TEST-keyring
ceph auth get client.TEST > $TMPFILE
check_response "auid = $auid"
ceph --format json-pretty auth get client.TEST > $TMPFILE
check_response '"auid": '$auid
ceph auth list > $TMPFILE
check_response "auid: $auid"
ceph --format json-pretty auth list > $TMPFILE
check_response '"auid": '$auid
ceph auth del client.TEST
}

function test_auth_profiles()
{
ceph auth add client.xx-profile-ro mon 'allow profile read-only'
ceph auth add client.xx-profile-rw mon 'allow profile read-write'
ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
ceph auth export > client.xx.keyring
# read-only is allowed all read-only commands (auth excluded)
ceph -n client.xx-profile-ro -k client.xx.keyring status
ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
# read-only gets access denied for rw commands or auth commands
ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
check_response "EACCES: access denied"
# read-write is allowed for all read-write commands (except auth)
ceph -n client.xx-profile-rw -k client.xx.keyring status
ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
ceph -n client.xx-profile-rw -k client.xx.keyring log foo
ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
# read-write gets access denied for auth commands
ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
check_response "EACCES: access denied"
# role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
ceph -n client.xx-profile-rd -k client.xx.keyring auth list
ceph -n client.xx-profile-rd -k client.xx.keyring auth export
ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
ceph -n client.xx-profile-rd -k client.xx.keyring status
ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
check_response "EACCES: access denied"
# read-only 'mon' subsystem commands are allowed
ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
# but read-write 'mon' commands are not
ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
# add a new role-definer with the existing role-definer
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth add client.xx-profile-rd2 mon 'allow profile role-definer'
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth export > client.xx.keyring.2
# remove old role-definer using the new role-definer
ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
auth del client.xx-profile-rd
# remove the remaining role-definer with admin
ceph auth del client.xx-profile-rd2
rm -f client.xx.keyring client.xx.keyring.2
}

function test_mon_caps()
{
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
check_response "Permission denied"

rm -rf $TEMP_DIR/ceph.client.bug.keyring
ceph auth del client.bug
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
check_response "Permission denied"
}

function test_mon_misc()
{
# with and without verbosity
ceph osd dump | grep '^epoch'
ceph --concise osd dump | grep '^epoch'

# df
ceph df > $TMPFILE
grep GLOBAL $TMPFILE
grep -v DIRTY $TMPFILE
ceph df detail > $TMPFILE
grep CATEGORY $TMPFILE
grep DIRTY $TMPFILE
ceph df --format json > $TMPFILE
grep 'total_bytes' $TMPFILE
grep -v 'dirty' $TMPFILE
ceph df detail --format json > $TMPFILE
grep 'rd_bytes' $TMPFILE
grep 'dirty' $TMPFILE
ceph df --format xml | grep '<total_bytes>'
ceph df detail --format xml | grep '<rd_bytes>'

ceph fsid
ceph health
ceph health detail
ceph health --format json-pretty
ceph health detail --format xml-pretty

ceph node ls
for t in mon osd mds ; do
ceph node ls $t
done

ceph_watch_start
mymsg="this is a test log message $$.$(date)"
ceph log "$mymsg"
ceph_watch_wait "$mymsg"

ceph mon metadata a
ceph mon metadata
ceph node ls
}

function check_mds_active()
{
fs_name=$1
ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
fs_name=$1
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_mds_active $fs_name ; then
echo "waiting for an active MDS daemon ($i/$max_run)"
sleep 5
else
break
fi
done
check_mds_active $fs_name
}

function get_mds_gids()
{
fs_name=$1
ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}

function fail_all_mds()
{
fs_name=$1
ceph fs set $fs_name cluster_down true
mds_gids=$(get_mds_gids $fs_name)
for mds_gid in $mds_gids ; do
ceph mds fail $mds_gid
done
if check_mds_active $fs_name ; then
echo "An active MDS remains, something went wrong"
ceph fs get $fs_name
exit -1
fi
}

function remove_all_fs()
{
existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
for fs_name in $existing_fs ; do
echo "Removing fs ${fs_name}..."
fail_all_mds $fs_name
echo "Removing existing filesystem '${fs_name}'..."
ceph fs rm $fs_name --yes-i-really-mean-it
echo "Removed '${fs_name}'."
done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
ceph auth list | grep "^mds"
}

# some of the commands are just not idempotent.
function without_test_dup_command()
{
if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
$@
else
local saved=${CEPH_CLI_TEST_DUP_COMMAND}
unset CEPH_CLI_TEST_DUP_COMMAND
$@
CEPH_CLI_TEST_DUP_COMMAND=$saved
fi
}
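
# Usage sketch (taken from test_mds_tell below): respawn is not idempotent,
# so it is run with duplicate-command testing temporarily disabled:
#   without_test_dup_command ceph tell mds.0 respawn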

function test_mds_tell()
{
FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi

remove_all_fs
ceph osd pool create fs_data 10
ceph osd pool create fs_metadata 10
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME

# Test injectargs by GID
old_mds_gids=$(get_mds_gids $FS_NAME)
echo Old GIDs: $old_mds_gids
for mds_gid in $old_mds_gids ; do
ceph tell mds.$mds_gid injectargs "--debug-mds 20"
done
expect_false ceph tell mds.a injectargs mds_max_file_recover -1

# Test respawn by rank
without_test_dup_command ceph tell mds.0 respawn
new_mds_gids=$old_mds_gids
while [ $new_mds_gids -eq $old_mds_gids ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids

# Test respawn by ID
without_test_dup_command ceph tell mds.a respawn
new_mds_gids=$old_mds_gids
while [ $new_mds_gids -eq $old_mds_gids ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids

remove_all_fs
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds()
{
FS_NAME=cephfs
remove_all_fs

ceph osd pool create fs_data 10
ceph osd pool create fs_metadata 10
ceph fs new $FS_NAME fs_metadata fs_data
ceph fs set $FS_NAME cluster_down true
ceph fs set $FS_NAME cluster_down false

# Legacy commands, act on default fs
ceph mds cluster_down
ceph mds cluster_up
ceph mds compat rm_incompat 4
ceph mds compat rm_incompat 4

# We don't want any MDSs to be up, their activity can interfere with
# the "current_epoch + 1" checking below if they're generating updates
fail_all_mds $FS_NAME

# Check for default crash_replay_interval set automatically in 'fs new'
#This may vary based on ceph.conf (e.g., it's 5 in teuthology runs)
#ceph osd dump | grep fs_data > $TMPFILE
#check_response "crash_replay_interval 45 "

2014-06-25 23:55:46 +00:00
ceph mds compat show
expect_false ceph mds deactivate 2
ceph mds dump
2016-02-17 14:59:12 +00:00
ceph fs dump
ceph fs get $FS_NAME
for mds_gid in $( get_mds_gids $FS_NAME ) ; do
2015-03-31 08:36:52 +00:00
ceph mds metadata $mds_id
done
2016-05-13 10:29:09 +00:00
ceph mds metadata
2014-06-25 23:55:46 +00:00
# XXX mds fail, but how do you undo it?
mdsmapfile=$TEMP_DIR/mdsmap.$$
current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
[ -s $mdsmapfile ]
rm $mdsmapfile

ceph osd pool create data2 10
ceph osd pool create data3 10
data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
ceph mds add_data_pool $data2_pool
ceph mds add_data_pool $data3_pool
ceph mds add_data_pool 100 >& $TMPFILE || true
check_response "Error ENOENT"
ceph mds add_data_pool foobarbaz >& $TMPFILE || true
check_response "Error ENOENT"
ceph mds remove_data_pool $data2_pool
ceph mds remove_data_pool $data3_pool
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
expect_false ceph mds set_max_mds 4
ceph mds set allow_multimds true --yes-i-really-mean-it
ceph mds set_max_mds 4
ceph mds set_max_mds 3
ceph mds set_max_mds 256
expect_false ceph mds set_max_mds 257
ceph mds set max_mds 4
ceph mds set max_mds 256
expect_false ceph mds set max_mds 257
expect_false ceph mds set max_mds asdf
expect_false ceph mds set inline_data true
ceph mds set inline_data true --yes-i-really-mean-it
ceph mds set inline_data yes --yes-i-really-mean-it
ceph mds set inline_data 1 --yes-i-really-mean-it
expect_false ceph mds set inline_data --yes-i-really-mean-it
ceph mds set inline_data false
ceph mds set inline_data no
ceph mds set inline_data 0
expect_false ceph mds set inline_data asdf
ceph mds set max_file_size 1048576
expect_false ceph mds set max_file_size 123asdf

expect_false ceph mds set allow_new_snaps
expect_false ceph mds set allow_new_snaps true
ceph mds set allow_new_snaps true --yes-i-really-mean-it
ceph mds set allow_new_snaps 0
ceph mds set allow_new_snaps false
ceph mds set allow_new_snaps no
expect_false ceph mds set allow_new_snaps taco

# we should never be able to add EC pools as data or metadata pools
# create an ec-pool...
ceph osd pool create mds-ec-pool 10 10 erasure
set +e
ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

fail_all_mds $FS_NAME

set +e
# Check that rmfailed requires confirmation
expect_false ceph mds rmfailed 0
ceph mds rmfailed 0 --yes-i-really-mean-it
set -e

# Check that `newfs` is no longer permitted
expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

# Check that 'fs reset' runs
ceph fs reset $FS_NAME --yes-i-really-mean-it

# Check that creating a second FS fails by default
ceph osd pool create fs_metadata2 10
ceph osd pool create fs_data2 10
set +e
expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
set -e

# Check that setting enable_multiple enables creation of second fs
ceph fs flag set enable_multiple true --yes-i-really-mean-it
ceph fs new cephfs2 fs_metadata2 fs_data2

# Clean up multi-fs stuff
fail_all_mds cephfs2
ceph fs rm cephfs2 --yes-i-really-mean-it
ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

fail_all_mds $FS_NAME

# Clean up to enable subsequent fs new tests
ceph fs rm $FS_NAME --yes-i-really-mean-it

set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e

# ... now create a cache tier in front of the EC pool...
ceph osd pool create mds-tier 2
ceph osd tier add mds-ec-pool mds-tier
ceph osd tier set-overlay mds-ec-pool mds-tier
tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

# Use of a readonly tier should be forbidden
ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool 2>$TMPFILE
check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
set -e

# Use of a writeback tier should enable FS creation
ceph osd tier cache-mode mds-tier writeback
ceph fs new $FS_NAME fs_metadata mds-ec-pool

# While a FS exists using the tiered pools, I should not be allowed
# to remove the tier
set +e
ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
check_response 'in use by CephFS' $? 16
ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e

fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it

# ... but we should be forbidden from using the cache pool in the FS directly.
set +e
ceph fs new $FS_NAME fs_metadata mds-tier 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
set -e

# Clean up tier + EC pools
ceph osd tier remove-overlay mds-ec-pool
ceph osd tier remove mds-ec-pool mds-tier

# Create a FS using the 'cache' pool now that it's no longer a tier
ceph fs new $FS_NAME fs_metadata mds-tier

# We should be forbidden from using this pool as a tier now that
# it's in use for CephFS
set +e
ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e

fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

# Create a FS and check that we can subsequently add a cache tier to it
ceph fs new $FS_NAME fs_metadata fs_data

# Adding overlay to FS pool should be permitted, RADOS clients handle this.
ceph osd tier add fs_metadata mds-tier
ceph osd tier cache-mode mds-tier writeback
ceph osd tier set-overlay fs_metadata mds-tier

# Removing tier should be permitted because the underlying pool is
# replicated (#11504 case)
ceph osd tier cache-mode mds-tier proxy
ceph osd tier remove-overlay fs_metadata
ceph osd tier remove fs_metadata mds-tier
ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

# Clean up FS
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it

ceph mds stat
# ceph mds tell mds.a getmap
# ceph mds rm
# ceph mds rmfailed
# ceph mds set_state
# ceph mds stop

ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds_metadata()
{
local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
test "$nmons" -gt 0

ceph mds dump |
sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
while read gid id rank; do
ceph mds metadata ${gid} | grep '"hostname":'
ceph mds metadata ${id} | grep '"hostname":'
ceph mds metadata ${rank} | grep '"hostname":'
local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
test "$n" -eq "$nmons"
done

expect_false ceph mds metadata UNKNOWN
}

function test_mon_mon()
{
# print help message
ceph --help mon
# no mon add/remove
ceph mon dump
ceph mon getmap -o $TEMP_DIR/monmap.$$
[ -s $TEMP_DIR/monmap.$$ ]
# ceph mon tell
ceph mon_status
}

function test_mon_osd()
{
#
# osd blacklist
#
bl=192.168.0.1:0/1000
# Escaped form which may appear in JSON output
bl_json=192.168.0.1:0\\\\/1000
ceph osd blacklist add $bl
ceph osd blacklist ls | grep $bl
ceph osd blacklist ls --format=json-pretty | grep $bl_json
ceph osd dump --format=json-pretty | grep $bl
ceph osd dump | grep "^blacklist $bl"
ceph osd blacklist rm $bl
ceph osd blacklist ls | expect_false grep $bl

bl=192.168.0.1
# test without nonce, invalid nonce
ceph osd blacklist add $bl
ceph osd blacklist ls | grep $bl
ceph osd blacklist rm $bl
ceph osd blacklist ls | expect_false grep $bl
expect_false "ceph osd blacklist $bl/-1"
expect_false "ceph osd blacklist $bl/foo"

# test with wrong address
expect_false "ceph osd blacklist 1234.56.78.90/100"

# Test `clear`
ceph osd blacklist add $bl
ceph osd blacklist ls | grep $bl
ceph osd blacklist clear
ceph osd blacklist ls | expect_false grep $bl

2014-06-25 00:17:58 +00:00
#
# osd crush
#
2014-12-05 23:58:03 +00:00
ceph osd crush reweight-all
2014-06-25 23:55:46 +00:00
ceph osd crush tunables legacy
ceph osd crush show-tunables | grep argonaut
ceph osd crush tunables bobtail
ceph osd crush show-tunables | grep bobtail
ceph osd crush tunables firefly
ceph osd crush show-tunables | grep firefly
2014-12-03 00:43:16 +00:00
ceph osd crush set-tunable straw_calc_version 0
ceph osd crush get-tunable straw_calc_version | grep 0
ceph osd crush set-tunable straw_calc_version 1
ceph osd crush get-tunable straw_calc_version | grep 1
2014-06-25 00:17:58 +00:00
#
# osd scrub
#
2014-06-25 23:55:46 +00:00
# how do I tell when these are done?
ceph osd scrub 0
ceph osd deep-scrub 0
ceph osd repair 0
2015-08-07 20:14:09 +00:00
for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full sortbitwise
2014-06-25 23:55:46 +00:00
do
2013-08-07 15:35:48 +00:00
ceph osd set $f
ceph osd unset $f
2014-06-25 23:55:46 +00:00
done
2016-01-14 18:28:21 +00:00
ceph osd set sortbitwise # new backends cant handle nibblewise
2014-06-25 23:55:46 +00:00
expect_false ceph osd set bogus
expect_false ceph osd unset bogus
2016-05-21 06:11:55 +00:00
ceph osd set require_jewel_osds
expect_false ceph osd unset require_jewel_osds
2016-07-28 11:17:34 +00:00
ceph osd set require_kraken_osds
expect_false ceph osd unset require_kraken_osds
2014-06-25 23:55:46 +00:00
ceph osd set noup
ceph osd down 0
ceph osd dump | grep 'osd.0 down'
ceph osd unset noup
max_run=1000
for ((i=0; i < $max_run; i++)); do
if ! ceph osd dump | grep 'osd.0 up' ; then
echo "waiting for osd.0 to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
ceph osd dump | grep 'osd.0 up'

ceph osd thrash 0
ceph osd dump | grep 'osd.0 up'

# ceph osd find expects the OsdName, so both ints and osd.n should work.
ceph osd find 1
ceph osd find osd.1
expect_false ceph osd find osd.xyz
expect_false ceph osd find xyz
expect_false ceph osd find 0.1
ceph --format plain osd find 1 # falls back to json-pretty

if [ `uname` == Linux ]; then
ceph osd metadata 1 | grep 'distro'
ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
fi

ceph osd out 0
ceph osd dump | grep 'osd.0.*out'
ceph osd in 0
ceph osd dump | grep 'osd.0.*in'
ceph osd find 0

f=$TEMP_DIR/map.$$
ceph osd getcrushmap -o $f
[ -s $f ]
ceph osd setcrushmap -i $f
rm $f
ceph osd getmap -o $f
[ -s $f ]
rm $f
save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
[ "$save" -gt 0 ]
ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
ceph osd setmaxosd 10
ceph osd getmaxosd | grep 'max_osd = 10'
ceph osd setmaxosd $save
ceph osd getmaxosd | grep "max_osd = $save"

for id in `ceph osd ls` ; do
retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
done

ceph osd rm 0 2>&1 | grep 'EBUSY'

local old_osds=$(echo $(ceph osd ls))
id=`ceph osd create`
ceph osd find $id
ceph osd lost $id --yes-i-really-mean-it
expect_false ceph osd setmaxosd $id
local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
ceph osd rm $id
done

uuid=`uuidgen`
id=`ceph osd create $uuid`
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
ceph osd rm $id

ceph --help osd

# reset max_osd.
ceph osd setmaxosd $id
ceph osd getmaxosd | grep "max_osd = $save"
local max_osd=$save
ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
id=`ceph osd create $uuid $max_osd`
[ "$id" = "$max_osd" ]
ceph osd find $id
max_osd=$((max_osd + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EINVAL'
ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EINVAL'
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
id2=`ceph osd create $uuid $id`
[ "$id" = "$id2" ]
uuid=`uuidgen`
local gap_start=$max_osd
id=`ceph osd create $uuid $((gap_start + 100))`
[ "$id" = "$((gap_start + 100))" ]
max_osd=$((id + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $gap_start 2>&1 | grep 'EINVAL'
#
# When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
# is repeated and consumes two osd id, not just one.
#
local next_osd
if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
next_osd=$((gap_start + 1))
else
next_osd=$gap_start
fi
id=`ceph osd create`
[ "$id" = "$next_osd" ]

next_osd=$((id + 1))
id=`ceph osd create $(uuidgen)`
[ "$id" = "$next_osd" ]

next_osd=$((id + 1))
id=`ceph osd create $(uuidgen) $next_osd`
[ "$id" = "$next_osd" ]

local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
[ $id -ge $save ]
ceph osd rm $id
done
ceph osd setmaxosd $save

ceph osd ls
ceph osd pool create data 10
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
ceph osd pool delete data data --yes-i-really-really-mean-it

ceph osd pause
ceph osd dump | grep 'flags pauserd,pausewr'
ceph osd unpause

ceph osd tree
ceph osd perf
ceph osd blocked-by

ceph osd stat | grep up,
}

function test_mon_osd_pool()
{
#
# osd pool
#
ceph osd pool create data 10
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap
expect_false ceph osd pool rmsnap pool_fake snapshot
ceph osd pool delete data data --yes-i-really-really-mean-it

ceph osd pool create data2 10
ceph osd pool rename data2 data3
ceph osd lspools | grep data3
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

ceph osd pool create replicated 12 12 replicated
ceph osd pool create replicated 12 12 replicated
ceph osd pool create replicated 12 12 # default is replicated
ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
# should fail because the type is not the same
expect_false ceph osd pool create replicated 12 12 erasure
ceph osd lspools | grep replicated
ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
}

function test_mon_osd_pool_quota()
{
#
# test osd pool set/get quota
#
# create tmp pool
ceph osd pool create tmp-quota-pool 36
#
# set erroneous quotas
#
expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
#
# set valid quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10
ceph osd pool set-quota tmp-quota-pool max_objects 10M
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
#
# get quotas in json-pretty format
#
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_objects":.*10485760'
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_bytes":.*10'
#
# reset pool quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 0
ceph osd pool set-quota tmp-quota-pool max_objects 0
#
# test N/A quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
#
# cleanup tmp pool
ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}

function test_mon_pg()
{
ceph pg debug unfound_objects_exist
ceph pg debug degraded_pgs_exist
ceph pg deep-scrub 0.0
ceph pg dump
ceph pg dump pgs_brief --format=json
ceph pg dump pgs --format=json
ceph pg dump pools --format=json
ceph pg dump osds --format=json
ceph pg dump sum --format=json
ceph pg dump all --format=json
ceph pg dump pgs_brief osds --format=json
ceph pg dump pools osds pgs_brief --format=json
ceph pg dump_json
ceph pg dump_pools_json
ceph pg dump_stuck inactive
ceph pg dump_stuck unclean
ceph pg dump_stuck stale
ceph pg dump_stuck undersized
ceph pg dump_stuck degraded
ceph pg ls
ceph pg ls 0
ceph pg ls stale
expect_false ceph pg ls scrubq
ceph pg ls active stale repair recovering
ceph pg ls 0 active
ceph pg ls 0 active stale
ceph pg ls-by-primary osd.0
ceph pg ls-by-primary osd.0 0
ceph pg ls-by-primary osd.0 active
ceph pg ls-by-primary osd.0 active stale
ceph pg ls-by-primary osd.0 0 active stale
ceph pg ls-by-osd osd.0
ceph pg ls-by-osd osd.0 0
ceph pg ls-by-osd osd.0 active
ceph pg ls-by-osd osd.0 active stale
ceph pg ls-by-osd osd.0 0 active stale
ceph pg ls-by-pool rbd
ceph pg ls-by-pool rbd active stale
# can't test this...
# ceph pg force_create_pg
ceph pg getmap -o $TEMP_DIR/map.$$
[ -s $TEMP_DIR/map.$$ ]
ceph pg map 0.0 | grep acting
ceph pg repair 0.0
ceph pg scrub 0.0

ceph pg set_full_ratio 0.90
ceph pg dump --format=plain | grep '^full_ratio 0.9'
ceph pg set_full_ratio 0.95
ceph pg set_nearfull_ratio 0.90
ceph pg dump --format=plain | grep '^nearfull_ratio 0.9'
ceph pg set_nearfull_ratio 0.85
ceph pg stat | grep 'pgs:'
ceph pg 0.0 query
ceph tell 0.0 query
ceph quorum enter
ceph quorum_status
ceph report | grep osd_stats
ceph status
ceph -s

#
# tell osd version
#
ceph tell osd.0 version
expect_false ceph tell osd.9999 version
expect_false ceph tell osd.foo version

# back to pg stuff
ceph tell osd.0 dump_pg_recovery_stats | grep Started

ceph osd reweight 0 0.9
expect_false ceph osd reweight 0 -1
ceph osd reweight osd.0 1

ceph osd primary-affinity osd.0 .9
expect_false ceph osd primary-affinity osd.0 -2
expect_false ceph osd primary-affinity osd.9999 .5
ceph osd primary-affinity osd.0 1

ceph osd pg-temp 0.0 0 1 2
ceph osd pg-temp 0.0 osd.1 osd.0 osd.2
expect_false ceph osd pg-temp asdf qwer
expect_false ceph osd pg-temp 0.0 asdf
expect_false ceph osd pg-temp 0.0

# don't test ceph osd primary-temp for now
}

function test_mon_osd_pool_set()
{
TEST_POOL_GETSET=pool_getset
ceph osd pool create $TEST_POOL_GETSET 1
wait_for_clean
ceph osd pool get $TEST_POOL_GETSET all

for s in pg_num pgp_num size min_size crash_replay_interval crush_ruleset; do
    ceph osd pool get $TEST_POOL_GETSET $s
done

old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
ceph osd pool set $TEST_POOL_GETSET size $new_size
ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
ceph osd pool set $TEST_POOL_GETSET size $old_size

ceph osd pool create pool_erasure 1 1 erasure
wait_for_clean
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
check_response 'not change the size'
set -e
ceph osd pool get pool_erasure erasure_code_profile

auid=5555
ceph osd pool set $TEST_POOL_GETSET auid $auid
ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
ceph osd pool set $TEST_POOL_GETSET auid 0

for flag in hashpspool nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
    ceph osd pool set $TEST_POOL_GETSET $flag false
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    ceph osd pool set $TEST_POOL_GETSET $flag true
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 1
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 0
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
done

ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'

ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

ceph osd pool set $TEST_POOL_GETSET nopgchange 1
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
ceph osd pool set $TEST_POOL_GETSET nopgchange 0
ceph osd pool set $TEST_POOL_GETSET pg_num 10
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET pgp_num 10

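# Presumably the monitor caps a single pg_num increase at
# (number of OSDs) * mon_osd_max_split_count (default 32), so the step below
# grows pg_num by 32 per OSD and the extra +1 is expected to be rejected.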
old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
new_pgs=$(($old_pgs + $(ceph osd stat | grep osdmap | awk '{print $3}') * 32))
ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
wait_for_clean
old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
new_pgs=$(($old_pgs + $(ceph osd stat | grep osdmap | awk '{print $3}') * 32 + 1))
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs

ceph osd pool set $TEST_POOL_GETSET nosizechange 1
expect_false ceph osd pool set $TEST_POOL_GETSET size 2
expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
ceph osd pool set $TEST_POOL_GETSET nosizechange 0
ceph osd pool set $TEST_POOL_GETSET size 2
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET min_size 2

ceph osd pool set $TEST_POOL_GETSET nodelete 1
expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
ceph osd pool set $TEST_POOL_GETSET nodelete 0
ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it

ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
}

function test_mon_osd_tiered_pool_set()
{
# this is really a tier pool
ceph osd pool create real-tier 2
ceph osd tier add rbd real-tier

ceph osd pool set real-tier hit_set_type explicit_hash
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
ceph osd pool set real-tier hit_set_type explicit_object
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
ceph osd pool set real-tier hit_set_type bloom
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
ceph osd pool set real-tier hit_set_period 123
ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
ceph osd pool set real-tier hit_set_count 12
ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
ceph osd pool set real-tier hit_set_fpp .01
ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

ceph osd pool set real-tier target_max_objects 123
ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
ceph osd pool set real-tier target_max_bytes 123456
ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'

ceph osd pool set real-tier cache_target_dirty_ratio .123
ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1

ceph osd pool set real-tier cache_target_dirty_high_ratio .123
ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1

ceph osd pool set real-tier cache_target_full_ratio .123
ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
ceph osd pool set real-tier cache_target_full_ratio 1.0
ceph osd pool set real-tier cache_target_full_ratio 0
expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
ceph osd pool set real-tier cache_min_flush_age 123
ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
ceph osd pool set real-tier cache_min_evict_age 234
ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

# this is not a tier pool
ceph osd pool create fake-tier 2
wait_for_clean

expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type explicit_object
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type bloom
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
expect_false ceph osd pool set fake-tier hit_set_period 123
expect_false ceph osd pool get fake-tier hit_set_period
expect_false ceph osd pool set fake-tier hit_set_count 12
expect_false ceph osd pool get fake-tier hit_set_count
expect_false ceph osd pool set fake-tier hit_set_fpp .01
expect_false ceph osd pool get fake-tier hit_set_fpp
expect_false ceph osd pool set fake-tier target_max_objects 123
expect_false ceph osd pool get fake-tier target_max_objects
expect_false ceph osd pool set fake-tier target_max_bytes 123456
expect_false ceph osd pool get fake-tier target_max_bytes
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
expect_false ceph osd pool get fake-tier cache_target_full_ratio
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
expect_false ceph osd pool set fake-tier cache_min_flush_age 123
expect_false ceph osd pool get fake-tier cache_min_flush_age
expect_false ceph osd pool set fake-tier cache_min_evict_age 234
expect_false ceph osd pool get fake-tier cache_min_evict_age

ceph osd tier remove rbd real-tier
ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}

function test_mon_osd_erasure_code()
{
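# Re-setting a profile with identical key=value pairs is accepted, while
# changing it requires --force; after a forced update the new values become
# the profile's contents, so repeating them without --force succeeds again.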
ceph osd erasure-code-profile set fooprofile a=b c=d
ceph osd erasure-code-profile set fooprofile a=b c=d
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
ceph osd erasure-code-profile set fooprofile a=b c=d e=f
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
#
# cleanup by removing profile 'fooprofile'
ceph osd erasure-code-profile rm fooprofile
}

function test_mon_osd_misc()
{
set +e

# expect error about missing 'pool' argument
ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

# expect error about unused argument foo
ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

# expect "not in range" for invalid full ratio
ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

# expect "higher than 100" for an overload percentage below 100
ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

set -e

ceph osd reweight-by-utilization 110
ceph osd reweight-by-utilization 110 .5
expect_false ceph osd reweight-by-utilization 110 0
expect_false ceph osd reweight-by-utilization 110 -0.1

ceph osd test-reweight-by-utilization 110 .5 --no-increasing
ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing

ceph osd reweight-by-pg 110
ceph osd test-reweight-by-pg 110 .5
ceph osd reweight-by-pg 110 rbd
ceph osd reweight-by-pg 110 .5 rbd
expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
}

function test_mon_heap_profiler()
{
do_test=1
set +e
# expect 'heap' commands to be correctly parsed
ceph heap stats 2>$TMPFILE
if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
fi
set -e

[[ $do_test -eq 0 ]] && return 0

ceph heap start_profiler
ceph heap dump
ceph heap stop_profiler
ceph heap release
}

function test_osd_bench()
{
# test osd bench limits
# As we should not rely on defaults (as they may change over time),
# let's inject some values and perform some simple tests
# max iops: 10              # 10 IOPS
# max throughput: 10485760  # 10 MB/s
# max block size: 2097152   # 2 MB
# duration: 10              # 10 seconds
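# For reference, the limits above imply the following caps (matching the
# expected values exercised below):
#   small bs (4096 B), IOPS-bound:     10 IOPS * 10 s * 4096 B = 409600 bytes
#   large bs (2 MB), throughput-bound: 10485760 B/s * 10 s     = 104857600 bytes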
local args="\
--osd-bench-duration 10 \
--osd-bench-max-block-size 2097152 \
--osd-bench-large-size-max-throughput 10485760 \
--osd-bench-small-size-max-iops 10"
ceph tell osd.0 injectargs ${args## }

# anything with a bs larger than 2097152 must fail
expect_false ceph tell osd.0 bench 1 2097153
# but using 'osd_bench_max_bs' must succeed
ceph tell osd.0 bench 1 2097152

# we assume 1MB as a large bs; anything lower is a small bs
# for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
# max count: 409600 (bytes)

# more than max count must not be allowed
expect_false ceph tell osd.0 bench 409601 4096
# but 409600 must succeed
ceph tell osd.0 bench 409600 4096

# for a large bs, we are limited by throughput.
# for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
# the max count will be (10MB * 10s) = 100MB
# max count: 104857600 (bytes)

# more than max count must not be allowed
expect_false ceph tell osd.0 bench 104857601 2097152
# up to max count must be allowed
ceph tell osd.0 bench 104857600 2097152
}

function test_osd_negative_filestore_merge_threshold()
{
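# A negative filestore_merge_threshold is presumably used to disable FileStore
# directory merging; this test only checks that the negative value is accepted
# and read back unchanged via the admin socket.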
$SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
expect_config_value "osd.0" "filestore_merge_threshold" -1
}

function test_mon_tell()
{
ceph tell mon.a version
ceph tell mon.b version
expect_false ceph tell mon.foo version
sleep 1
ceph_watch_start debug
ceph tell mon.a version
ceph_watch_wait 'mon.0 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
ceph_watch_start debug
ceph tell mon.b version
ceph_watch_wait 'mon.1 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}

function test_mon_crushmap_validation()
{
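# The monitor validates a crushmap passed to 'osd setcrushmap' by running the
# configured 'crushtool' on it. Substituting a small stub script for crushtool
# (written below) lets this test exercise the success, failure and
# timed-out-after-mon_lease paths without compiling a real crushmap.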
local map=$TEMP_DIR/map
ceph osd getcrushmap -o $map

local crushtool_path="${TEMP_DIR}/crushtool"
touch "${crushtool_path}"
chmod +x "${crushtool_path}"
local crushtool_path_old=`ceph-conf --show-config-value crushtool`
ceph tell mon.\* injectargs --crushtool "${crushtool_path}"

printf "%s\n" \
    "#!/bin/sh
cat > /dev/null
exit 0" > "${crushtool_path}"

ceph osd setcrushmap -i $map

printf "%s\n" \
    "#!/bin/sh
cat > /dev/null
exit 1" > "${crushtool_path}"

expect_false ceph osd setcrushmap -i $map

printf "%s\n" \
    "#!/bin/sh
cat > /dev/null
echo 'TEST FAIL' >&2
exit 1" > "${crushtool_path}"

expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
check_response "Error EINVAL: Failed crushmap test: TEST FAIL"

local mon_lease=`ceph-conf --show-config-value mon_lease`

test "${mon_lease}" -gt 0

printf "%s\n" \
    "#!/bin/sh
cat > /dev/null
sleep $((mon_lease - 1))" > "${crushtool_path}"

ceph osd setcrushmap -i $map

printf "%s\n" \
    "#!/bin/sh
cat > /dev/null
sleep $((mon_lease + 1))" > "${crushtool_path}"

expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
check_response "Error EINVAL: Failed crushmap test: ${crushtool_path}: timed out (${mon_lease} sec)"

ceph tell mon.\* injectargs --crushtool "${crushtool_path_old}"

rm -f "${crushtool_path}"
}

function test_mon_ping()
{
ceph ping mon.a
ceph ping mon.b
expect_false ceph ping mon.foo
ceph ping mon.\*
}

function test_mon_deprecated_commands()
{
# current DEPRECATED commands are:
# ceph compact
# ceph scrub
# ceph sync force
#
# Testing should be accomplished by setting
# 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
# each one of these commands.
ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
expect_false ceph tell mon.a compact 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
expect_false ceph tell mon.a scrub 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
expect_false ceph tell mon.a sync force 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}

function test_mon_cephdf_commands()
{
# ceph df detail:
# pool section:
# RAW USED: the approximate raw space consumed by a pool,
# i.e. its USED bytes multiplied by the replica count (size=2 here)
ceph osd pool create cephdf_for_test 32 32 replicated
ceph osd pool set cephdf_for_test size 2
dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

# wait for update
for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
done

cal_raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{printf "%d\n", 2 * $4}'`
raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{print $11}'`

ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
rm ./cephdf_for_test

expect_false test $cal_raw_used_size != $raw_used_size
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in the order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#
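#
# Illustrative invocations (assuming this script is saved as test.sh; the
# options are the ones parsed in the "main" section below):
#
#   ./test.sh -l                        # list available test names
#   ./test.sh -t mon_ping -t osd_bench  # run two specific tests, in order
#   ./test.sh --test-osd --no-sanity-check
#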

set +x

MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
MON_TESTS+=" tiering"
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_crushmap_validation"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"

TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
#
# "main" follows
#
function list_tests()
{
    echo "AVAILABLE TESTS"
    for i in $TESTS; do
        echo "  $i"
    done
}

function usage()
{
    echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}

tests_to_run=()

sanity_check=true

while [[ $# -gt 0 ]]; do
    opt=$1

    case "$opt" in
    "-l")
        do_list=1
        ;;
    "--asok-does-not-need-root")
        SUDO=""
        ;;
    "--no-sanity-check")
        sanity_check=false
        ;;
    "--test-mon")
        tests_to_run+="$MON_TESTS"
        ;;
    "--test-osd")
        tests_to_run+="$OSD_TESTS"
        ;;
    "--test-mds")
        tests_to_run+="$MDS_TESTS"
        ;;
    "-t")
        shift
        if [[ -z "$1" ]]; then
            echo "missing argument to '-t'"
            usage
            exit 1
        fi
        tests_to_run+=" $1"
        ;;
    "-h")
        usage
        exit 0
        ;;
    esac
    shift
done

if [[ $do_list -eq 1 ]]; then
    list_tests
    exit 0
fi

if test -z "$tests_to_run"; then
    tests_to_run="$TESTS"
fi

if $sanity_check; then
    wait_no_osd_down
fi

for i in $tests_to_run; do
    if $sanity_check; then
        check_no_osd_down
    fi
    set -x
    test_${i}
    set +x
done

if $sanity_check; then
    check_no_osd_down
fi

set -x

echo OK