#!/bin/bash -x
#
# cephtool CLI smoke tests: exercise the ceph command-line interface
# against a running cluster.  Every command below is an assertion —
# the script aborts on the first failure.

# Abort on any unhandled non-zero exit status.
set -e

# Trace function calls as well, so the xtrace output shows call context.
set -o functrace

# Prefix each trace line with the current function name and line number.
PS4=' ${FUNCNAME[0]}: $LINENO: '
# get_pg <pool> <obj>
# Print the PG id (e.g. "0.2b") that object <obj> in pool <pool> maps to,
# parsed from the output of 'ceph osd map'.
function get_pg()
{
  local pool obj map_output pg i
  pool=$1
  obj=$2
  declare -a map_output
  # fix: use the assigned locals (and quote them) instead of raw $1 $2
  map_output=($(ceph osd map "$pool" "$obj"))
  for (( i=0; i<${#map_output[*]}; i++ )) ; do
    if [ "${map_output[$i]}" == "pg" ] ; then
      # the token two places after "pg" is the pg id in parentheses
      pg=${map_output[((i+2))]}
      break
    fi
  done
  # strip the surrounding parentheses
  pg=$(echo "$pg" | sed 's/[()]//g')
  echo "$pg"
}
# expect_false <cmd...>
# Run the command and invert its status: return 0 if the command fails,
# return 1 if it unexpectedly succeeds.
function expect_false()
{
  set -x
  if "$@"; then return 1; else return 0; fi
}
# Scratch directory for map dumps and captured stderr; removed on exit.
TMPDIR=/tmp/cephtool$$
mkdir "$TMPDIR"
trap "rm -fr $TMPDIR" 0

# File used to capture stderr from commands that are expected to fail.
TMPFILE=$TMPDIR/test_invalid.$$
# check_response <expected_stderr_string> [retcode] [expected_retcode]
# Verify that $TMPFILE contains the expected stderr text and, when an
# expected return code is given, that the actual code matches it.
# Exits the whole script with status 1 on any mismatch.
function check_response()
{
  local expected_stderr_string=$1
  local retcode=$2
  local expected_retcode=$3

  # fix: replace deprecated '[ ... -a ... ]' with two quoted tests
  if [ -n "$expected_retcode" ] && [ "$retcode" != "$expected_retcode" ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep "$expected_stderr_string" "$TMPFILE" >/dev/null 2>&1 ; then
    echo "Didn't find $expected_stderr_string in stderr output" >&2
    echo "Stderr: " >&2
    cat "$TMPFILE" >&2
    exit 1
  fi
}
# tiering
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add data cache
ceph osd tier add data cache2
expect_false ceph osd tier add metadata cache
ceph osd tier cache-mode cache writeback
ceph osd tier cache-mode cache readonly
ceph osd tier cache-mode cache none
ceph osd tier set-overlay data cache
expect_false ceph osd tier set-overlay data cache2
expect_false ceph osd tier remove data cache
ceph osd tier remove-overlay data
ceph osd tier set-overlay data cache2
ceph osd tier remove-overlay data
ceph osd tier remove data cache
ceph osd tier add metadata cache
expect_false ceph osd tier set-overlay data cache
ceph osd tier set-overlay metadata cache
ceph osd tier remove-overlay metadata
ceph osd tier remove metadata cache
ceph osd tier remove data cache2

# make sure a non-empty pool fails
rados -p cache2 put /etc/passwd /etc/passwd
while ! ceph df | grep cache2 | grep ' 1 ' ; do
  echo waiting for pg stats to flush
  sleep 2
done
expect_false ceph osd tier add data cache2
ceph osd tier add data cache2 --force-nonempty
ceph osd tier remove data cache2

ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it

# convenient add-cache command
ceph osd pool create cache3 2
ceph osd tier add-cache data cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
ceph osd tier remove data cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
# check health check: cache pool target limits should trigger warnings
ceph osd pool create cache4 2
ceph osd pool set cache4 target_max_objects 5
ceph osd pool set cache4 target_max_bytes 1000
for f in `seq 1 5` ; do
  rados -p cache4 put foo$f /etc/passwd
done
while ! ceph df | grep cache4 | grep ' 5 ' ; do
  echo waiting for pg stats to flush
  sleep 2
done
ceph health | grep WARN | grep cache4
ceph health detail | grep cache4 | grep 'target max' | grep objects
ceph health detail | grep cache4 | grep 'target max' | grep 'B'
ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
# Assumes there are at least 3 MDSes and two OSDs
#

ceph auth add client.xx mon allow osd "allow *"
ceph auth export client.xx >client.xx.keyring
ceph auth add client.xx -i client.xx.keyring
rm -f client.xx.keyring
ceph auth list | grep client.xx
ceph auth get client.xx | grep caps | grep mon
ceph auth get client.xx | grep caps | grep osd
ceph auth get-key client.xx
ceph auth print-key client.xx
ceph auth print_key client.xx
ceph auth caps client.xx osd "allow rw"
# NOTE(review): the quoted pipeline below is passed to expect_false as a
# single word (not run through a shell) — confirm this checks what's intended.
expect_false "ceph auth get client.xx | grep caps | grep mon"
ceph auth get client.xx | grep osd | grep "allow rw"
ceph auth export | grep client.xx
ceph auth export -o authfile
ceph auth import -i authfile
ceph auth export -o authfile2
diff authfile authfile2
rm authfile authfile2
ceph auth del client.xx
# with and without verbosity
ceph osd dump | grep '^epoch'
ceph --concise osd dump | grep '^epoch'

# df
ceph df > $TMPFILE
grep GLOBAL $TMPFILE
grep -v DIRTY $TMPFILE
ceph df detail > $TMPFILE
grep CATEGORY $TMPFILE
grep DIRTY $TMPFILE
ceph df --format json > $TMPFILE
grep 'total_space' $TMPFILE
grep -v 'dirty' $TMPFILE
ceph df detail --format json > $TMPFILE
grep 'rd_kb' $TMPFILE
grep 'dirty' $TMPFILE
ceph df --format xml | grep '<total_space>'
ceph df detail --format xml | grep '<rd_kb>'

ceph fsid
ceph health
ceph health detail
ceph health --format json-pretty
ceph health detail --format xml-pretty
# verify that a message injected with 'ceph log' shows up in 'ceph -w'
ceph -w > $TMPDIR/$$ &
wpid="$!"
mymsg="this is a test log message $$.$(date)"
ceph log "$mymsg"
sleep 3
if ! grep "$mymsg" $TMPDIR/$$; then
  # in case it is very slow (mon thrashing or something)
  sleep 30
  grep "$mymsg" $TMPDIR/$$
fi
kill $wpid
# mds commands
ceph mds cluster_down
ceph mds cluster_up

ceph mds compat rm_incompat 4
ceph mds compat rm_incompat 4

ceph mds compat show
expect_false ceph mds deactivate 2
ceph mds dump
# XXX mds fail, but how do you undo it?

mdsmapfile=$TMPDIR/mdsmap.$$
current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
[ -s $mdsmapfile ]
((epoch = current_epoch + 1))
ceph mds setmap -i $mdsmapfile $epoch
rm $mdsmapfile

ceph mds newfs 0 1 --yes-i-really-mean-it
ceph osd pool create data2 10
poolnum=$(ceph osd dump | grep 'pool.*data2' | awk '{print $2;}')
ceph mds add_data_pool $poolnum
ceph mds add_data_pool rbd
ceph mds remove_data_pool $poolnum
ceph mds remove_data_pool rbd
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
ceph mds set_max_mds 4
ceph mds set_max_mds 3
ceph mds set max_mds 4
expect_false ceph mds set max_mds asdf
expect_false ceph mds set inline_data true
ceph mds set inline_data true --yes-i-really-mean-it
ceph mds set inline_data yes --yes-i-really-mean-it
ceph mds set inline_data 1 --yes-i-really-mean-it
expect_false ceph mds set inline_data --yes-i-really-mean-it
ceph mds set inline_data false
ceph mds set inline_data no
ceph mds set inline_data 0
expect_false ceph mds set inline_data asdf
ceph mds set max_file_size 1048576
expect_false ceph mds set max_file_size 123asdf

expect_false ceph mds set allow_new_snaps
expect_false ceph mds set allow_new_snaps true
ceph mds set allow_new_snaps true --yes-i-really-mean-it
ceph mds set allow_new_snaps 0
ceph mds set allow_new_snaps false
ceph mds set allow_new_snaps no
expect_false ceph mds set allow_new_snaps taco

ceph mds stat
# ceph mds tell mds.a getmap
# ceph mds rm
# ceph mds rmfailed
# ceph mds set_state
# ceph mds stop
# no mon add/remove
ceph mon dump
ceph mon getmap -o $TMPDIR/monmap.$$
[ -s $TMPDIR/monmap.$$ ]
# ceph mon tell
ceph mon_status

# osd blacklist with explicit nonce
bl=192.168.0.1:0/1000
ceph osd blacklist add $bl
ceph osd blacklist ls | grep $bl
ceph osd blacklist rm $bl
expect_false "ceph osd blacklist ls | grep $bl"

bl=192.168.0.1
# test without nonce, invalid nonce
ceph osd blacklist add $bl
ceph osd blacklist ls | grep $bl
ceph osd blacklist rm $bl
expect_false "ceph osd blacklist ls | grep $bl"
expect_false "ceph osd blacklist $bl/-1"
expect_false "ceph osd blacklist $bl/foo"
# crush tunables profiles
ceph osd crush tunables legacy
ceph osd crush show-tunables | grep argonaut
ceph osd crush tunables bobtail
ceph osd crush show-tunables | grep bobtail
ceph osd crush tunables firefly
ceph osd crush show-tunables | grep firefly

# how do I tell when these are done?
ceph osd scrub 0
ceph osd deep-scrub 0
ceph osd repair 0
# every supported osd cluster flag can be set and unset
for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norecover notieragent
do
  ceph osd set $f
  ceph osd unset $f
done
expect_false ceph osd set bogus
expect_false ceph osd unset bogus

# noup keeps a downed osd down until the flag is cleared
ceph osd set noup
ceph osd down 0
ceph osd dump | grep 'osd.0 down'
ceph osd unset noup
for ((i=0; i < 100; i++)); do
  if ! ceph osd dump | grep 'osd.0 up'; then
    echo "waiting for osd.0 to come back up"
    sleep 10
  else
    break
  fi
done
ceph osd thrash 10
ceph osd down `seq 0 31`  # force everything down so that we can trust up
# make sure everything gets back up+in.
for ((i=0; i < 100; i++)); do
  if ceph osd dump | grep ' down '; then
    echo "waiting for osd(s) to come back up"
    sleep 10
  else
    break
  fi
done
# if you have more osds than this you are on your own
for f in `seq 0 31`; do
  ceph osd in $f || true
done
ceph osd dump | grep 'osd.0 up'
ceph osd find 1
ceph osd metadata 1 | grep 'distro'
ceph osd out 0
ceph osd dump | grep 'osd.0.*out'
ceph osd in 0
ceph osd dump | grep 'osd.0.*in'
ceph osd find 0

# crush map and osd map export to files
f=$TMPDIR/map.$$
ceph osd getcrushmap -o $f
[ -s $f ]
rm $f
ceph osd getmap -o $f
[ -s $f ]
rm $f

# max_osd round-trip: change it, then restore the saved value
save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
ceph osd setmaxosd 10
ceph osd getmaxosd | grep 'max_osd = 10'
ceph osd setmaxosd $save
ceph osd getmaxosd | grep "max_osd = $save"
for id in `ceph osd ls` ; do
  ceph tell osd.$id version
done

# removing a live osd must fail with EBUSY
ceph osd rm 0 2>&1 | grep 'EBUSY'

id=`ceph osd create`
ceph osd lost $id --yes-i-really-mean-it
ceph osd rm $id

# creating twice with the same uuid yields the same id
uuid=`uuidgen`
id=`ceph osd create $uuid`
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
ceph osd rm $id
ceph osd ls
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'

ceph osd pause
ceph osd dump | grep 'flags pauserd,pausewr'
ceph osd unpause

ceph osd tree

# pool snapshots
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap

# pool rename
ceph osd pool create data2 10
ceph osd pool rename data2 data3
ceph osd lspools | grep data3
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
# re-creating an existing pool with matching parameters is idempotent
ceph osd pool create replicated 12 12 replicated
ceph osd pool create replicated 12 12 replicated
ceph osd pool create replicated 12 12 # default is replicated
ceph osd pool create replicated 12    # default is replicated, pgp_num = pg_num
# should fail because the type is not the same
expect_false ceph osd pool create replicated 12 12 erasure
ceph osd lspools | grep replicated
ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
ceph osd stat | grep up,

# pg commands
ceph pg debug unfound_objects_exist
ceph pg debug degraded_pgs_exist
ceph pg deep-scrub 0.0
ceph pg dump
ceph pg dump pgs_brief --format=json
ceph pg dump pgs --format=json
ceph pg dump pools --format=json
ceph pg dump osds --format=json
ceph pg dump sum --format=json
ceph pg dump all --format=json
ceph pg dump pgs_brief osds --format=json
ceph pg dump pools osds pgs_brief --format=json
ceph pg dump_json
ceph pg dump_pools_json
ceph pg dump_stuck inactive
ceph pg dump_stuck unclean
ceph pg dump_stuck stale
# can't test this...
# ceph pg force_create_pg
ceph pg getmap -o $TMPDIR/map.$$
[ -s $TMPDIR/map.$$ ]
ceph pg map 0.0 | grep acting
ceph pg repair 0.0
ceph pg scrub 0.0

ceph pg send_pg_creates

# full/nearfull ratios round-trip through pg dump
ceph pg set_full_ratio 0.90
ceph pg dump --format=plain | grep '^full_ratio 0.9'
ceph pg set_full_ratio 0.95
ceph pg set_nearfull_ratio 0.90
ceph pg dump --format=plain | grep '^nearfull_ratio 0.9'
ceph pg set_nearfull_ratio 0.85
ceph pg stat | grep 'pgs:'
ceph pg 0.0 query
ceph tell 0.0 query
ceph quorum enter
ceph quorum_status
ceph report | grep osd_stats
ceph status
ceph -s
# ceph sync force
ceph tell osd.0 version
expect_false ceph tell osd.9999 version
expect_false ceph tell osd.foo version

ceph tell osd.0 dump_pg_recovery_stats | grep Started

# osd reweight accepts [0,1] and rejects negative values
ceph osd reweight 0 0.9
expect_false ceph osd reweight 0 -1
ceph osd reweight 0 1

# primary-affinity accepts [0,1] and rejects out-of-range values
ceph osd primary-affinity osd.0 .9
expect_false ceph osd primary-affinity osd.0 -2
ceph osd primary-affinity osd.0 1
for s in pg_num pgp_num size min_size crash_replay_interval crush_ruleset; do
  ceph osd pool get data $s
done

# pool size round-trip: bump it, verify, restore
old_size=$(ceph osd pool get data size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
ceph osd pool set data size $new_size
ceph osd pool get data size | grep "size: $new_size"
ceph osd pool set data size $old_size

# the size of an erasure-coded pool cannot be changed
ceph osd pool create pool_erasure 12 12 erasure
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
check_response 'not change the size'
set -e
# hashpspool accepts boolean-ish values only
ceph osd pool set data hashpspool true
ceph osd pool set data hashpspool false
ceph osd pool set data hashpspool 0
ceph osd pool set data hashpspool 1
expect_false ceph osd pool set data hashpspool asdf
expect_false ceph osd pool set data hashpspool 2

# hit_set parameters round-trip through pool get
ceph osd pool set rbd hit_set_type explicit_hash
ceph osd pool get rbd hit_set_type | grep "hit_set_type: explicit_hash"
ceph osd pool set rbd hit_set_type explicit_object
ceph osd pool get rbd hit_set_type | grep "hit_set_type: explicit_object"
ceph osd pool set rbd hit_set_type bloom
ceph osd pool get rbd hit_set_type | grep "hit_set_type: bloom"
expect_false ceph osd pool set rbd hit_set_type i_dont_exist
ceph osd pool set rbd hit_set_period 123
ceph osd pool get rbd hit_set_period | grep "hit_set_period: 123"
ceph osd pool set rbd hit_set_count 12
ceph osd pool get rbd hit_set_count | grep "hit_set_count: 12"
ceph osd pool set rbd hit_set_fpp .01
ceph osd pool get rbd hit_set_fpp | grep "hit_set_fpp: 0.01"

# cache tiering knobs; ratios must stay within [0,1]
ceph osd pool set rbd target_max_objects 123
ceph osd pool set rbd target_max_bytes 123456
ceph osd pool set rbd cache_target_dirty_ratio .123
expect_false ceph osd pool set rbd cache_target_dirty_ratio -.2
expect_false ceph osd pool set rbd cache_target_dirty_ratio 1.1
ceph osd pool set rbd cache_target_full_ratio .123
ceph osd pool set rbd cache_target_full_ratio 1.0
ceph osd pool set rbd cache_target_full_ratio 0
expect_false ceph osd pool set rbd cache_target_full_ratio 1.1
ceph osd pool set rbd cache_min_flush_age 123
ceph osd pool set rbd cache_min_evict_age 234

ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
# erasure-code profiles: setting the same profile is idempotent,
# changing it requires --force
ceph osd erasure-code-profile set fooprofile a=b c=d
ceph osd erasure-code-profile set fooprofile a=b c=d
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
ceph osd erasure-code-profile set fooprofile a=b c=d e=f
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
# the following commands are expected to fail; disable errexit and
# validate stderr/return codes with check_response instead
set +e

# expect error about missing 'pool' argument
ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

# expect error about unused argument foo
ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

# expect "not in range" for invalid full ratio
ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

# expect "not in range" for invalid overload percentage
ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'not in range' $? 22
# expect 'heap' commands to be correctly parsed
ceph heap stats
ceph heap start_profiler
ceph heap dump
ceph heap stop_profiler
ceph heap release
# test osd bench limits
# As we should not rely on defaults (as they may change over time),
# lets inject some values and perform some simple tests
# max iops: 10              # 100 IOPS
# max throughput: 10485760  # 10MB/s
# max block size: 2097152   # 2MB
# duration: 10              # 10 seconds

ceph tell osd.0 injectargs "\
--osd-bench-duration 10 \
--osd-bench-max-block-size 2097152 \
--osd-bench-large-size-max-throughput 10485760 \
--osd-bench-small-size-max-iops 10"

# anything with a bs larger than 2097152 must fail
expect_false ceph tell osd.0 bench 1 2097153
# but using 'osd_bench_max_bs' must succeed
ceph tell osd.0 bench 1 2097152

# we assume 1MB as a large bs; anything lower is a small bs
# for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
# max count: 409600

# more than max count must not be allowed
expect_false ceph tell osd.0 bench 409601 4096
# but 409600 must be succeed
ceph tell osd.0 bench 409600 4096

# for a large bs, we are limited by throughput.
# for a 2MB block size for 10 seconds, out max count is 50
# max count: 50

# more than max count must not be allowed
expect_false ceph tell osd.0 bench 51 2097152
# but 50 must succeed
ceph tell osd.0 bench 50 2097152
# all assertions passed
echo OK