From 58ddae49cc222b534b81a23923e4d67ae937ff7d Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Thu, 9 Jun 2016 14:41:08 +0800
Subject: [PATCH 1/2] cmake: respect CEPH_*_VIRTUALENV when running run-tox-ceph-*

so that ceph-{detect-init,disk}/run-tox.sh can pick them up

Signed-off-by: Kefu Chai
---
 src/test/CMakeLists.txt | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
index ee0392f89be..5a74b1b2529 100644
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -516,6 +516,9 @@ add_dependencies(check run-tox-ceph-disk)
add_test(NAME run-tox-ceph-detect-init
  COMMAND bash ${CMAKE_SOURCE_DIR}/src/ceph-detect-init/run-tox.sh WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src)
add_dependencies(check run-tox-ceph-detect-init)
+set(CEPH_DISK_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv)
+set(CEPH_DETECT_INIT_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv)
+
set_property(TEST
  run-tox-ceph-disk
  run-tox-ceph-detect-init
@@ -523,6 +526,8 @@ set_property(TEST
  CEPH_ROOT=${CMAKE_SOURCE_DIR}
  CEPH_BIN=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
  CEPH_LIB=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+  CEPH_DISK_VIRTUALENV=${CEPH_DISK_VIRTUALENV}
+  CEPH_DETECT_INIT_VIRTUALENV=${CEPH_DETECT_INIT_VIRTUALENV}
  LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/lib
  PATH=$ENV{PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}:${CMAKE_SOURCE_DIR}/src
  PYTHONPATH=${CMAKE_SOURCE_DIR}/src/pybind

From ad3253de81eaa01a917022f8f0ba6f6286d43f35 Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Thu, 9 Jun 2016 16:03:21 +0800
Subject: [PATCH 2/2] test: ceph-disk.sh: use ceph-helpers.sh helpers

So we can use the utility functions offered by ceph-helpers.sh, and
enable cmake to run this test.

* ceph-disk/tests/ceph-disk.sh: it checks ../ for the executables for
  which fixtures are set up, but this assumption does not hold when we
  are using cmake for an out-of-source build, so we need to pass the
  build directory to ceph-disk.sh. ${CEPH_BIN} is used for this purpose
  in this change; it is ".." by default, so this change is backward
  compatible, and one can still run tox without setting the ${CEPH_BIN}
  env variable.

* ceph-disk/test-requirements.txt: bump up tox to 2.0+. In the case of
  "make check", both cmake and automake pass the ${CEPH_BIN} env
  variable to the "make check" tests, so we can simply pass it down via
  tox.ini's "passenv" setting. Per
  https://testrun.org/tox/latest/example/basic.html#passing-down-environment-variables,
  this feature was introduced in tox 2.0, so bump up the minimum
  required tox version to 2.0 in ceph-disk/test-requirements.txt.

* ceph-disk/tox.ini: pass CEPH_ROOT, CEPH_BIN, and CEPH_LIB down to
  ceph-disk.sh

Signed-off-by: Kefu Chai
---
 src/ceph-disk/test-requirements.txt |   2 +-
 src/ceph-disk/tests/ceph-disk.sh    | 322 +++++++++++++---------------
 src/ceph-disk/tox.ini               |   1 +
 3 files changed, 156 insertions(+), 169 deletions(-)

diff --git a/src/ceph-disk/test-requirements.txt b/src/ceph-disk/test-requirements.txt
index fa6d3a7ce86..70a42d94098 100644
--- a/src/ceph-disk/test-requirements.txt
+++ b/src/ceph-disk/test-requirements.txt
@@ -7,5 +7,5 @@ testrepository>=0.0.17
testtools>=0.9.32
mock
pytest
-tox
+tox>=2.0
flake8

diff --git a/src/ceph-disk/tests/ceph-disk.sh b/src/ceph-disk/tests/ceph-disk.sh
index ba0b4fd20dc..3c471d4b64a 100644
--- a/src/ceph-disk/tests/ceph-disk.sh
+++ b/src/ceph-disk/tests/ceph-disk.sh
@@ -16,61 +16,29 @@
# GNU Library Public License for more details.
# -# -# Removes btrfs subvolumes under the given directory param -# -function teardown_btrfs() { - local btrfs_base_dir=$1 +# ceph-disk.sh is launched by tox which expects tox.ini in current +# directory. so we cannot run ceph-disk.sh in build directory directly, +# and hence not able to use detect-build-env-vars.sh to set the build +# env vars. +if [ -z "$CEPH_ROOT" ] || [ -z "$CEPH_BIN" ] || [ -z "$CEPH_LIB" ]; then + CEPH_ROOT=`readlink -f $(dirname $0)/../../..` + CEPH_BIN=$CEPH_ROOT + CEPH_LIB=$CEPH_ROOT/.libs +fi +source $CEPH_ROOT/qa/workunits/ceph-helpers.sh - btrfs_dirs=`ls -l $btrfs_base_dir | egrep '^d' | awk '{print $9}'` - for btrfs_dir in $btrfs_dirs - do - btrfs_subdirs=`ls -l $btrfs_base_dir/$btrfs_dir | egrep '^d' | awk '{print $9}'` - for btrfs_subdir in $btrfs_subdirs - do - btrfs subvolume delete $btrfs_base_dir/$btrfs_dir/$btrfs_subdir - done - done -} +set -x PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' -export PATH=..:.:$PATH # make sure program from sources are preferred +export PATH=$CEPH_BIN:.:$PATH # make sure program from sources are preferred export PATH=../ceph-detect-init/virtualenv/bin:$PATH export PATH=virtualenv/bin:$PATH -DIR=test-ceph-disk +export LD_LIBRARY_PATH=$CEPH_LIB : ${CEPH_DISK:=ceph-disk} -OSD_DATA=$DIR/osd -MON_ID=a -MONA=127.0.0.1:7451 -TEST_POOL=rbd -FSID=$(uuidgen) -export CEPH_CONF=$DIR/ceph.conf -export CEPH_ARGS="--fsid $FSID" -CEPH_ARGS+=" --chdir=" -CEPH_ARGS+=" --journal-dio=false" -CEPH_ARGS+=" --run-dir=$DIR" -CEPH_ARGS+=" --osd-failsafe-full-ratio=.99" -CEPH_ARGS+=" --mon-host=$MONA" -CEPH_ARGS+=" --log-file=$DIR/\$name.log" -CEPH_ARGS+=" --pid-file=$DIR/\$name.pidfile" -if test -d ../.libs ; then - CEPH_ARGS+=" --erasure-code-dir=../.libs" - CEPH_ARGS+=" --compression-dir=../.libs" -fi -CEPH_ARGS+=" --auth-supported=none" -CEPH_ARGS+=" --osd-journal-size=100" -CEPH_ARGS+=" --debug-mon=20" -CEPH_ARGS+=" --debug-osd=20" -CEPH_ARGS+=" --debug-bdev=20" -CEPH_ARGS+=" --debug-bluestore=20" -CEPH_ARGS+=" --osd-max-object-name-len=460" -CEPH_ARGS+=" --osd-max-object-namespace-len=64" CEPH_DISK_ARGS= -CEPH_DISK_ARGS+=" --statedir=$DIR" -CEPH_DISK_ARGS+=" --sysconfdir=$DIR" -CEPH_DISK_ARGS+=" --prepend-to-path=" CEPH_DISK_ARGS+=" --verbose" +CEPH_DISK_ARGS+=" --prepend-to-path=" TIMEOUT=360 cat=$(which cat) @@ -81,133 +49,124 @@ rm=$(which rm) uuidgen=$(which uuidgen) function setup() { - teardown - mkdir $DIR - mkdir $OSD_DATA - touch $DIR/ceph.conf # so ceph-disk think ceph is the cluster + local dir=$1 + teardown $dir + mkdir -p $dir/osd + touch $dir/ceph.conf # so ceph-disk think ceph is the cluster } function teardown() { - kill_daemons - if [ $(stat -f -c '%T' .) == "btrfs" ]; then - rm -fr $DIR/*/*db - teardown_btrfs $DIR - fi - grep " $(pwd)/$DIR/" < /proc/mounts | while read mounted rest ; do - umount $mounted - done - rm -fr $DIR -} - -function run_mon() { - local mon_dir=$DIR/$MON_ID - - ceph-mon \ - --id $MON_ID \ - --mkfs \ - --mon-data=$mon_dir \ - --mon-initial-members=$MON_ID \ - "$@" - - ceph-mon \ - --id $MON_ID \ - --mon-data=$mon_dir \ - --mon-osd-full-ratio=.99 \ - --mon-data-avail-crit=1 \ - --mon-cluster-log-file=$mon_dir/log \ - --public-addr $MONA \ - "$@" -} - -function kill_daemons() { - if ! test -e $DIR ; then + local dir=$1 + if ! test -e $dir ; then return fi - for pidfile in $(find $DIR | grep pidfile) ; do - pid=$(cat $pidfile) - for try in 0 1 1 1 2 3 ; do - kill $pid 2>/dev/null || break - sleep $try - done + kill_daemons + if [ $(stat -f -c '%T' .) 
== "btrfs" ]; then + rm -fr $dir/*/*db + __teardown_btrfs $dir + fi + grep " $(pwd)/$dir/" < /proc/mounts | while read mounted rest ; do + umount $mounted done + rm -fr $dir } function command_fixture() { + local dir=$1 + shift local command=$1 + shift local fpath=`readlink -f $(which $command)` - [ "$fpath" = `readlink -f ../$command` ] || [ "$fpath" = `readlink -f $(pwd)/$command` ] || return 1 + [ "$fpath" = `readlink -f $CEPH_BIN/$command` ] || [ "$fpath" = `readlink -f $(pwd)/$command` ] || return 1 - cat > $DIR/$command < $dir/$command <&1 | grep -q 'not full block device' || return 1 @@ -249,46 +209,48 @@ function test_zap() { # ceph-disk prepare returns immediately on success if the magic file # exists in the --osd-data directory. function test_activate_dir_magic() { + local dir=$1 local uuid=$($uuidgen) - local osd_data=$DIR/osd + local osd_data=$dir/osd echo a failure to create the fsid file implies the magic file is not created mkdir -p $osd_data/fsid CEPH_ARGS="--fsid $uuid" \ - ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data > $DIR/out 2>&1 - grep --quiet 'Is a directory' $DIR/out || return 1 + ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data > $dir/out 2>&1 + grep --quiet 'Is a directory' $dir/out || return 1 ! [ -f $osd_data/magic ] || return 1 rmdir $osd_data/fsid echo successfully prepare the OSD CEPH_ARGS="--fsid $uuid" \ - ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $DIR/out - grep --quiet 'Preparing osd data dir' $DIR/out || return 1 + ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $dir/out + grep --quiet 'Preparing osd data dir' $dir/out || return 1 grep --quiet $uuid $osd_data/ceph_fsid || return 1 [ -f $osd_data/magic ] || return 1 echo will not override an existing OSD CEPH_ARGS="--fsid $($uuidgen)" \ - ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $DIR/out - grep --quiet 'Data dir .* already exists' $DIR/out || return 1 + ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $dir/out + grep --quiet 'Data dir .* already exists' $dir/out || return 1 grep --quiet $uuid $osd_data/ceph_fsid || return 1 } function test_pool_read_write() { local osd_uuid=$1 + local TEST_POOL=rbd $timeout $TIMEOUT ceph osd pool set $TEST_POOL size 1 || return 1 local id=$(ceph osd create $osd_uuid) local weight=1 ceph osd crush add osd.$id $weight root=default host=localhost || return 1 - echo FOO > $DIR/BAR - $timeout $TIMEOUT rados --pool $TEST_POOL put BAR $DIR/BAR || return 1 - $timeout $TIMEOUT rados --pool $TEST_POOL get BAR $DIR/BAR.copy || return 1 - $diff $DIR/BAR $DIR/BAR.copy || return 1 + echo FOO > $dir/BAR + $timeout $TIMEOUT rados --pool $TEST_POOL put BAR $dir/BAR || return 1 + $timeout $TIMEOUT rados --pool $TEST_POOL get BAR $dir/BAR.copy || return 1 + $diff $dir/BAR $dir/BAR.copy || return 1 } function test_activate() { @@ -296,8 +258,6 @@ function test_activate() { local to_activate=$2 local osd_uuid=$($uuidgen) - $mkdir -p $OSD_DATA - ${CEPH_DISK} $CEPH_DISK_ARGS \ prepare --osd-uuid $osd_uuid $to_prepare || return 1 @@ -310,17 +270,22 @@ function test_activate() { } function test_activate_dir() { - run_mon + local dir=$1 + shift - local osd_data=$DIR/dir + run_mon $dir a + $@ + + local osd_data=$dir/dir $mkdir -p $osd_data test_activate $osd_data $osd_data || return 1 } function test_activate_dir_bluestore() { - run_mon + local dir=$1 + run_mon $dir a - local osd_data=$DIR/dir + local osd_data=$dir/dir $mkdir -p $osd_data local to_prepare=$osd_data local to_activate=$osd_data @@ -339,50 +304,78 @@ function 
test_activate_dir_bluestore() { } function test_find_cluster_by_uuid() { - setup - test_activate_dir 2>&1 | tee $DIR/test_find - ! grep "No cluster conf found in $DIR" $DIR/test_find || return 1 - teardown + local dir=$1 + test_activate_dir $dir 2>&1 | tee $dir/test_find + ! grep "No cluster conf found in $dir" $dir/test_find || return 1 + teardown $dir - setup - rm $DIR/ceph.conf - test_activate_dir > $DIR/test_find 2>&1 - grep --quiet "No cluster conf found in $DIR" $DIR/test_find || return 1 - teardown + setup $dir + test_activate_dir $dir "rm $dir/ceph.conf" > $dir/test_find 2>&1 + cp $dir/test_find /tmp + grep --quiet "No cluster conf found in $dir" $dir/test_find || return 1 } # http://tracker.ceph.com/issues/9653 function test_keyring_path() { - test_activate_dir 2>&1 | tee $DIR/test_keyring - grep --quiet "keyring $DIR/bootstrap-osd/ceph.keyring" $DIR/test_keyring || return 1 + local dir=$1 + test_activate_dir $dir 2>&1 | tee $dir/test_keyring + grep --quiet "keyring $dir/bootstrap-osd/ceph.keyring" $dir/test_keyring || return 1 } # http://tracker.ceph.com/issues/13522 function ceph_osd_fail_once_fixture() { + local dir=$1 local command=ceph-osd local fpath=`readlink -f $(which $command)` - [ "$fpath" = `readlink -f ../$command` ] || [ "$fpath" = `readlink -f $(pwd)/$command` ] || return 1 + [ "$fpath" = `readlink -f $CEPH_BIN/$command` ] || [ "$fpath" = `readlink -f $(pwd)/$command` ] || return 1 - cat > $DIR/$command < $dir/$command <' : there must be only one + export CEPH_ARGS + CEPH_ARGS+=" --fsid=$(uuidgen)" + CEPH_ARGS+=" --auth-supported=none" + CEPH_ARGS+=" --mon-host=$CEPH_MON" + CEPH_ARGS+=" --chdir=" + CEPH_ARGS+=" --journal-dio=false" + CEPH_ARGS+=" --erasure-code-dir=$CEPH_LIB" + CEPH_ARGS+=" --plugin-dir=$CEPH_LIB" + CEPH_ARGS+=" --compression-dir=$CEPH_LIB" + CEPH_ARGS+=" --log-file=$dir/\$name.log" + CEPH_ARGS+=" --pid-file=$dir/\$name.pidfile" + CEPH_ARGS+=" --osd-class-dir=$CEPH_LIB" + CEPH_ARGS+=" --run-dir=$dir" + CEPH_ARGS+=" --osd-failsafe-full-ratio=.99" + CEPH_ARGS+=" --osd-journal-size=100" + CEPH_ARGS+=" --debug-osd=20" + CEPH_ARGS+=" --debug-bdev=20" + CEPH_ARGS+=" --debug-bluestore=20" + CEPH_ARGS+=" --osd-max-object-name-len=460" + CEPH_ARGS+=" --osd-max-object-namespace-len=64 " local default_actions default_actions+="test_path " default_actions+="test_no_path " @@ -396,23 +389,16 @@ function run() { default_actions+="test_activate_dir_bluestore " default_actions+="test_ceph_osd_mkfs " local actions=${@:-$default_actions} - local status for action in $actions ; do - setup + setup $dir || return 1 set -x - $action - status=$? + $action $dir || return 1 set +x - teardown - if test $status != 0 ; then - break - fi + teardown $dir || return 1 done - rm -fr virtualenv-$DIR - return $status } -run $@ +main test-ceph-disk "$@" # Local Variables: # compile-command: "cd .. ; test/ceph-disk.sh # test_activate_dir" diff --git a/src/ceph-disk/tox.ini b/src/ceph-disk/tox.ini index ef8b8d0414a..ec0dad938be 100644 --- a/src/ceph-disk/tox.ini +++ b/src/ceph-disk/tox.ini @@ -5,6 +5,7 @@ envlist = flake8,py27 setenv = VIRTUAL_ENV={envdir} CEPH_DISK={envbindir}/coverage run --append --source=ceph_disk -- {envbindir}/ceph-disk +passenv = CEPH_ROOT CEPH_BIN CEPH_LIB usedevelop = true deps = {env:NO_INDEX:}
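
For reference, the environment plumbing above can be exercised by hand
outside of "make check". A minimal sketch, assuming an out-of-source
cmake build in $HOME/ceph/build with the usual bin/ and lib/ output
directories (these paths are illustrative, not part of the patches):

    # export the same variables the run-tox-ceph-disk ctest target sets
    export CEPH_ROOT=$HOME/ceph           # ${CMAKE_SOURCE_DIR}
    export CEPH_BIN=$HOME/ceph/build/bin  # ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
    export CEPH_LIB=$HOME/ceph/build/lib  # ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
    cd $CEPH_ROOT/src/ceph-disk
    tox   # tox>=2.0 forwards the three variables via the "passenv" setting
    # with the variables unset, ceph-disk.sh falls back to ".."-relative
    # paths, preserving the old in-source (automake) behavior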