#!/bin/bash -ex

SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# clean up test state on exit unless CLEANUP=false
[ -z "$CLEANUP" ] && CLEANUP=true

FSID='00000000-0000-0000-0000-0000deadbeef'

# images that are used
IMAGE_MASTER=${IMAGE_MASTER:-'quay.ceph.io/ceph-ci/ceph:master'}
IMAGE_PACIFIC=${IMAGE_PACIFIC:-'quay.ceph.io/ceph-ci/ceph:pacific'}
#IMAGE_OCTOPUS=${IMAGE_OCTOPUS:-'quay.ceph.io/ceph-ci/ceph:octopus'}
IMAGE_DEFAULT=${IMAGE_MASTER}

OSD_IMAGE_NAME="${SCRIPT_NAME%.*}_osd.img"
OSD_IMAGE_SIZE='6G'
OSD_TO_CREATE=2
OSD_VG_NAME=${SCRIPT_NAME%.*}
OSD_LV_NAME=${SCRIPT_NAME%.*}

CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
CEPHADM_SAMPLES_DIR=${CEPHADM_SRC_DIR}/samples

[ -z "$SUDO" ] && SUDO=sudo

if [ -z "$CEPHADM" ]; then
    CEPHADM=${CEPHADM_SRC_DIR}/cephadm
fi

# at this point, we need $CEPHADM set
if ! [ -x "$CEPHADM" ]; then
    echo "cephadm not found. Please set \$CEPHADM"
    exit 1
fi

# add image to args
CEPHADM_ARGS="$CEPHADM_ARGS --image $IMAGE_DEFAULT"

# combine into a single var
CEPHADM_BIN="$CEPHADM"
CEPHADM="$SUDO $CEPHADM_BIN $CEPHADM_ARGS"
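
# From here on, "$CEPHADM <subcommand>" expands to roughly
#   sudo <path-to-cephadm> --image quay.ceph.io/ceph-ci/ceph:master <subcommand>
# while $CEPHADM_BIN stays the bare binary for invocations that pick the image
# some other way (via the CEPHADM_IMAGE environment variable or an explicit
# --image flag).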

# clean up previous run(s)?
$CEPHADM rm-cluster --fsid $FSID --force
$SUDO vgchange -an $OSD_VG_NAME || true
loopdev=$($SUDO losetup -a | grep $(basename $OSD_IMAGE_NAME) | awk -F : '{print $1}')
if ! [ "$loopdev" = "" ]; then
    $SUDO losetup -d $loopdev
fi

# TMPDIR for test data
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
[ -d "$TMPDIR_TEST_MULTIPLE_MOUNTS" ] || TMPDIR_TEST_MULTIPLE_MOUNTS=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)

function cleanup()
{
    if [ $CLEANUP = false ]; then
        # preserve the TMPDIR state
        echo "========================"
        echo "!!! CLEANUP=$CLEANUP !!!"
        echo
        echo "TMPDIR=$TMPDIR"
        echo "========================"
        return
    fi

    dump_all_logs $FSID
    rm -rf $TMPDIR
}
trap cleanup EXIT

function expect_false()
{
    set -x
    if eval "$@"; then return 1; else return 0; fi
}
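
# expect_false inverts the exit status of the command it is given, so a step
# that is required to fail can still run under "set -e".  For example
# (illustrative only, not part of the test; the path is hypothetical):
#   expect_false test -f /path/that/should/not/exist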

function is_available()
{
    local name="$1"
    local condition="$2"
    local tries="$3"

    local num=0
    while ! eval "$condition"; do
        num=$(($num + 1))
        if [ "$num" -ge $tries ]; then
            echo "$name is not available"
            false
        fi
        sleep 5
    done

    echo "$name is available"
    true
}
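
# is_available polls $condition every 5 seconds for up to $tries attempts; the
# "false" on exhaustion aborts the whole script because of "set -e".  A typical
# call looks like (simplified, illustrative only; the real conditions below
# wrap the command in a cephadm shell):
#   is_available "mon.b" "ceph mon stat | grep '2 mons'" 30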

function dump_log()
{
    local fsid="$1"
    local name="$2"
    local num_lines="$3"

    if [ -z "$num_lines" ]; then
        num_lines=100
    fi

    echo '-------------------------'
    echo 'dump daemon log:' $name
    echo '-------------------------'

    $CEPHADM logs --fsid $fsid --name $name -- --no-pager -n $num_lines
}

function dump_all_logs()
{
    local fsid="$1"
    local names=$($CEPHADM ls | jq -r '.[] | select(.fsid == "'$fsid'").name')

    echo 'dumping logs for daemons: ' $names
    for name in $names; do
        dump_log $fsid $name
    done
}
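
# The jq filter above selects every daemon reported by "cephadm ls" whose
# .fsid matches the test cluster and prints its .name, e.g. "mon.a mgr.x osd.0"
# once the cluster is up (example output only).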

function nfs_stop()
{
    # stop the running nfs server
    local units="nfs-server nfs-kernel-server"
    for unit in $units; do
        if systemctl status $unit < /dev/null; then
            $SUDO systemctl stop $unit
        fi
    done

    # ensure the NFS port is no longer in use
    expect_false "$SUDO ss -tlnp '( sport = :nfs )' | grep LISTEN"
}

## prepare + check host
$SUDO $CEPHADM check-host

## run a gather-facts (output to stdout)
$SUDO $CEPHADM gather-facts

## version + --image
$SUDO CEPHADM_IMAGE=$IMAGE_PACIFIC $CEPHADM_BIN version
$SUDO CEPHADM_IMAGE=$IMAGE_PACIFIC $CEPHADM_BIN version \
    | grep 'ceph version 16'
#$SUDO CEPHADM_IMAGE=$IMAGE_OCTOPUS $CEPHADM_BIN version
#$SUDO CEPHADM_IMAGE=$IMAGE_OCTOPUS $CEPHADM_BIN version \
#    | grep 'ceph version 15'
$SUDO $CEPHADM_BIN --image $IMAGE_MASTER version | grep 'ceph version'

# try forcing docker; this won't work if docker isn't installed
systemctl status docker > /dev/null && ( $CEPHADM --docker version | grep 'ceph version' ) || echo "docker not installed"
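
# Image selection is exercised both ways above: through the CEPHADM_IMAGE
# environment variable and through the --image flag.  "ceph version 16"
# corresponds to pacific, so pulling $IMAGE_PACIFIC must report a 16.x build.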

## test shell before bootstrap, when crash dir isn't (yet) present on this host
$CEPHADM shell --fsid $FSID -- ceph -v | grep 'ceph version'
$CEPHADM shell --fsid $FSID -e FOO=BAR -- printenv | grep FOO=BAR

# test stdin
echo foo | $CEPHADM shell -- cat | grep -q foo

## bootstrap
ORIG_CONFIG=`mktemp -p $TMPDIR`
CONFIG=`mktemp -p $TMPDIR`
MONCONFIG=`mktemp -p $TMPDIR`
KEYRING=`mktemp -p $TMPDIR`
IP=127.0.0.1
cat <<EOF > $ORIG_CONFIG
[global]
log to file = true
osd crush chooseleaf type = 0
EOF
$CEPHADM bootstrap \
    --mon-id a \
    --mgr-id x \
    --mon-ip $IP \
    --fsid $FSID \
    --config $ORIG_CONFIG \
    --output-config $CONFIG \
    --output-keyring $KEYRING \
    --output-pub-ssh-key $TMPDIR/ceph.pub \
    --allow-overwrite \
    --skip-mon-network \
    --skip-monitoring-stack \
    --with-exporter
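
# --skip-mon-network and --skip-monitoring-stack keep the bootstrap minimal:
# no public_network probing and no prometheus/grafana/alertmanager services;
# the monitoring pieces are instead deployed one by one further down.
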
test -e $CONFIG
test -e $KEYRING
rm -f $ORIG_CONFIG

$SUDO test -e /var/log/ceph/$FSID/ceph-mon.a.log
$SUDO test -e /var/log/ceph/$FSID/ceph-mgr.x.log

for u in ceph.target \
         ceph-$FSID.target \
         ceph-$FSID@mon.a \
         ceph-$FSID@mgr.x; do
    systemctl is-enabled $u
    systemctl is-active $u
done
systemctl | grep system-ceph | grep -q .slice # naming is escaped and annoying
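
# cephadm-managed daemons run as systemd units named "ceph-<fsid>@<type>.<id>",
# grouped under the per-cluster target "ceph-<fsid>.target" and the global
# "ceph.target"; the loop above asserts each unit is both enabled and active.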

# check ceph -s works (via shell w/ passed config/keyring)
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph -s | grep $FSID

# mark these service types unmanaged so the orchestrator does not try to
# (re)deploy them while the test drives "cephadm deploy" by hand
for t in mon mgr node-exporter prometheus grafana; do
    $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
        ceph orch apply $t --unmanaged
done

## ls
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").fsid' \
    | grep $FSID
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mgr.x").fsid' \
    | grep $FSID

# make sure the version is returned correctly
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").version' | grep -q \\.
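
# "cephadm ls" prints a JSON array with one object per locally deployed daemon;
# the checks above only rely on the .name, .fsid and .version fields, roughly
# (abridged, illustrative output only):
#   [ { "name": "mon.a", "fsid": "00000000-...-0000deadbeef", "version": "...", ... } ]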

## deploy
# add mon.b
cp $CONFIG $MONCONFIG
echo "public addrv = [v2:$IP:3301,v1:$IP:6790]" >> $MONCONFIG
$CEPHADM deploy --name mon.b \
    --fsid $FSID \
    --keyring /var/lib/ceph/$FSID/mon.a/keyring \
    --config $MONCONFIG
for u in ceph-$FSID@mon.b; do
    systemctl is-enabled $u
    systemctl is-active $u
done
cond="$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph mon stat | grep '2 mons'"
is_available "mon.b" "$cond" 30
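
# mon.b is pinned to non-default ports (v2:3301, v1:6790) via "public addrv"
# so it can share 127.0.0.1 with mon.a, which already owns 3300/6789.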

# add mgr.y
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph auth get-or-create mgr.y \
        mon 'allow profile mgr' \
        osd 'allow *' \
        mds 'allow *' > $TMPDIR/keyring.mgr.y
$CEPHADM deploy --name mgr.y \
    --fsid $FSID \
    --keyring $TMPDIR/keyring.mgr.y \
    --config $CONFIG
for u in ceph-$FSID@mgr.y; do
    systemctl is-enabled $u
    systemctl is-active $u
done

# wait for the new mgr to register as a standby, then assert it
for f in `seq 1 30`; do
    if $CEPHADM shell --fsid $FSID \
            --config $CONFIG --keyring $KEYRING -- \
        ceph -s -f json-pretty \
        | jq '.mgrmap.num_standbys' | grep -q 1 ; then break; fi
    sleep 1
done
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph -s -f json-pretty \
    | jq '.mgrmap.num_standbys' | grep -q 1

# add osd.{1,2,..}
dd if=/dev/zero of=$TMPDIR/$OSD_IMAGE_NAME bs=1 count=0 seek=$OSD_IMAGE_SIZE
loop_dev=$($SUDO losetup -f)
$SUDO vgremove -f $OSD_VG_NAME || true
$SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME
$SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev
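
# The dd invocation only seeks to $OSD_IMAGE_SIZE without writing data, so the
# OSD backing file is sparse; it is then attached to a free loop device and
# turned into the LVM volume group that the per-OSD logical volumes are carved
# out of.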

# osd bootstrap keyring
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd

# create lvs first so ceph-volume doesn't overlap with lv creation
for id in `seq 0 $((OSD_TO_CREATE-1))`; do
    $SUDO lvcreate -l $((100/$OSD_TO_CREATE))%VG -n $OSD_LV_NAME.$id $OSD_VG_NAME
done
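
# With OSD_TO_CREATE=2 this creates $OSD_LV_NAME.0 and $OSD_LV_NAME.1, each
# taking 50% of the volume group.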

for id in `seq 0 $((OSD_TO_CREATE-1))`; do
    device_name=/dev/$OSD_VG_NAME/$OSD_LV_NAME.$id
    CEPH_VOLUME="$CEPHADM ceph-volume \
        --fsid $FSID \
        --config $CONFIG \
        --keyring $TMPDIR/keyring.bootstrap.osd --"

    # prepare the osd
    $CEPH_VOLUME lvm prepare --bluestore --data $device_name --no-systemd
    $CEPH_VOLUME lvm batch --no-auto $device_name --yes --no-systemd

    # osd id and osd fsid
    $CEPH_VOLUME lvm list --format json $device_name > $TMPDIR/osd.map
    osd_id=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_id"? | select(.)')
    osd_fsid=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_fsid"? | select(.)')
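
    # "ceph-volume lvm list --format json" nests the LV tags fairly deeply, so
    # the jq filters walk the whole document ("..") and pick out whichever
    # object carries the "ceph.osd_id" / "ceph.osd_fsid" tags.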

    # deploy the osd
    $CEPHADM deploy --name osd.$osd_id \
        --fsid $FSID \
        --keyring $TMPDIR/keyring.bootstrap.osd \
        --config $CONFIG \
        --osd-fsid $osd_fsid
done

# add node-exporter
${CEPHADM//--image $IMAGE_DEFAULT/} deploy \
    --name node-exporter.a --fsid $FSID
cond="curl 'http://localhost:9100' | grep -q 'Node Exporter'"
is_available "node-exporter" "$cond" 10
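
# ${CEPHADM//--image $IMAGE_DEFAULT/} strips the default --image argument from
# the cephadm command line, letting cephadm fall back to its own default images
# for the monitoring containers (node-exporter, prometheus, grafana) rather
# than the ceph image.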

# add prometheus
cat ${CEPHADM_SAMPLES_DIR}/prometheus.json | \
    ${CEPHADM//--image $IMAGE_DEFAULT/} deploy \
        --name prometheus.a --fsid $FSID --config-json -
cond="curl 'localhost:9095/api/v1/query?query=up'"
is_available "prometheus" "$cond" 10

# add grafana
cat ${CEPHADM_SAMPLES_DIR}/grafana.json | \
    ${CEPHADM//--image $IMAGE_DEFAULT/} deploy \
        --name grafana.a --fsid $FSID --config-json -
cond="curl --insecure 'https://localhost:3000' | grep -q 'grafana'"
is_available "grafana" "$cond" 50

# add nfs-ganesha
nfs_stop
nfs_rados_pool=$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json | jq -r '.["pool"]')
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph osd pool create $nfs_rados_pool 64
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    rados --pool nfs-ganesha --namespace nfs-ns create conf-nfs.a
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph orch pause
$CEPHADM deploy --name nfs.a \
    --fsid $FSID \
    --keyring $KEYRING \
    --config $CONFIG \
    --config-json ${CEPHADM_SAMPLES_DIR}/nfs.json
cond="$SUDO ss -tlnp '( sport = :nfs )' | grep 'ganesha.nfsd'"
is_available "nfs" "$cond" 10
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    ceph orch resume
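
# "ceph orch pause" suspends the orchestrator's background activity so it does
# not interfere with the manually deployed nfs.a daemon; it is resumed once the
# ganesha listener has been verified on the NFS port.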

# add alertmanager via custom container
alertmanager_image=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.image')
tcp_ports=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.ports | map_values(.|tostring) | join(" ")')
cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | \
    ${CEPHADM//--image $IMAGE_DEFAULT/} \
        --image $alertmanager_image \
        deploy \
        --tcp-ports "$tcp_ports" \
        --name container.alertmanager.a \
        --fsid $FSID \
        --config-json -
cond="$CEPHADM enter --fsid $FSID --name container.alertmanager.a -- test -f \
    /etc/alertmanager/alertmanager.yml"
is_available "alertmanager.yml" "$cond" 10
cond="curl 'http://localhost:9093' | grep -q 'Alertmanager'"
is_available "alertmanager" "$cond" 10
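
# The "container.<name>" daemon type deploys an arbitrary image: the image and
# the tcp ports are extracted from the sample custom_container.json with jq,
# and the same file is then passed to cephadm as --config-json on stdin.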

# Fetch the token we need to access the exporter API
token=$($CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING ceph cephadm get-exporter-config | jq -r '.token')
[[ ! -z "$token" ]]

# check all exporter threads are active
cond="curl -k -s -H \"Authorization: Bearer $token\" \
    https://localhost:9443/v1/metadata/health | \
    jq -r '.tasks | select(.disks == \"active\" and .daemons == \"active\" and .host == \"active\")'"
is_available "exporter_threads_active" "$cond" 3

# check we deployed the exporter on all hosts
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING ceph orch ls --service-type cephadm-exporter --format json
host_pattern=$($CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING ceph orch ls --service-type cephadm-exporter --format json | jq -r '.[0].placement.host_pattern')
[[ "$host_pattern" = "*" ]]
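
# The assertion above checks that the cephadm-exporter service created by
# --with-exporter is targeted at every host, i.e. its placement host_pattern
# is "*".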

## run
# WRITE ME

## unit
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- is-active
expect_false $CEPHADM unit --fsid $FSID --name mon.xyz -- is-active
$CEPHADM unit --fsid $FSID --name mon.a -- disable
expect_false $CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- enable
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
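
# "cephadm unit" passes the verb after "--" to systemctl for the named
# daemon's unit, so the sequence above exercises is-enabled / is-active /
# disable / enable against mon.a's systemd unit.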

## shell
$CEPHADM shell --fsid $FSID -- true
$CEPHADM shell --fsid $FSID -- test -d /var/log/ceph
expect_false $CEPHADM --timeout 10 shell --fsid $FSID -- sleep 60
$CEPHADM --timeout 60 shell --fsid $FSID -- sleep 10
$CEPHADM shell --fsid $FSID --mount $TMPDIR $TMPDIR_TEST_MULTIPLE_MOUNTS -- stat /mnt/$(basename $TMPDIR)
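
# --timeout N kills the containerized command after N seconds, so the 10s
# timeout around "sleep 60" must fail while the 60s timeout around "sleep 10"
# must not; --mount maps each listed host directory to /mnt/<basename> inside
# the shell container, which the stat call above relies on.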

## enter
expect_false $CEPHADM enter
$CEPHADM enter --fsid $FSID --name mon.a -- test -d /var/lib/ceph/mon/ceph-a
$CEPHADM enter --fsid $FSID --name mgr.x -- test -d /var/lib/ceph/mgr/ceph-x
$CEPHADM enter --fsid $FSID --name mon.a -- pidof ceph-mon
expect_false $CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mon
$CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mgr
# this triggers a bug in older versions of podman, including 18.04's 1.6.2
#expect_false $CEPHADM --timeout 5 enter --fsid $FSID --name mon.a -- sleep 30
$CEPHADM --timeout 60 enter --fsid $FSID --name mon.a -- sleep 10

## ceph-volume
$CEPHADM ceph-volume --fsid $FSID -- inventory --format=json \
    | jq '.[]'

## preserve test state
[ $CLEANUP = false ] && exit 0

## rm-daemon
# mon and osd require --force
expect_false $CEPHADM rm-daemon --fsid $FSID --name mon.a
# mgr does not
$CEPHADM rm-daemon --fsid $FSID --name mgr.x

## zap-osds
# zapping also requires --force
expect_false $CEPHADM zap-osds --fsid $FSID
$CEPHADM zap-osds --fsid $FSID --force

## rm-cluster
expect_false $CEPHADM rm-cluster --fsid $FSID --zap-osds
$CEPHADM rm-cluster --fsid $FSID --force --zap-osds

echo PASS