mgr/dashboard: cephadm-e2e script: improvements
Improvements and some adaptations related to the Jenkins job.

Fixes: https://tracker.ceph.com/issues/51612
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
commit 65b75000b7 (parent aeafb8dd46)
@@ -430,7 +430,14 @@ run-cephadm-e2e-tests.sh
 Orchestrator backend behave correctly.
 
 Prerequisites: you need to install `KCLI
-<https://kcli.readthedocs.io/en/latest/>`_ in your local machine.
+<https://kcli.readthedocs.io/en/latest/>`_ and Node.js in your local machine.
+
+Configure KCLI plan requirements::
+
+  $ sudo chown -R $(id -un) /var/lib/libvirt/images
+  $ mkdir -p /var/lib/libvirt/images/ceph-dashboard
+  $ kcli create pool -p /var/lib/libvirt/images/ceph-dashboard ceph-dashboard
+  $ kcli create network -c 192.168.100.0/24 ceph-dashboard
 
 Note:
   This script is aimed to be run as jenkins job so the cleanup is triggered only in a jenkins
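
A quick pre-flight check for these prerequisites might look like this (a sketch only; it assumes your KCLI version provides ``kcli list pool`` and ``kcli list network``)::

  #!/usr/bin/env bash
  # Hypothetical sanity check for the KCLI/Node.js prerequisites above.
  set -e
  command -v kcli >/dev/null || { echo "KCLI is not installed"; exit 1; }
  command -v node >/dev/null || { echo "Node.js is not installed"; exit 1; }
  # Pool and network names are the ones created in the snippet above.
  kcli list pool | grep -q ceph-dashboard || { echo "pool ceph-dashboard missing"; exit 1; }
  kcli list network | grep -q ceph-dashboard || { echo "network ceph-dashboard missing"; exit 1; }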
@@ -439,10 +446,17 @@ Note:
 Start E2E tests by running::
 
   $ cd <your/ceph/repo/dir>
-  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/{dist,node_modules,src/environments}
   $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+  $ kcli delete plan -y ceph # After tests finish.
+
+You can also start a cluster in development mode and later run E2E tests by running::
+
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh --dev-mode
+  $ # Work on your feature, bug fix, ...
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+  $ # Remember to kill the npm build watch process i.e.: pkill -f "ng build"
 
 Other running options
 .....................
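Since the test runner resolves its target URL and credentials from the environment, an already-running dashboard can also be targeted directly, skipping VM discovery (values below are illustrative; the variable names come from run-cephadm-e2e-tests.sh)::

  export CYPRESS_BASE_URL="https://192.168.100.10:8443"  # skip get_vm_ip discovery
  export CYPRESS_LOGIN_USER="admin"
  export CYPRESS_LOGIN_PWD="password"
  ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh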
@@ -2,21 +2,14 @@
 
 export PATH=/root/bin:$PATH
 mkdir /root/bin
-{% if ceph_dev_folder is defined %}
-  cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
-{% else %}
-  cd /root/bin
-  curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/master/src/cephadm/cephadm
-{% endif %}
+cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
 chmod +x /root/bin/cephadm
 mkdir -p /etc/ceph
 mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
-{% if ceph_dev_folder is defined %}
-  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
-{% else %}
-  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
-{% endif %}
-fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
+cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
 
 {% for number in range(1, nodes) %}
   ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
 {% endfor %}
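For reference, this template now renders to a single unconditional bootstrap path. Assuming admin_password=admin, nodes=3, ceph_dev_folder=ceph, prefix=ceph and domain=cephlab.com (all values illustrative), the rendered script would look roughly like::

  export PATH=/root/bin:$PATH
  mkdir /root/bin
  cp /mnt/ceph/src/cephadm/cephadm /root/bin/cephadm
  chmod +x /root/bin/cephadm
  mkdir -p /etc/ceph
  mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password admin \
      --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/ceph
  # range(1, nodes) with nodes=3 yields nodes 01 and 02:
  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@ceph-node-01.cephlab.com
  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@ceph-node-02.cephlab.com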
@@ -1,7 +1,7 @@
 parameters:
   nodes: 3
-  pool: default
-  network: default
+  pool: ceph-dashboard
+  network: ceph-dashboard
   domain: cephlab.com
   prefix: ceph
   numcpus: 1
@@ -26,15 +26,14 @@ parameters:
    - {{ network }}
   disks: {{ disks }}
   pool: {{ pool }}
-{% if ceph_dev_folder is defined %}
   sharedfolders: [{{ ceph_dev_folder }}]
-{% endif %}
-  files:
-   - bootstrap-cluster.sh
   cmds:
    - dnf -y install python3 chrony lvm2 podman
    - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
    - setenforce 0
 {% if number == 0 %}
-   - bash /root/bootstrap-cluster.sh
+  scripts:
+   - bootstrap-cluster.sh
 {% endif %}
 {% endfor %}
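Any of these plan parameters can be overridden per run with kcli's -P flag instead of editing the file, which is how the CI scripts inject ceph_dev_folder (the extra overrides below are illustrative)::

  kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
      -P ceph_dev_folder=${PWD} \
      -P nodes=3 -P numcpus=2 \
      ceph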
@@ -2,67 +2,23 @@
 
 set -ex
 
-cleanup() {
-    if [[ -n "$JENKINS_HOME" ]]; then
-        printf "\n\nStarting cleanup...\n\n"
-        kcli delete plan -y ceph || true
-        sudo podman container prune -f
-        printf "\n\nCleanup completed.\n\n"
-    fi
-}
-
-on_error() {
-    if [ "$1" != "0" ]; then
-        printf "\n\nERROR $1 thrown on line $2\n\n"
-        printf "\n\nCollecting info...\n\n"
-        for vm_id in 0 1 2
-        do
-            local vm="ceph-node-0${vm_id}"
-            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
-            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
-            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
-            printf "\n\nDisplaying podman logs:\n\n"
-            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
-        done
-        printf "\n\nTEST FAILED.\n\n"
-    fi
-}
-
-trap 'on_error $? $LINENO' ERR
-trap 'cleanup $? $LINENO' EXIT
-
-sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
-
-: ${CEPH_DEV_FOLDER:=${PWD}}
-
-# Required to start dashboard.
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-NG_CLI_ANALYTICS=false npm ci
-npm run build
-
-cd ${CEPH_DEV_FOLDER}
-kcli delete plan -y ceph || true
-kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
-
-while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
-    sleep 30
-    kcli list vm
-    # Uncomment for debugging purposes.
-    #kcli ssh -u root -- ceph-node-00 'podman ps -a'
-    #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
-    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
-done
-
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-npx cypress info
-
 : ${CYPRESS_BASE_URL:=''}
 : ${CYPRESS_LOGIN_USER:='admin'}
 : ${CYPRESS_LOGIN_PWD:='password'}
 : ${CYPRESS_ARGS:=''}
+: ${DASHBOARD_PORT:='8443'}
 
+get_vm_ip () {
+    local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+    echo -n $ip
+}
+
 if [[ -z "${CYPRESS_BASE_URL}" ]]; then
-    CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+    CEPH_NODE_00_IP="$(get_vm_ip ceph-node-00)"
+    if [[ -z "${CEPH_NODE_00_IP}" ]]; then
+        . "$(dirname $0)"/start-cluster.sh
+    fi
+    CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
 fi
 
 export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
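Note that the fallback sources start-cluster.sh with ``.`` rather than executing it, so the cluster bootstrap runs in the current shell and its variables remain visible to the test runner. The new DASHBOARD_PORT variable also replaces the previously hard-coded 8443, e.g. (port value illustrative)::

  DASHBOARD_PORT=8080 ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh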
@@ -78,4 +34,8 @@ cypress_run () {
     npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
 }
 
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+
 cypress_run "orchestrator/workflow/*-spec.ts"
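Because cypress_run forwards ${CYPRESS_ARGS} verbatim to ``npx cypress run``, extra Cypress CLI flags can be injected without touching the script (the flag below is only an example)::

  CYPRESS_ARGS="--config video=false" \
      ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh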
src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh (new executable file, 79 lines)
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+set -ex
+
+cleanup() {
+    set +x
+    if [[ -n "$JENKINS_HOME" ]]; then
+        printf "\n\nStarting cleanup...\n\n"
+        kcli delete plan -y ceph || true
+        docker container prune -f
+        printf "\n\nCleanup completed.\n\n"
+    fi
+}
+
+on_error() {
+    set +x
+    if [ "$1" != "0" ]; then
+        printf "\n\nERROR $1 thrown on line $2\n\n"
+        printf "\n\nCollecting info...\n\n"
+        for vm_id in 0 1 2
+        do
+            local vm="ceph-node-0${vm_id}"
+            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+            printf "\n\nDisplaying container logs:\n\n"
+            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+        done
+        printf "\n\nTEST FAILED.\n\n"
+    fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+  shift
+  case "$arg" in
+    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS="-P dev_mode=${DEV_MODE}" ;;
+  esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+FRONTEND_BUILD_OPTS='-- --prod'
+if [[ -n "${DEV_MODE}" ]]; then
+  FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora34'}
+: ${VM_IMAGE_URL:='https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+    ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+    sleep ${DASHBOARD_CHECK_INTERVAL}
+    kcli list vm
+    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+        kcli ssh -u root -- ceph-node-00 'podman ps -a'
+        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+    fi
+    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
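start-cluster.sh reads VM_IMAGE/VM_IMAGE_URL, CLUSTER_DEBUG and DASHBOARD_CHECK_INTERVAL from the environment, so a verbose debugging run in dev mode might look like (values illustrative)::

  CLUSTER_DEBUG=1 DASHBOARD_CHECK_INTERVAL=5 \
      ./src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh --dev-mode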