mirror of
https://github.com/ceph/ceph
synced 2025-04-01 14:51:13 +00:00
qa/workunits/rbd: new test for rbd-mirror peer config-key secrets
Fixes: http://tracker.ceph.com/issues/24688

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
This commit is contained in:
parent
900e30d1bb
commit
d3a1a831ac
qa
suites/rbd/mirror/workloads
workunits/rbd
@@ -0,0 +1,12 @@
|
||||
meta:
|
||||
- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon
|
||||
tasks:
|
||||
- workunit:
|
||||
clients:
|
||||
cluster1.client.mirror: [rbd/rbd_mirror.sh]
|
||||
env:
|
||||
# override workunit setting of CEPH_ARGS='--cluster'
|
||||
CEPH_ARGS: ''
|
||||
RBD_MIRROR_INSTANCES: '4'
|
||||
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
|
||||
RBD_MIRROR_CONFIG_KEY: '1'
|
@@ -81,7 +81,7 @@ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
|
||||
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
|
||||
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
|
||||
@@ -91,7 +91,7 @@ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
|
||||
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
|
||||
|
||||
@@ -99,17 +99,17 @@ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
|
||||
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
|
||||
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
|
||||
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
|
||||
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}
|
||||
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
|
||||
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
|
||||
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
|
||||
|
@@ -17,7 +17,7 @@ is_leader()
|
||||
test -n "${pool}" || pool=${POOL}
|
||||
|
||||
admin_daemon "${CLUSTER1}:${instance}" \
|
||||
rbd mirror status ${pool} ${CLUSTER2} |
|
||||
rbd mirror status ${pool} ${CLUSTER2}${PEER_CLUSTER_SUFFIX} |
|
||||
grep '"leader": true'
|
||||
}
|
||||
|
||||
|
@@ -8,18 +8,19 @@
|
||||
#
|
||||
# There are several env variables useful when troubleshooting a test failure:
|
||||
#
|
||||
# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
|
||||
# destroy the clusters and remove the temp directory)
|
||||
# on exit, so it is possible to check the test state
|
||||
# after failure.
|
||||
# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
|
||||
# (should not exist) instead of running mktemp(1).
|
||||
# RBD_MIRROR_ARGS - use this to pass additional arguments to started
|
||||
# rbd-mirror daemons.
|
||||
# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
|
||||
# when starting clusters.
|
||||
# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
|
||||
#
|
||||
# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
|
||||
# destroy the clusters and remove the temp directory)
|
||||
# on exit, so it is possible to check the test state
|
||||
# after failure.
|
||||
# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
|
||||
# (should not exist) instead of running mktemp(1).
|
||||
# RBD_MIRROR_ARGS - use this to pass additional arguments to started
|
||||
# rbd-mirror daemons.
|
||||
# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
|
||||
# when starting clusters.
|
||||
# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
|
||||
# RBD_MIRROR_CONFIG_KEY - if not empty, use config-key for remote cluster
|
||||
# secrets
|
||||
# The cleanup can be done as a separate step, running the script with
|
||||
# `cleanup ${RBD_MIRROR_TEMDIR}' arguments.
|
||||
#
|
||||
@@ -78,6 +79,7 @@ RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-2}
|
||||
|
||||
CLUSTER1=cluster1
|
||||
CLUSTER2=cluster2
|
||||
PEER_CLUSTER_SUFFIX=
|
||||
POOL=mirror
|
||||
PARENT_POOL=mirror_parent
|
||||
TEMPDIR=
|
||||
@@ -194,7 +196,7 @@ create_users()
|
||||
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
|
||||
CEPH_ARGS='' ceph --cluster "${cluster}" \
|
||||
auth get-or-create client.${MIRROR_USER_ID_PREFIX}${instance} \
|
||||
mon 'profile rbd' osd 'profile rbd' >> \
|
||||
mon 'profile rbd-mirror' osd 'profile rbd' >> \
|
||||
${CEPH_ROOT}/run/${cluster}/keyring
|
||||
done
|
||||
}
|
||||
@@ -209,7 +211,7 @@ update_users()
|
||||
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
|
||||
CEPH_ARGS='' ceph --cluster "${cluster}" \
|
||||
auth caps client.${MIRROR_USER_ID_PREFIX}${instance} \
|
||||
mon 'profile rbd' osd 'profile rbd'
|
||||
mon 'profile rbd-mirror' osd 'profile rbd'
|
||||
done
|
||||
}
|
||||
|
||||
@@ -241,6 +243,10 @@ setup_pools()
|
||||
{
|
||||
local cluster=$1
|
||||
local remote_cluster=$2
|
||||
local mon_map_file
|
||||
local mon_addr
|
||||
local admin_key_file
|
||||
local uuid
|
||||
|
||||
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${POOL} 64 64
|
||||
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${PARENT_POOL} 64 64
|
||||
@@ -251,8 +257,26 @@
|
||||
rbd --cluster ${cluster} mirror pool enable ${POOL} pool
|
||||
rbd --cluster ${cluster} mirror pool enable ${PARENT_POOL} image
|
||||
|
||||
rbd --cluster ${cluster} mirror pool peer add ${POOL} ${remote_cluster}
|
||||
rbd --cluster ${cluster} mirror pool peer add ${PARENT_POOL} ${remote_cluster}
|
||||
if [ -z ${RBD_MIRROR_CONFIG_KEY} ]; then
|
||||
rbd --cluster ${cluster} mirror pool peer add ${POOL} ${remote_cluster}
|
||||
rbd --cluster ${cluster} mirror pool peer add ${PARENT_POOL} ${remote_cluster}
|
||||
else
|
||||
mon_map_file=${TEMPDIR}/${remote_cluster}.monmap
|
||||
ceph --cluster ${remote_cluster} mon getmap > ${mon_map_file}
|
||||
mon_addr=$(monmaptool --print ${mon_map_file} | grep -E 'mon\.' | head -n 1 | sed -E 's/^[0-9]+: ([^/]+).+$/\1/')
|
||||
|
||||
admin_key_file=${TEMPDIR}/${remote_cluster}.client.${CEPH_ID}.key
|
||||
CEPH_ARGS='' ceph --cluster ${remote_cluster} auth get-key client.${CEPH_ID} > ${admin_key_file}
|
||||
|
||||
rbd --cluster ${cluster} mirror pool peer add ${POOL} client.${CEPH_ID}@${remote_cluster}-DNE \
|
||||
--remote-mon-host ${mon_addr} --remote-key-file ${admin_key_file}
|
||||
|
||||
uuid=$(rbd --cluster ${cluster} mirror pool peer add ${PARENT_POOL} client.${CEPH_ID}@${remote_cluster}-DNE)
|
||||
rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} mon-host ${mon_addr}
|
||||
rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} key-file ${admin_key_file}
|
||||
|
||||
PEER_CLUSTER_SUFFIX=-DNE
|
||||
fi
|
||||
}
|
||||
|
||||
setup_tempdir()
|
||||
|
Loading…
Reference in New Issue
Block a user