qa/workunits/rbd: Add tests for rbd_support module recovery

... after the module's RADOS client is blocklisted.
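
Each test fetches the rbd_support module's RADOS client address from
'ceph mgr dump', blocklists it, and then verifies that the corresponding
handler recovers: commands start succeeding again after a bounded number
of retries and, where applicable, state configured before the
blocklisting is preserved.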

Signed-off-by: Ramana Raja <rraja@redhat.com>
Ramana Raja 2023-02-04 22:36:16 -05:00
parent cc0468738e
commit a2f15d4b2f

@@ -1246,6 +1246,44 @@ test_trash_purge_schedule() {
    ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_trash_purge_schedule_recovery() {
    echo "testing recovery of trash_purge_schedule handler after module's RADOS client is blocklisted..."
    remove_images
    ceph osd pool create rbd3 8
    rbd pool init rbd3
    rbd namespace create rbd3/ns1

    rbd trash purge schedule add -p rbd3/ns1 2d
    rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'

    # Fetch and blocklist the rbd_support module's RADOS client
    CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
        jq 'select(.name == "rbd_support")' |
        jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
    ceph osd blocklist add $CLIENT_ADDR
    ceph osd blocklist ls | grep $CLIENT_ADDR
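    # The module is expected to detect the blocklisting, discard its old
    # RADOS client and recreate it; allow up to ~4 minutes (24 retries x
    # 10s) for schedule commands to start succeeding again.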
    # Check that you can add a trash purge schedule after a few retries
    expect_fail rbd trash purge schedule add -p rbd3 10m
    sleep 10
    for i in `seq 24`; do
        rbd trash purge schedule add -p rbd3 10m && break
        sleep 10
    done
    rbd trash purge schedule ls -p rbd3 -R | grep 'every 10m'

    # Verify that the schedule present before client blocklisting is preserved
    rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'

    rbd trash purge schedule remove -p rbd3 10m
    rbd trash purge schedule remove -p rbd3/ns1 2d
    rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'every 10m'
    rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'rbd3 *ns1 *every 2d'

    ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_mirror_snapshot_schedule() {
    echo "testing mirror snapshot schedule..."
    remove_images
@@ -1358,6 +1396,54 @@ test_mirror_snapshot_schedule() {
    ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_mirror_snapshot_schedule_recovery() {
    echo "testing recovery of mirror snapshot scheduler after module's RADOS client is blocklisted..."
    remove_images
    ceph osd pool create rbd3 8
    rbd pool init rbd3
    rbd namespace create rbd3/ns1
    rbd mirror pool enable rbd3 image
    rbd mirror pool enable rbd3/ns1 image
    rbd mirror pool peer add rbd3 cluster1

    rbd create $RBD_CREATE_ARGS -s 1 rbd3/ns1/test1
    rbd mirror image enable rbd3/ns1/test1 snapshot
    test "$(rbd mirror image status rbd3/ns1/test1 |
        grep -c mirror.primary)" = '1'

    rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 1m
    test "$(rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1)" = 'every 1m'

    # Fetch and blocklist rbd_support module's RADOS client
    CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
        jq 'select(.name == "rbd_support")' |
        jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
    ceph osd blocklist add $CLIENT_ADDR
    ceph osd blocklist ls | grep $CLIENT_ADDR
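    # As above, wait for the module to recreate its RADOS client before
    # the mirror snapshot scheduler starts accepting requests again.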
    # Check that you can add a mirror snapshot schedule after a few retries
    expect_fail rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m
    sleep 10
    for i in `seq 24`; do
        rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m && break
        sleep 10
    done
    rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 2m'

    # Verify that the schedule present before client blocklisting is preserved
    rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 1m'

    rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 2m
    rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 1m
    rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 2m'
    rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 1m'

    rbd snap purge rbd3/ns1/test1
    rbd rm rbd3/ns1/test1
    ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_perf_image_iostat() {
    echo "testing perf image iostat..."
    remove_images
@@ -1413,6 +1499,55 @@ test_perf_image_iostat() {
    ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
}
test_perf_image_iostat_recovery() {
    echo "testing recovery of perf handler after module's RADOS client is blocklisted..."
    remove_images

    ceph osd pool create rbd3 8
    rbd pool init rbd3
    rbd namespace create rbd3/ns

    IMAGE_SPECS=("rbd3/test1" "rbd3/ns/test2")
    for spec in "${IMAGE_SPECS[@]}"; do
        # ensure all images are created without a separate data pool
        # as we filter iostat by specific pool specs below
        rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
    done

    BENCH_PIDS=()
    for spec in "${IMAGE_SPECS[@]}"; do
        rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
            --rbd-cache false $spec >/dev/null 2>&1 &
        BENCH_PIDS+=($!)
    done
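    # Querying the pool without a namespace should report only the image
    # in the default namespace, not rbd3/ns/test2.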
test "$(rbd perf image iostat --format json rbd3 |
jq -r 'map(.image) | sort | join(" ")')" = 'test1'
# Fetch and blocklist the rbd_support module's RADOS client
CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
jq 'select(.name == "rbd_support")' |
jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
ceph osd blocklist add $CLIENT_ADDR
ceph osd blocklist ls | grep $CLIENT_ADDR
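    # The perf query fails until the module recovers; retry for up to
    # ~4 minutes until stats for test2 are reported again.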
    expect_fail rbd perf image iostat --format json rbd3/ns
    sleep 10
    for i in `seq 24`; do
        test "$(rbd perf image iostat --format json rbd3/ns |
            jq -r 'map(.image) | sort | join(" ")')" = 'test2' && break
        sleep 10
    done

    for pid in "${BENCH_PIDS[@]}"; do
        kill $pid
    done
    wait

    remove_images
    ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_mirror_pool_peer_bootstrap_create() {
    echo "testing mirror pool peer bootstrap create..."
    remove_images
@@ -1508,6 +1643,45 @@ test_tasks_removed_pool() {
    remove_images
}
test_tasks_recovery() {
    echo "testing task handler recovery after module's RADOS client is blocklisted..."
    remove_images

    ceph osd pool create rbd2 8
    rbd pool init rbd2

    rbd create $RBD_CREATE_ARGS --size 1G rbd2/img1
    rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/img1
    rbd snap create rbd2/img1@snap
    rbd snap protect rbd2/img1@snap
    rbd clone rbd2/img1@snap rbd2/clone1

    # Fetch and blocklist rbd_support module's RADOS client
    CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
        jq 'select(.name == "rbd_support")' |
        jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
    ceph osd blocklist add $CLIENT_ADDR
    ceph osd blocklist ls | grep $CLIENT_ADDR
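    # Adding a task fails while the module's RADOS client is blocklisted;
    # retry until the task handler is recreated and accepts the request.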
    expect_fail ceph rbd task add flatten rbd2/clone1
    sleep 10
    for i in `seq 24`; do
        ceph rbd task add flatten rbd2/clone1 && break
        sleep 10
    done
    test "$(ceph rbd task list)" != "[]"
    for i in {1..12}; do
        rbd info rbd2/clone1 | grep 'parent: ' || break
        sleep 10
    done
    rbd info rbd2/clone1 | expect_fail grep 'parent: '

    rbd snap unprotect rbd2/img1@snap
    test "$(ceph rbd task list)" = "[]"

    ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_pool_image_args
test_rename
test_ls
@@ -1529,9 +1703,13 @@ test_clone_v2
test_thick_provision
test_namespace
test_trash_purge_schedule
test_trash_purge_schedule_recovery
test_mirror_snapshot_schedule
test_mirror_snapshot_schedule_recovery
test_perf_image_iostat
test_perf_image_iostat_recovery
test_mirror_pool_peer_bootstrap_create
test_tasks_removed_pool
test_tasks_recovery
echo OK