librbd: preserve CEPH_OSD_FLAG_FULL_TRY in create_ioctx()

The obvious use case is an image with a separate data pool, but it could
be useful in other places too.

While at it, remove the redundant set_namespace() call in
handle_v2_get_data_pool() -- create_ioctx() already takes care of it.

Fixes: https://tracker.ceph.com/issues/52961
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Author: Ilya Dryomov <idryomov@gmail.com>
Date: 2021-10-16 21:08:55 +02:00
Parent: c19250ce3c
Commit: 7cc7efae2a
3 changed files with 49 additions and 18 deletions

src/librbd/Utils.cc

@@ -149,6 +149,9 @@ int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
dst_io_ctx->set_namespace(
pool_namespace ? *pool_namespace : src_io_ctx.get_namespace());
if (src_io_ctx.get_pool_full_try()) {
dst_io_ctx->set_pool_full_try();
}
return 0;
}
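For context, a minimal caller-side sketch of the new behaviour. The helper name full_try_propagation is made up, and the sketch assumes create_ioctx()'s full parameter list (pool id, optional namespace, destination IoCtx pointer) and the internal librbd/Utils.h header beyond what the hunk shows, so treat it as an illustration rather than the exact API:

#include <cassert>
#include <optional>
#include <rados/librados.hpp>

#include "librbd/Utils.h"  // internal header assumed to declare librbd::util::create_ioctx()

// Sketch: the source IoCtx opts in to FULL_TRY; with this change, an IoCtx
// derived from it via create_ioctx() -- e.g. an image's separate data pool
// IoCtx -- carries the flag as well, so removals can proceed on a full pool.
void full_try_propagation(librados::Rados& cluster, int64_t data_pool_id) {
  librados::IoCtx md_io_ctx;
  int r = cluster.ioctx_create("rbd", md_io_ctx);
  assert(r == 0);
  md_io_ctx.set_pool_full_try();

  librados::IoCtx data_io_ctx;
  r = librbd::util::create_ioctx(md_io_ctx, "data pool", data_pool_id,
                                 {}, &data_io_ctx);
  if (r == 0) {
    // previously only the namespace was copied; now FULL_TRY survives too
    assert(data_io_ctx.get_pool_full_try());
  }
}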

src/librbd/image/OpenRequest.cc

@@ -483,7 +483,6 @@ Context *OpenRequest<I>::handle_v2_get_data_pool(int *result) {
}
m_image_ctx->data_ctx.close();
} else {
m_image_ctx->data_ctx.set_namespace(m_image_ctx->md_ctx.get_namespace());
m_image_ctx->rebuild_data_io_context();
}
} else {

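As for the set_namespace() call dropped here: the first hunk shows that create_ioctx() already copies the source IoCtx's namespace onto the destination when no explicit namespace is passed, so the removed line changed nothing. A hedged illustration (the function name below is made up and is not the actual OpenRequest code):

#include <cassert>
#include <rados/librados.hpp>

// Illustration: after a successful create_ioctx(md_io_ctx, ..., &data_io_ctx)
// the namespaces already match, so re-applying md_io_ctx's namespace to
// data_io_ctx -- what the removed line did -- was a no-op.
void namespace_already_propagated(librados::IoCtx& md_io_ctx,
                                  librados::IoCtx& data_io_ctx) {
  assert(data_io_ctx.get_namespace() == md_io_ctx.get_namespace());
}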
src/test/librbd/test_librbd.cc

@@ -2118,32 +2118,22 @@ TEST_F(TestLibRBD, ConcurrentCreatesUnvalidatedPool)
rados_ioctx_destroy(ioctx);
}
TEST_F(TestLibRBD, CreateThickRemoveFullTry)
static void remove_full_try(rados_ioctx_t ioctx, const std::string& image_name,
const std::string& data_pool_name)
{
REQUIRE(!is_librados_test_stub(_rados));
rados_ioctx_t ioctx;
auto pool_name = create_pool(true);
ASSERT_EQ(0, rados_ioctx_create(_cluster, pool_name.c_str(), &ioctx));
int order = 0;
auto image_name = get_temp_image_name();
uint64_t quota = 10 << 20;
uint64_t size = 5 * quota;
ASSERT_EQ(0, create_image(ioctx, image_name.c_str(), size, &order));
// FIXME: this is a workaround for rbd_trash object being created
// on the first remove -- pre-create it to avoid bumping into quota
ASSERT_EQ(0, rbd_remove(ioctx, image_name.c_str()));
ASSERT_EQ(0, create_image(ioctx, image_name.c_str(), size, &order));
std::string cmdstr = "{\"prefix\": \"osd pool set-quota\", \"pool\": \"" +
pool_name + "\", \"field\": \"max_bytes\", \"val\": \"" +
data_pool_name + "\", \"field\": \"max_bytes\", \"val\": \"" +
std::to_string(quota) + "\"}";
char *cmd[1];
cmd[0] = (char *)cmdstr.c_str();
ASSERT_EQ(0, rados_mon_command(_cluster, (const char **)cmd, 1, "", 0,
nullptr, 0, nullptr, 0));
ASSERT_EQ(0, rados_mon_command(rados_ioctx_get_cluster(ioctx),
(const char **)cmd, 1, "", 0, nullptr, 0,
nullptr, 0));
rados_set_pool_full_try(ioctx);
@@ -2168,8 +2158,47 @@ TEST_F(TestLibRBD, CreateThickRemoveFullTry)
ASSERT_EQ(0, rbd_close(image));
// make sure we have latest map that marked the pool full
ASSERT_EQ(0, rados_wait_for_latest_osdmap(_cluster));
ASSERT_EQ(0, rados_wait_for_latest_osdmap(rados_ioctx_get_cluster(ioctx)));
ASSERT_EQ(0, rbd_remove(ioctx, image_name.c_str()));
}
TEST_F(TestLibRBD, RemoveFullTry)
{
REQUIRE(!is_librados_test_stub(_rados));
rados_ioctx_t ioctx;
auto pool_name = create_pool(true);
ASSERT_EQ(0, rados_ioctx_create(_cluster, pool_name.c_str(), &ioctx));
// cancel out rbd_default_data_pool -- we need an image without
// a separate data pool
ASSERT_EQ(0, rbd_pool_metadata_set(ioctx, "conf_rbd_default_data_pool",
pool_name.c_str()));
int order = 0;
auto image_name = get_temp_image_name();
// FIXME: this is a workaround for rbd_trash object being created
// on the first remove -- pre-create it to avoid bumping into quota
ASSERT_EQ(0, create_image(ioctx, image_name.c_str(), 0, &order));
ASSERT_EQ(0, rbd_remove(ioctx, image_name.c_str()));
remove_full_try(ioctx, image_name, pool_name);
rados_ioctx_destroy(ioctx);
}
TEST_F(TestLibRBD, RemoveFullTryDataPool)
{
REQUIRE_FORMAT_V2();
REQUIRE(!is_librados_test_stub(_rados));
rados_ioctx_t ioctx;
auto pool_name = create_pool(true);
auto data_pool_name = create_pool(true);
ASSERT_EQ(0, rados_ioctx_create(_cluster, pool_name.c_str(), &ioctx));
ASSERT_EQ(0, rbd_pool_metadata_set(ioctx, "conf_rbd_default_data_pool",
data_pool_name.c_str()));
auto image_name = get_temp_image_name();
remove_full_try(ioctx, image_name, data_pool_name);
rados_ioctx_destroy(ioctx);
}