From 19a58750f8b1dfda3dd7d1fb2de8e17b6bf9b207 Mon Sep 17 00:00:00 2001
From: Igor Fedotov
Date: Thu, 8 Dec 2022 02:10:57 +0300
Subject: [PATCH] cls/queue: use larger read chunks in queue_list_entries

Fixes: https://tracker.ceph.com/issues/58190
Signed-off-by: Igor Fedotov
---
 src/cls/queue/cls_queue_src.cc | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/src/cls/queue/cls_queue_src.cc b/src/cls/queue/cls_queue_src.cc
index bd973352d42..b34d9929b93 100644
--- a/src/cls/queue/cls_queue_src.cc
+++ b/src/cls/queue/cls_queue_src.cc
@@ -14,6 +14,9 @@ using ceph::bufferlist;
 using ceph::decode;
 using ceph::encode;
 
+const uint64_t page_size = 4096;
+const uint64_t large_chunk_size = 1ul << 22;
+
 int queue_write_head(cls_method_context_t hctx, cls_queue_head& head)
 {
   bufferlist bl;
@@ -43,7 +46,7 @@ int queue_write_head(cls_method_context_t hctx, cls_queue_head& head)
 
 int queue_read_head(cls_method_context_t hctx, cls_queue_head& head)
 {
-  uint64_t chunk_size = 1024, start_offset = 0;
+  uint64_t chunk_size = page_size, start_offset = 0;
 
   bufferlist bl_head;
   const auto ret = cls_cxx_read(hctx, start_offset, chunk_size, &bl_head);
@@ -281,7 +284,6 @@ int queue_list_entries(cls_method_context_t hctx, const cls_queue_list_op& op, c
   }
 
   op_ret.is_truncated = true;
-  uint64_t chunk_size = 1024;
   uint64_t contiguous_data_size = 0, size_to_read = 0;
   bool wrap_around = false;
 
@@ -310,11 +312,7 @@ int queue_list_entries(cls_method_context_t hctx, const cls_queue_list_op& op, c
 
     bufferlist bl_chunk;
     //Read chunk size at a time, if it is less than contiguous data size, else read contiguous data size
-    if (contiguous_data_size > chunk_size) {
-      size_to_read = chunk_size;
-    } else {
-      size_to_read = contiguous_data_size;
-    }
+    size_to_read = std::min(contiguous_data_size, large_chunk_size);
     CLS_LOG(10, "INFO: queue_list_entries(): size_to_read is %lu", size_to_read);
     if (size_to_read == 0) {
      next_marker = head.tail;
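
For context, a minimal standalone sketch of the chunk sizing this patch introduces (not part of the patch itself; compute_size_to_read and the main() harness below are hypothetical): queue_list_entries now caps each read at large_chunk_size (4 MiB) via std::min instead of the old fixed 1 KiB chunk, and queue_read_head reads the head with page_size (4 KiB) instead of 1 KiB.

// Hypothetical illustration only; the two constants are the ones added by the patch.
#include <algorithm>
#include <cstdint>
#include <cstdio>

const uint64_t page_size = 4096;              // head is now read in one 4 KiB chunk
const uint64_t large_chunk_size = 1ul << 22;  // entry data is read in up to 4 MiB chunks

// Mirrors the replacement for the old if/else on chunk_size = 1024:
// read the whole contiguous region if it is small, else one large chunk.
uint64_t compute_size_to_read(uint64_t contiguous_data_size) {
  return std::min(contiguous_data_size, large_chunk_size);
}

int main() {
  std::printf("head read size: %lu bytes\n", page_size);

  // Draining 100 MiB of contiguous queue data now takes 25 reads of 4 MiB,
  // versus 102,400 reads of 1 KiB with the old chunk size.
  uint64_t remaining = 100ull << 20;
  unsigned reads = 0;
  while (remaining > 0) {
    remaining -= compute_size_to_read(remaining);
    ++reads;
  }
  std::printf("drained in %u reads\n", reads);
  return 0;
}

The larger chunk keeps the number of cls_cxx_read calls per listing small, while std::min still avoids reading past the contiguous data region.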