librbd/cache/pwl: use m_bytes_allocated_cap for both rwl and ssd

Follow rwl mode and use AbstractWriteLog::m_bytes_allocated_cap
instead of m_log_pool_ring_buffer_size specific to ssd.  This fixes
"bytes available" calculation in STATS output.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
Ilya Dryomov 2021-04-28 14:27:12 +02:00
parent e2bbf4167f
commit 27dd7f85ae
5 changed files with 14 additions and 13 deletions

View File

@@ -1463,7 +1463,7 @@ template <typename I>
bool AbstractWriteLog<I>::check_allocation(C_BlockIORequestT *req,
uint64_t &bytes_cached, uint64_t &bytes_dirtied, uint64_t &bytes_allocated,
uint64_t &num_lanes, uint64_t &num_log_entries,
uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap){
uint64_t &num_unpublished_reserves) {
bool alloc_succeeds = true;
bool no_space = false;
{
@@ -1487,11 +1487,11 @@ bool AbstractWriteLog<I>::check_allocation(C_BlockIORequestT *req,
no_space = true; /* Entries must be retired */
}
/* Don't attempt buffer allocate if we've exceeded the "full" threshold */
if (m_bytes_allocated + bytes_allocated > bytes_allocated_cap) {
if (m_bytes_allocated + bytes_allocated > m_bytes_allocated_cap) {
if (!req->has_io_waited_for_buffers()) {
req->set_io_waited_for_buffers(true);
ldout(m_image_ctx.cct, 1) << "Waiting for allocation cap (cap="
<< bytes_allocated_cap
<< m_bytes_allocated_cap
<< ", allocated=" << m_bytes_allocated
<< ") in write [" << *req << "]" << dendl;
}

View File

@@ -356,7 +356,7 @@ protected:
uint64_t &bytes_cached, uint64_t &bytes_dirtied,
uint64_t &bytes_allocated,
uint64_t &num_lanes, uint64_t &num_log_entries,
uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap);
uint64_t &num_unpublished_reserves);
void append_scheduled(
pwl::GenericLogOperations &ops, bool &ops_remain, bool &appending,
bool isRWL=false);

View File

@@ -932,9 +932,9 @@ bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
req->setup_buffer_resources(&bytes_cached, &bytes_dirtied, &bytes_allocated,
&num_lanes, &num_log_entries, &num_unpublished_reserves);
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied, bytes_allocated,
num_lanes, num_log_entries, num_unpublished_reserves,
this->m_bytes_allocated_cap);
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
bytes_allocated, num_lanes, num_log_entries,
num_unpublished_reserves);
std::vector<WriteBufferAllocation>& buffers = req->get_resources_buffers();
if (!alloc_succeeds) {

View File

@@ -139,7 +139,9 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
num_small_writes = MAX_LOG_ENTRIES;
}
assert(num_small_writes > 2);
m_log_pool_ring_buffer_size = this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
/* Size of ring buffer */
this->m_bytes_allocated_cap =
this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
/* Log ring empty */
m_first_free_entry = DATA_RING_BUFFER_OFFSET;
m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
@@ -303,8 +305,7 @@ bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
bytes_allocated, num_lanes,
num_log_entries,
num_unpublished_reserves,
m_log_pool_ring_buffer_size);
num_unpublished_reserves);
req->set_allocated(alloc_succeeds);
return alloc_succeeds;
}
@@ -535,9 +536,10 @@ void WriteLog<I>::process_work() {
CephContext *cct = m_image_ctx.cct;
int max_iterations = 4;
bool wake_up_requested = false;
uint64_t aggressive_high_water_bytes = m_log_pool_ring_buffer_size * AGGRESSIVE_RETIRE_HIGH_WATER;
uint64_t aggressive_high_water_bytes =
this->m_bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;
uint64_t aggressive_high_water_entries = this->m_total_log_entries * AGGRESSIVE_RETIRE_HIGH_WATER;
uint64_t high_water_bytes = m_log_pool_ring_buffer_size * RETIRE_HIGH_WATER;
uint64_t high_water_bytes = this->m_bytes_allocated_cap * RETIRE_HIGH_WATER;
uint64_t high_water_entries = this->m_total_log_entries * RETIRE_HIGH_WATER;
ldout(cct, 20) << dendl;

View File

@@ -99,7 +99,6 @@ private:
WriteLogPoolRootUpdateList m_poolroot_to_update; /* pool root list to update to SSD */
bool m_updating_pool_root = false;
uint64_t m_log_pool_ring_buffer_size; /* Size of ring buffer */
std::atomic<int> m_async_update_superblock = {0};
BlockDevice *bdev = nullptr;
uint64_t pool_size;