mirror of
https://github.com/ceph/ceph
synced 2025-02-21 18:17:42 +00:00
librbd/cache/pwl: use m_bytes_allocated_cap for both rwl and ssd
Follow the rwl mode and use AbstractWriteLog::m_bytes_allocated_cap instead of m_log_pool_ring_buffer_size, which is specific to ssd mode. This fixes the "bytes available" calculation in the STATS output.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
parent
e2bbf4167f
commit
27dd7f85ae
6
src/librbd/cache/pwl/AbstractWriteLog.cc
vendored
6
src/librbd/cache/pwl/AbstractWriteLog.cc
vendored
@@ -1463,7 +1463,7 @@ template <typename I>
|
||||
bool AbstractWriteLog<I>::check_allocation(C_BlockIORequestT *req,
|
||||
uint64_t &bytes_cached, uint64_t &bytes_dirtied, uint64_t &bytes_allocated,
|
||||
uint64_t &num_lanes, uint64_t &num_log_entries,
|
||||
uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap){
|
||||
uint64_t &num_unpublished_reserves) {
|
||||
bool alloc_succeeds = true;
|
||||
bool no_space = false;
|
||||
{
|
||||
@@ -1487,11 +1487,11 @@ bool AbstractWriteLog<I>::check_allocation(C_BlockIORequestT *req,
|
||||
no_space = true; /* Entries must be retired */
|
||||
}
|
||||
/* Don't attempt buffer allocate if we've exceeded the "full" threshold */
|
||||
if (m_bytes_allocated + bytes_allocated > bytes_allocated_cap) {
|
||||
if (m_bytes_allocated + bytes_allocated > m_bytes_allocated_cap) {
|
||||
if (!req->has_io_waited_for_buffers()) {
|
||||
req->set_io_waited_for_buffers(true);
|
||||
ldout(m_image_ctx.cct, 1) << "Waiting for allocation cap (cap="
|
||||
<< bytes_allocated_cap
|
||||
<< m_bytes_allocated_cap
|
||||
<< ", allocated=" << m_bytes_allocated
|
||||
<< ") in write [" << *req << "]" << dendl;
|
||||
}
|
||||
|
2
src/librbd/cache/pwl/AbstractWriteLog.h
vendored
2
src/librbd/cache/pwl/AbstractWriteLog.h
vendored
@@ -356,7 +356,7 @@ protected:
|
||||
uint64_t &bytes_cached, uint64_t &bytes_dirtied,
|
||||
uint64_t &bytes_allocated,
|
||||
uint64_t &num_lanes, uint64_t &num_log_entries,
|
||||
uint64_t &num_unpublished_reserves, uint64_t bytes_allocated_cap);
|
||||
uint64_t &num_unpublished_reserves);
|
||||
void append_scheduled(
|
||||
pwl::GenericLogOperations &ops, bool &ops_remain, bool &appending,
|
||||
bool isRWL=false);
|
||||
|
6
src/librbd/cache/pwl/rwl/WriteLog.cc
vendored
6
src/librbd/cache/pwl/rwl/WriteLog.cc
vendored
@@ -932,9 +932,9 @@ bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
|
||||
req->setup_buffer_resources(&bytes_cached, &bytes_dirtied, &bytes_allocated,
|
||||
&num_lanes, &num_log_entries, &num_unpublished_reserves);
|
||||
|
||||
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied, bytes_allocated,
|
||||
num_lanes, num_log_entries, num_unpublished_reserves,
|
||||
this->m_bytes_allocated_cap);
|
||||
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
|
||||
bytes_allocated, num_lanes, num_log_entries,
|
||||
num_unpublished_reserves);
|
||||
|
||||
std::vector<WriteBufferAllocation>& buffers = req->get_resources_buffers();
|
||||
if (!alloc_succeeds) {
|
||||
|
12
src/librbd/cache/pwl/ssd/WriteLog.cc
vendored
12
src/librbd/cache/pwl/ssd/WriteLog.cc
vendored
@@ -139,7 +139,9 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
|
||||
num_small_writes = MAX_LOG_ENTRIES;
|
||||
}
|
||||
assert(num_small_writes > 2);
|
||||
m_log_pool_ring_buffer_size = this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
|
||||
/* Size of ring buffer */
|
||||
this->m_bytes_allocated_cap =
|
||||
this->m_log_pool_config_size - DATA_RING_BUFFER_OFFSET;
|
||||
/* Log ring empty */
|
||||
m_first_free_entry = DATA_RING_BUFFER_OFFSET;
|
||||
m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
|
||||
@@ -303,8 +305,7 @@ bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
|
||||
alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
|
||||
bytes_allocated, num_lanes,
|
||||
num_log_entries,
|
||||
num_unpublished_reserves,
|
||||
m_log_pool_ring_buffer_size);
|
||||
num_unpublished_reserves);
|
||||
req->set_allocated(alloc_succeeds);
|
||||
return alloc_succeeds;
|
||||
}
|
||||
@@ -535,9 +536,10 @@ void WriteLog<I>::process_work() {
|
||||
CephContext *cct = m_image_ctx.cct;
|
||||
int max_iterations = 4;
|
||||
bool wake_up_requested = false;
|
||||
uint64_t aggressive_high_water_bytes = m_log_pool_ring_buffer_size * AGGRESSIVE_RETIRE_HIGH_WATER;
|
||||
uint64_t aggressive_high_water_bytes =
|
||||
this->m_bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;
|
||||
uint64_t aggressive_high_water_entries = this->m_total_log_entries * AGGRESSIVE_RETIRE_HIGH_WATER;
|
||||
uint64_t high_water_bytes = m_log_pool_ring_buffer_size * RETIRE_HIGH_WATER;
|
||||
uint64_t high_water_bytes = this->m_bytes_allocated_cap * RETIRE_HIGH_WATER;
|
||||
uint64_t high_water_entries = this->m_total_log_entries * RETIRE_HIGH_WATER;
|
||||
|
||||
ldout(cct, 20) << dendl;
|
||||
|
1
src/librbd/cache/pwl/ssd/WriteLog.h
vendored
1
src/librbd/cache/pwl/ssd/WriteLog.h
vendored
@@ -99,7 +99,6 @@ private:
|
||||
WriteLogPoolRootUpdateList m_poolroot_to_update; /* pool root list to update to SSD */
|
||||
bool m_updating_pool_root = false;
|
||||
|
||||
uint64_t m_log_pool_ring_buffer_size; /* Size of ring buffer */
|
||||
std::atomic<int> m_async_update_superblock = {0};
|
||||
BlockDevice *bdev = nullptr;
|
||||
uint64_t pool_size;
|
||||
|
Loading…
Reference in New Issue
Block a user