perfcounters: fl -> time, use u64 nsec instead of double

(Almost) all current float users are actually time values, so switch to
a utime_t-based interface that internally stores nanoseconds in a u64.
This avoids floating point in librbd, which is problematic for Windows
VMs that leave the FPU in an unfriendly state.

There are two non-time users in the mds and osd that log the CPU load.
Just multiply those values by 100 and report as ints instead.

Fixes: #3521
Signed-off-by: Sage Weil <sage@inktank.com>
commit 94423ac90f
parent 3a0ee8e49d
Sage Weil <sage@inktank.com>, 2012-11-26 14:34:44 -08:00
22 changed files with 130 additions and 128 deletions
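
At a glance, the call-site migration looks like this (a schematic sketch, not a hunk from the diff below; l_foo_lat is an invented counter index):

    // before: seconds kept as a double, FPU arithmetic on every sample
    plb.add_fl_avg(l_foo_lat, "lat");
    logger->finc(l_foo_lat, (double)lat);   // lat is a utime_t

    // after: nanoseconds kept in a uint64_t, integer arithmetic only
    plb.add_time_avg(l_foo_lat, "lat");
    logger->tinc(l_foo_lat, lat);           // utime_t passed straight through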

src/client/Client.cc

@@ -360,11 +360,11 @@ int Client::init()
   // logger
   PerfCountersBuilder plb(cct, "client", l_c_first, l_c_last);
-  plb.add_fl_avg(l_c_reply, "reply");
-  plb.add_fl_avg(l_c_lat, "lat");
-  plb.add_fl_avg(l_c_wrlat, "wrlat");
-  plb.add_fl_avg(l_c_owrlat, "owrlat");
-  plb.add_fl_avg(l_c_ordlat, "ordlat");
+  plb.add_time_avg(l_c_reply, "reply");
+  plb.add_time_avg(l_c_lat, "lat");
+  plb.add_time_avg(l_c_wrlat, "wrlat");
+  plb.add_time_avg(l_c_owrlat, "owrlat");
+  plb.add_time_avg(l_c_ordlat, "ordlat");
   logger = plb.create_perf_counters();
   cct->get_perfcounters_collection()->add(logger);
@@ -1258,8 +1258,8 @@ int Client::make_request(MetaRequest *request,
   utime_t lat = ceph_clock_now(cct);
   lat -= request->sent_stamp;
   ldout(cct, 20) << "lat " << lat << dendl;
-  logger->finc(l_c_lat,(double)lat);
-  logger->finc(l_c_reply,(double)lat);
+  logger->tinc(l_c_lat, lat);
+  logger->tinc(l_c_reply, lat);
   request->put();
@@ -5746,7 +5746,7 @@ int Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf)
   // time
   utime_t lat = ceph_clock_now(cct);
   lat -= start;
-  logger->finc(l_c_wrlat,(double)lat);
+  logger->tinc(l_c_wrlat, lat);
   // assume success for now. FIXME.
   uint64_t totalwritten = size;

src/client/SyntheticClient.cc

@@ -2356,9 +2356,9 @@ int SyntheticClient::object_rw(int nobj, int osize, int wrpc,
   lat -= start;
   if (client->logger) {
     if (write)
-      client->logger->fset(l_c_owrlat, lat);
+      client->logger->tset(l_c_owrlat, lat);
     else
-      client->logger->fset(l_c_ordlat, lat);
+      client->logger->tset(l_c_ordlat, lat);
   }
 }

src/common/Finisher.h

@@ -95,7 +95,7 @@ class Finisher {
     finisher_thread(this) {
     PerfCountersBuilder b(cct, string("finisher-") + name,
                           l_finisher_first, l_finisher_last);
-    b.add_fl_avg(l_finisher_queue_len, "queue_len");
+    b.add_time_avg(l_finisher_queue_len, "queue_len");
     logger = b.create_perf_counters();
     cct->get_perfcounters_collection()->add(logger);
     logger->set(l_finisher_queue_len, 0);

src/common/Mutex.cc

@@ -30,7 +30,7 @@ Mutex::Mutex(const char *n, bool r, bool ld,
   if (cct) {
     PerfCountersBuilder b(cct, string("mutex-") + name,
                           l_mutex_first, l_mutex_last);
-    b.add_fl_avg(l_mutex_wait, "wait");
+    b.add_time_avg(l_mutex_wait, "wait");
     logger = b.create_perf_counters();
     cct->get_perfcounters_collection()->add(logger);
     logger->set(l_mutex_wait, 0);
@@ -89,7 +89,7 @@ void Mutex::Lock(bool no_lockdep) {
     start = ceph_clock_now(cct);
   int r = pthread_mutex_lock(&_m);
   if (logger && cct && cct->_conf->mutex_perf_counter)
-    logger->finc(l_mutex_wait,
+    logger->tinc(l_mutex_wait,
                  ceph_clock_now(cct) - start);
   assert(r == 0);
   if (lockdep && g_lockdep) _locked();

src/common/Throttle.cc

@@ -43,7 +43,7 @@ Throttle::Throttle(CephContext *cct, std::string n, int64_t m)
   b.add_u64_counter(l_throttle_take_sum, "take_sum");
   b.add_u64_counter(l_throttle_put, "put");
   b.add_u64_counter(l_throttle_put_sum, "put_sum");
-  b.add_fl_avg(l_throttle_wait, "wait");
+  b.add_time_avg(l_throttle_wait, "wait");
   logger = b.create_perf_counters();
   cct->get_perfcounters_collection()->add(logger);
@@ -90,7 +90,7 @@ bool Throttle::_wait(int64_t c)
     if (waited) {
       ldout(cct, 3) << "_wait finished waiting" << dendl;
       utime_t dur = ceph_clock_now(cct) - start;
-      logger->finc(l_throttle_wait, dur);
+      logger->tinc(l_throttle_wait, dur);
     }
     delete cv;

src/common/perf_counters.cc

@@ -106,7 +106,7 @@ void PerfCounters::inc(int idx, uint64_t amt)
   perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
   if (!(data.type & PERFCOUNTER_U64))
     return;
-  data.u.u64 += amt;
+  data.u64 += amt;
   if (data.type & PERFCOUNTER_LONGRUNAVG)
     data.avgcount++;
 }
@@ -123,8 +123,8 @@ void PerfCounters::dec(int idx, uint64_t amt)
   assert(!(data.type & PERFCOUNTER_LONGRUNAVG));
   if (!(data.type & PERFCOUNTER_U64))
     return;
-  assert(data.u.u64 >= amt);
-  data.u.u64 -= amt;
+  assert(data.u64 >= amt);
+  data.u64 -= amt;
 }
 void PerfCounters::set(int idx, uint64_t amt)
@@ -138,7 +138,7 @@ void PerfCounters::set(int idx, uint64_t amt)
   perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
   if (!(data.type & PERFCOUNTER_U64))
     return;
-  data.u.u64 = amt;
+  data.u64 = amt;
   if (data.type & PERFCOUNTER_LONGRUNAVG)
     data.avgcount++;
 }
@@ -154,10 +154,10 @@ uint64_t PerfCounters::get(int idx) const
   const perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
   if (!(data.type & PERFCOUNTER_U64))
     return 0;
-  return data.u.u64;
+  return data.u64;
 }
-void PerfCounters::finc(int idx, double amt)
+void PerfCounters::tinc(int idx, utime_t amt)
 {
   if (!m_cct->_conf->perf)
     return;
@@ -166,14 +166,14 @@ void PerfCounters::finc(int idx, double amt)
   assert(idx > m_lower_bound);
   assert(idx < m_upper_bound);
   perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
-  if (!(data.type & PERFCOUNTER_FLOAT))
+  if (!(data.type & PERFCOUNTER_TIME))
     return;
-  data.u.dbl += amt;
+  data.u64 += amt.to_nsec();
   if (data.type & PERFCOUNTER_LONGRUNAVG)
     data.avgcount++;
 }
-void PerfCounters::fset(int idx, double amt)
+void PerfCounters::tset(int idx, utime_t amt)
 {
   if (!m_cct->_conf->perf)
     return;
@@ -182,25 +182,25 @@ void PerfCounters::fset(int idx, double amt)
   assert(idx > m_lower_bound);
   assert(idx < m_upper_bound);
   perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
-  if (!(data.type & PERFCOUNTER_FLOAT))
+  if (!(data.type & PERFCOUNTER_TIME))
     return;
-  data.u.dbl = amt;
+  data.u64 = amt.to_nsec();
   if (data.type & PERFCOUNTER_LONGRUNAVG)
     assert(0);
 }
-double PerfCounters::fget(int idx) const
+utime_t PerfCounters::tget(int idx) const
 {
   if (!m_cct->_conf->perf)
-    return 0.0;
+    return utime_t();
   Mutex::Locker lck(m_lock);
   assert(idx > m_lower_bound);
   assert(idx < m_upper_bound);
   const perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
-  if (!(data.type & PERFCOUNTER_FLOAT))
-    return 0.0;
-  return data.u.dbl;
+  if (!(data.type & PERFCOUNTER_TIME))
+    return utime_t();
+  return utime_t(data.u64 / 1000000000ull, data.u64 % 1000000000ull);
 }
 void PerfCounters::write_json_to_buf(bufferlist& bl, bool schema)
@@ -253,9 +253,9 @@ PerfCounters::PerfCounters(CephContext *cct, const std::string &name,
 PerfCounters::perf_counter_data_any_d::perf_counter_data_any_d()
   : name(NULL),
     type(PERFCOUNTER_NONE),
+    u64(0),
    avgcount(0)
 {
-  memset(&u, 0, sizeof(u));
 }
 void PerfCounters::perf_counter_data_any_d::write_schema_json(char *buf, size_t buf_sz) const
@@ -269,12 +269,12 @@ void PerfCounters::perf_counter_data_any_d::write_json(char *buf, size_t buf_sz
     if (type & PERFCOUNTER_U64) {
       snprintf(buf, buf_sz, "\"%s\":{\"avgcount\":%" PRId64 ","
                "\"sum\":%" PRId64 "}",
-               name, avgcount, u.u64);
+               name, avgcount, u64);
     }
-    else if (type & PERFCOUNTER_FLOAT) {
+    else if (type & PERFCOUNTER_TIME) {
       snprintf(buf, buf_sz, "\"%s\":{\"avgcount\":%" PRId64 ","
-               "\"sum\":%g}",
-               name, avgcount, u.dbl);
+               "\"sum\":%llu.%09llu}",
+               name, avgcount, u64 / 1000000000ull, u64 % 1000000000ull);
     }
     else {
       assert(0);
@@ -283,10 +283,10 @@ void PerfCounters::perf_counter_data_any_d::write_json(char *buf, size_t buf_sz
   else {
     if (type & PERFCOUNTER_U64) {
       snprintf(buf, buf_sz, "\"%s\":%" PRId64,
-               name, u.u64);
+               name, u64);
     }
-    else if (type & PERFCOUNTER_FLOAT) {
-      snprintf(buf, buf_sz, "\"%s\":%g", name, u.dbl);
+    else if (type & PERFCOUNTER_TIME) {
+      snprintf(buf, buf_sz, "\"%s\":%llu.%09llu", name, u64 / 1000000000ull, u64 % 1000000000ull);
     }
     else {
       assert(0);
@@ -322,14 +322,14 @@ void PerfCountersBuilder::add_u64_avg(int idx, const char *name)
   add_impl(idx, name, PERFCOUNTER_U64 | PERFCOUNTER_LONGRUNAVG);
 }
-void PerfCountersBuilder::add_fl(int idx, const char *name)
+void PerfCountersBuilder::add_time(int idx, const char *name)
 {
-  add_impl(idx, name, PERFCOUNTER_FLOAT);
+  add_impl(idx, name, PERFCOUNTER_TIME);
 }
-void PerfCountersBuilder::add_fl_avg(int idx, const char *name)
+void PerfCountersBuilder::add_time_avg(int idx, const char *name)
 {
-  add_impl(idx, name, PERFCOUNTER_FLOAT | PERFCOUNTER_LONGRUNAVG);
+  add_impl(idx, name, PERFCOUNTER_TIME | PERFCOUNTER_LONGRUNAVG);
 }
 void PerfCountersBuilder::add_impl(int idx, const char *name, int ty)
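
The two new snprintf formats render the nanosecond sum as a fixed-point decimal using integer math only; a worked example of the seconds/fraction split (illustrative value, not from the diff):

    uint64_t u64 = 1500000;            // 1.5 ms stored as nanoseconds
    printf("%llu.%09llu\n",
           u64 / 1000000000ull,        // whole seconds: 0
           u64 % 1000000000ull);       // fraction, zero-padded to 9 digits
    // prints "0.001500000" -- the decimal rendering that %g used to
    // produce from a double, now computed without touching the FPU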

src/common/perf_counters.h

@@ -19,6 +19,7 @@
 #include "common/config_obs.h"
 #include "common/Mutex.h"
 #include "include/buffer.h"
+#include "include/utime.h"
 #include <stdint.h>
 #include <string>
@@ -31,7 +32,7 @@ class PerfCountersCollectionTest;
 enum perfcounter_type_d
 {
   PERFCOUNTER_NONE = 0,
-  PERFCOUNTER_FLOAT = 0x1,
+  PERFCOUNTER_TIME = 0x1,
   PERFCOUNTER_U64 = 0x2,
   PERFCOUNTER_LONGRUNAVG = 0x4,
   PERFCOUNTER_COUNTER = 0x8,
@@ -51,15 +52,15 @@ enum perfcounter_type_d
  * and accessed. For a counter, use the inc(counter, amount) function (note
  * that amount defaults to 1 if you don't set it). For a value, use the
  * set(index, value) function.
- * (For floats, use the finc and fset variants.)
+ * (For time, use the tinc and tset variants.)
  *
 * If for some reason you would like to reset your counters, you can do so using
 * the set functions even if they are counters, and you can also
 * increment your values if for some reason you wish to.
 *
- * For the floating-point average, it returns the current value and
+ * For the time average, it returns the current value and
 * the "avgcount" member when read off. avgcount is incremented when you call
- * finc. Calling fset on an average is an error and will assert out.
+ * tinc. Calling tset on an average is an error and will assert out.
 */
 class PerfCounters
 {
@@ -71,9 +72,9 @@ public:
   void set(int idx, uint64_t v);
   uint64_t get(int idx) const;
-  void fset(int idx, double v);
-  void finc(int idx, double v);
-  double fget(int idx) const;
+  void tset(int idx, utime_t v);
+  void tinc(int idx, utime_t v);
+  utime_t tget(int idx) const;
   void write_json_to_buf(ceph::bufferlist& bl, bool schema);
@@ -96,10 +97,7 @@ private:
     const char *name;
     enum perfcounter_type_d type;
-    union {
-      uint64_t u64;
-      double dbl;
-    } u;
+    uint64_t u64;
     uint64_t avgcount;
   };
   typedef std::vector<perf_counter_data_any_d> perf_counter_data_vec_t;
@@ -167,8 +165,8 @@ public:
   void add_u64(int key, const char *name);
   void add_u64_counter(int key, const char *name);
   void add_u64_avg(int key, const char *name);
-  void add_fl(int key, const char *name);
-  void add_fl_avg(int key, const char *name);
+  void add_time(int key, const char *name);
+  void add_time_avg(int key, const char *name);
   PerfCounters* create_perf_counters();
 private:
   PerfCountersBuilder(const PerfCountersBuilder &rhs);
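
Putting the documented interface together end to end (a minimal sketch; the enum and counter names are invented for illustration, following the pattern of the call sites in this commit):

    enum { l_test_first, l_test_lat, l_test_last };

    PerfCountersBuilder plb(cct, "test", l_test_first, l_test_last);
    plb.add_time_avg(l_test_lat, "lat");  // PERFCOUNTER_TIME | PERFCOUNTER_LONGRUNAVG
    PerfCounters *logger = plb.create_perf_counters();
    cct->get_perfcounters_collection()->add(logger);

    logger->tinc(l_test_lat, utime_t(0, 2000000));  // adds 2 ms, bumps avgcount
    utime_t total = logger->tget(l_test_lat);       // read back as a utime_t
    // tset() on an avg counter asserts out, per the comment above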

src/include/utime.h

@@ -72,6 +72,10 @@ public:
   __u32& sec_ref() { return tv.tv_sec; }
   __u32& nsec_ref() { return tv.tv_nsec; }
+  uint64_t to_nsec() const {
+    return (uint64_t)tv.tv_nsec + (uint64_t)tv.tv_sec * 1000000000ull;
+  }
+
   void copy_to_timeval(struct timeval *v) const {
     v->tv_sec = tv.tv_sec;
     v->tv_usec = tv.tv_nsec/1000;
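
A quick sanity check on the new helper: to_nsec() and the utime_t(sec, nsec) constructor (as used by tget() above) round-trip exactly, so the u64 representation loses no precision:

    utime_t t(3, 250000000);              // 3.25 s
    uint64_t ns = t.to_nsec();            // 3250000000
    utime_t back(ns / 1000000000ull,      // seconds: 3
                 ns % 1000000000ull);     // nanoseconds: 250000000
    // back == t; 2^64 ns is roughly 584 years, so overflow is not a concern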

src/librbd/AioCompletion.cc

@@ -105,11 +105,11 @@ namespace librbd {
     }
     switch (aio_type) {
     case AIO_TYPE_READ:
-      ictx->perfcounter->finc(l_librbd_aio_rd_latency, elapsed); break;
+      ictx->perfcounter->tinc(l_librbd_aio_rd_latency, elapsed); break;
     case AIO_TYPE_WRITE:
-      ictx->perfcounter->finc(l_librbd_aio_wr_latency, elapsed); break;
+      ictx->perfcounter->tinc(l_librbd_aio_wr_latency, elapsed); break;
     case AIO_TYPE_DISCARD:
-      ictx->perfcounter->finc(l_librbd_aio_discard_latency, elapsed); break;
+      ictx->perfcounter->tinc(l_librbd_aio_discard_latency, elapsed); break;
     default:
       lderr(ictx->cct) << "completed invalid aio_type: " << aio_type << dendl;
       break;

src/librbd/ImageCtx.cc

@@ -189,23 +189,23 @@ namespace librbd {
     plb.add_u64_counter(l_librbd_rd, "rd");
     plb.add_u64_counter(l_librbd_rd_bytes, "rd_bytes");
-    plb.add_fl_avg(l_librbd_rd_latency, "rd_latency");
+    plb.add_time_avg(l_librbd_rd_latency, "rd_latency");
     plb.add_u64_counter(l_librbd_wr, "wr");
     plb.add_u64_counter(l_librbd_wr_bytes, "wr_bytes");
-    plb.add_fl_avg(l_librbd_wr_latency, "wr_latency");
+    plb.add_time_avg(l_librbd_wr_latency, "wr_latency");
     plb.add_u64_counter(l_librbd_discard, "discard");
     plb.add_u64_counter(l_librbd_discard_bytes, "discard_bytes");
-    plb.add_fl_avg(l_librbd_discard_latency, "discard_latency");
+    plb.add_time_avg(l_librbd_discard_latency, "discard_latency");
     plb.add_u64_counter(l_librbd_flush, "flush");
     plb.add_u64_counter(l_librbd_aio_rd, "aio_rd");
     plb.add_u64_counter(l_librbd_aio_rd_bytes, "aio_rd_bytes");
-    plb.add_fl_avg(l_librbd_aio_rd_latency, "aio_rd_latency");
+    plb.add_time_avg(l_librbd_aio_rd_latency, "aio_rd_latency");
     plb.add_u64_counter(l_librbd_aio_wr, "aio_wr");
     plb.add_u64_counter(l_librbd_aio_wr_bytes, "aio_wr_bytes");
-    plb.add_fl_avg(l_librbd_aio_wr_latency, "aio_wr_latency");
+    plb.add_time_avg(l_librbd_aio_wr_latency, "aio_wr_latency");
     plb.add_u64_counter(l_librbd_aio_discard, "aio_discard");
     plb.add_u64_counter(l_librbd_aio_discard_bytes, "aio_discard_bytes");
-    plb.add_fl_avg(l_librbd_aio_discard_latency, "aio_discard_latency");
+    plb.add_time_avg(l_librbd_aio_discard_latency, "aio_discard_latency");
     plb.add_u64_counter(l_librbd_snap_create, "snap_create");
     plb.add_u64_counter(l_librbd_snap_remove, "snap_remove");
     plb.add_u64_counter(l_librbd_snap_rollback, "snap_rollback");

src/librbd/internal.cc

@@ -2152,7 +2152,7 @@ reprotect_and_return_err:
     }
     elapsed = ceph_clock_now(ictx->cct) - start_time;
-    ictx->perfcounter->finc(l_librbd_rd_latency, elapsed);
+    ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed);
     ictx->perfcounter->inc(l_librbd_rd);
     ictx->perfcounter->inc(l_librbd_rd_bytes, mylen);
     return total_read;
@@ -2235,7 +2235,7 @@ reprotect_and_return_err:
       return ret;
     elapsed = ceph_clock_now(ictx->cct) - start_time;
-    ictx->perfcounter->finc(l_librbd_wr_latency, elapsed);
+    ictx->perfcounter->tinc(l_librbd_wr_latency, elapsed);
     ictx->perfcounter->inc(l_librbd_wr);
     ictx->perfcounter->inc(l_librbd_wr_bytes, mylen);
     return mylen;

src/mds/MDS.cc

@@ -200,7 +200,7 @@ void MDS::create_logger()
   mds_plb.add_u64_counter(l_mds_req, "req"); // FIXME: nobody is actually setting this
   mds_plb.add_u64_counter(l_mds_reply, "reply");
-  mds_plb.add_fl_avg(l_mds_replyl, "replyl");
+  mds_plb.add_time_avg(l_mds_replyl, "replyl");
   mds_plb.add_u64_counter(l_mds_fw, "fw");
   mds_plb.add_u64_counter(l_mds_dir_f, "dir_f");
@@ -243,7 +243,7 @@ void MDS::create_logger()
   mds_plb.add_u64_counter(l_mds_trino, "trino");
   mds_plb.add_u64_counter(l_mds_tlock, "tlock");
-  mds_plb.add_fl(l_mds_l, "l");
+  mds_plb.add_u64(l_mds_l, "l");
   mds_plb.add_u64(l_mds_q, "q");
   mds_plb.add_u64(l_mds_popanyd, "popanyd"); // FIXME: unused
   mds_plb.add_u64(l_mds_popnest, "popnest");
@@ -561,7 +561,7 @@ void MDS::tick()
   if (logger) {
     req_rate = logger->get(l_mds_req);
-    logger->fset(l_mds_l, (int)load.mds_load());
+    logger->set(l_mds_l, 100 * load.mds_load());
     logger->set(l_mds_q, messenger->get_dispatch_queue_len());
     logger->set(l_mds_sm, mdcache->num_subtrees());

src/mds/Server.cc

@@ -832,8 +832,8 @@ void Server::early_reply(MDRequest *mdr, CInode *tracei, CDentry *tracedn)
   mdr->did_early_reply = true;
   mds->logger->inc(l_mds_reply);
-  double lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
-  mds->logger->finc(l_mds_replyl, lat);
+  utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
+  mds->logger->tinc(l_mds_replyl, lat);
   dout(20) << "lat " << lat << dendl;
 }
@@ -874,8 +874,8 @@ void Server::reply_request(MDRequest *mdr, MClientReply *reply, CInode *tracei,
   if (!did_early_reply && !is_replay) {
     mds->logger->inc(l_mds_reply);
-    double lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
-    mds->logger->finc(l_mds_replyl, lat);
+    utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
+    mds->logger->tinc(l_mds_replyl, lat);
     dout(20) << "lat " << lat << dendl;
     if (tracei)

src/os/FileJournal.cc

@@ -840,7 +840,7 @@ void FileJournal::queue_completions_thru(uint64_t seq)
              << " " << next.finish
              << " lat " << lat << dendl;
     if (logger) {
-      logger->finc(l_os_j_lat, lat);
+      logger->tinc(l_os_j_lat, lat);
     }
     if (next.finish)
       finisher->queue(next.finish);

src/os/FileStore.cc

@@ -442,7 +442,7 @@ FileStore::FileStore(const std::string &base, const std::string &jdev, const cha
   plb.add_u64(l_os_jq_max_bytes, "journal_queue_max_bytes");
   plb.add_u64(l_os_jq_bytes, "journal_queue_bytes");
   plb.add_u64_counter(l_os_j_bytes, "journal_bytes");
-  plb.add_fl_avg(l_os_j_lat, "journal_latency");
+  plb.add_time_avg(l_os_j_lat, "journal_latency");
   plb.add_u64_counter(l_os_j_wr, "journal_wr");
   plb.add_u64_avg(l_os_j_wr_bytes, "journal_wr_bytes");
   plb.add_u64(l_os_oq_max_ops, "op_queue_max_ops");
@@ -451,12 +451,12 @@ FileStore::FileStore(const std::string &base, const std::string &jdev, const cha
   plb.add_u64(l_os_oq_max_bytes, "op_queue_max_bytes");
   plb.add_u64(l_os_oq_bytes, "op_queue_bytes");
   plb.add_u64_counter(l_os_bytes, "bytes");
-  plb.add_fl_avg(l_os_apply_lat, "apply_latency");
+  plb.add_time_avg(l_os_apply_lat, "apply_latency");
   plb.add_u64(l_os_committing, "committing");
   plb.add_u64_counter(l_os_commit, "commitcycle");
-  plb.add_fl_avg(l_os_commit_len, "commitcycle_interval");
-  plb.add_fl_avg(l_os_commit_lat, "commitcycle_latency");
+  plb.add_time_avg(l_os_commit_len, "commitcycle_interval");
+  plb.add_time_avg(l_os_commit_lat, "commitcycle_latency");
   plb.add_u64_counter(l_os_j_full, "journal_full");
   logger = plb.create_perf_counters();
@@ -1977,7 +1977,7 @@ void FileStore::_finish_op(OpSequencer *osr)
   utime_t lat = ceph_clock_now(g_ceph_context);
   lat -= o->start;
-  logger->finc(l_os_apply_lat, lat);
+  logger->tinc(l_os_apply_lat, lat);
   if (o->onreadable_sync) {
     o->onreadable_sync->finish(0);
@@ -3420,8 +3420,8 @@ void FileStore::sync_entry()
      dout(10) << "sync_entry commit took " << lat << ", interval was " << dur << dendl;
      logger->inc(l_os_commit);
-      logger->finc(l_os_commit_lat, lat);
-      logger->finc(l_os_commit_len, dur);
+      logger->tinc(l_os_commit_lat, lat);
+      logger->tinc(l_os_commit_len, dur);
      apply_manager.commit_finish();

src/osd/OSD.cc

@@ -958,33 +958,33 @@ void OSD::create_logger()
   osd_plb.add_u64_counter(l_osd_op, "op");                  // client ops
   osd_plb.add_u64_counter(l_osd_op_inb, "op_in_bytes");     // client op in bytes (writes)
   osd_plb.add_u64_counter(l_osd_op_outb, "op_out_bytes");   // client op out bytes (reads)
-  osd_plb.add_fl_avg(l_osd_op_lat, "op_latency");           // client op latency
+  osd_plb.add_time_avg(l_osd_op_lat, "op_latency");         // client op latency
   osd_plb.add_u64_counter(l_osd_op_r, "op_r");              // client reads
   osd_plb.add_u64_counter(l_osd_op_r_outb, "op_r_out_bytes");  // client read out bytes
-  osd_plb.add_fl_avg(l_osd_op_r_lat, "op_r_latency");       // client read latency
+  osd_plb.add_time_avg(l_osd_op_r_lat, "op_r_latency");     // client read latency
   osd_plb.add_u64_counter(l_osd_op_w, "op_w");              // client writes
   osd_plb.add_u64_counter(l_osd_op_w_inb, "op_w_in_bytes"); // client write in bytes
-  osd_plb.add_fl_avg(l_osd_op_w_rlat, "op_w_rlat");         // client write readable/applied latency
-  osd_plb.add_fl_avg(l_osd_op_w_lat, "op_w_latency");       // client write latency
+  osd_plb.add_time_avg(l_osd_op_w_rlat, "op_w_rlat");       // client write readable/applied latency
+  osd_plb.add_time_avg(l_osd_op_w_lat, "op_w_latency");     // client write latency
   osd_plb.add_u64_counter(l_osd_op_rw, "op_rw");            // client rmw
   osd_plb.add_u64_counter(l_osd_op_rw_inb, "op_rw_in_bytes");   // client rmw in bytes
   osd_plb.add_u64_counter(l_osd_op_rw_outb,"op_rw_out_bytes");  // client rmw out bytes
-  osd_plb.add_fl_avg(l_osd_op_rw_rlat,"op_rw_rlat");        // client rmw readable/applied latency
-  osd_plb.add_fl_avg(l_osd_op_rw_lat, "op_rw_latency");     // client rmw latency
+  osd_plb.add_time_avg(l_osd_op_rw_rlat,"op_rw_rlat");      // client rmw readable/applied latency
+  osd_plb.add_time_avg(l_osd_op_rw_lat, "op_rw_latency");   // client rmw latency
   osd_plb.add_u64_counter(l_osd_sop, "subop");              // subops
   osd_plb.add_u64_counter(l_osd_sop_inb, "subop_in_bytes"); // subop in bytes
-  osd_plb.add_fl_avg(l_osd_sop_lat, "subop_latency");       // subop latency
+  osd_plb.add_time_avg(l_osd_sop_lat, "subop_latency");     // subop latency
   osd_plb.add_u64_counter(l_osd_sop_w, "subop_w");          // replicated (client) writes
   osd_plb.add_u64_counter(l_osd_sop_w_inb, "subop_w_in_bytes");  // replicated write in bytes
-  osd_plb.add_fl_avg(l_osd_sop_w_lat, "subop_w_latency");   // replicated write latency
+  osd_plb.add_time_avg(l_osd_sop_w_lat, "subop_w_latency"); // replicated write latency
   osd_plb.add_u64_counter(l_osd_sop_pull, "subop_pull");    // pull request
-  osd_plb.add_fl_avg(l_osd_sop_pull_lat, "subop_pull_latency");
+  osd_plb.add_time_avg(l_osd_sop_pull_lat, "subop_pull_latency");
   osd_plb.add_u64_counter(l_osd_sop_push, "subop_push");    // push (write)
   osd_plb.add_u64_counter(l_osd_sop_push_inb, "subop_push_in_bytes");
-  osd_plb.add_fl_avg(l_osd_sop_push_lat, "subop_push_latency");
+  osd_plb.add_time_avg(l_osd_sop_push_lat, "subop_push_latency");
   osd_plb.add_u64_counter(l_osd_pull, "pull");              // pull requests sent
   osd_plb.add_u64_counter(l_osd_push, "push");              // push messages
@@ -995,7 +995,7 @@ void OSD::create_logger()
   osd_plb.add_u64_counter(l_osd_rop, "recovery_ops");       // recovery ops (started)
-  osd_plb.add_fl(l_osd_loadavg, "loadavg");
+  osd_plb.add_u64(l_osd_loadavg, "loadavg");
   osd_plb.add_u64(l_osd_buf, "buffer_bytes");               // total ceph::buffer bytes
   osd_plb.add_u64(l_osd_pg, "numpg");                       // num pgs
@@ -2014,7 +2014,7 @@ void OSD::heartbeat()
   // get CPU load avg
   double loadavgs[1];
   if (getloadavg(loadavgs, 1) == 1)
-    logger->fset(l_osd_loadavg, loadavgs[0]);
+    logger->set(l_osd_loadavg, 100 * loadavgs[0]);
   dout(30) << "heartbeat checking stats" << dendl;
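
With loadavg now a plain u64 gauge, the load average is published in hundredths; whoever consumes the dump has to rescale (the consumer side is an assumption for illustration, not part of this commit):

    // stored: getloadavg() returns e.g. 0.85, logged as 85
    // a monitoring script recovers two-decimal precision with:
    //   load = dumped_value / 100.0;   // 0.85 again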

src/osd/ReplicatedPG.cc

@@ -1033,23 +1033,23 @@ void ReplicatedPG::log_op_stats(OpContext *ctx)
   osd->logger->inc(l_osd_op_outb, outb);
   osd->logger->inc(l_osd_op_inb, inb);
-  osd->logger->finc(l_osd_op_lat, latency);
+  osd->logger->tinc(l_osd_op_lat, latency);
   if (m->may_read() && m->may_write()) {
     osd->logger->inc(l_osd_op_rw);
     osd->logger->inc(l_osd_op_rw_inb, inb);
     osd->logger->inc(l_osd_op_rw_outb, outb);
-    osd->logger->finc(l_osd_op_rw_rlat, rlatency);
-    osd->logger->finc(l_osd_op_rw_lat, latency);
+    osd->logger->tinc(l_osd_op_rw_rlat, rlatency);
+    osd->logger->tinc(l_osd_op_rw_lat, latency);
   } else if (m->may_read()) {
     osd->logger->inc(l_osd_op_r);
     osd->logger->inc(l_osd_op_r_outb, outb);
-    osd->logger->finc(l_osd_op_r_lat, latency);
+    osd->logger->tinc(l_osd_op_r_lat, latency);
   } else if (m->may_write()) {
     osd->logger->inc(l_osd_op_w);
     osd->logger->inc(l_osd_op_w_inb, inb);
-    osd->logger->finc(l_osd_op_w_rlat, rlatency);
-    osd->logger->finc(l_osd_op_w_lat, latency);
+    osd->logger->tinc(l_osd_op_w_rlat, rlatency);
+    osd->logger->tinc(l_osd_op_w_lat, latency);
   } else
     assert(0);
@@ -1071,11 +1071,11 @@ void ReplicatedPG::log_subop_stats(OpRequestRef op, int tag_inb, int tag_lat)
   osd->logger->inc(l_osd_sop);
   osd->logger->inc(l_osd_sop_inb, inb);
-  osd->logger->finc(l_osd_sop_lat, latency);
+  osd->logger->tinc(l_osd_sop_lat, latency);
   if (tag_inb)
     osd->logger->inc(tag_inb, inb);
-  osd->logger->finc(tag_lat, latency);
+  osd->logger->tinc(tag_lat, latency);
   dout(15) << "log_subop_stats " << *op->request << " inb " << inb << " latency " << latency << dendl;
 }

src/osdc/Journaler.cc

@@ -405,7 +405,7 @@ void Journaler::_finish_flush(int r, uint64_t start, utime_t stamp)
   if (logger) {
     utime_t lat = ceph_clock_now(cct);
     lat -= stamp;
-    logger->finc(logger_key_lat, lat);
+    logger->tinc(logger_key_lat, lat);
   }
   // adjust safe_pos

src/osdc/ObjectCacher.cc

@@ -537,7 +537,7 @@ void ObjectCacher::perf_start()
                      "data_overwritten_while_flushing");
   plb.add_u64_counter(l_objectcacher_write_ops_blocked, "write_ops_blocked");
   plb.add_u64_counter(l_objectcacher_write_bytes_blocked, "write_bytes_blocked");
-  plb.add_fl(l_objectcacher_write_time_blocked, "write_time_blocked");
+  plb.add_time(l_objectcacher_write_time_blocked, "write_time_blocked");
   perfcounter = plb.create_perf_counters();
   cct->get_perfcounters_collection()->add(perfcounter);
@@ -1318,7 +1318,7 @@ int ObjectCacher::_wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset, M
     perfcounter->inc(l_objectcacher_write_ops_blocked);
     perfcounter->inc(l_objectcacher_write_bytes_blocked, len);
     utime_t blocked = ceph_clock_now(cct) - start;
-    perfcounter->finc(l_objectcacher_write_time_blocked, (double) blocked);
+    perfcounter->tinc(l_objectcacher_write_time_blocked, blocked);
   }
   return ret;
 }

@@ -28,10 +28,10 @@ int rgw_perf_start(CephContext *cct)
   plb.add_u64_counter(l_rgw_get, "get");
   plb.add_u64_counter(l_rgw_get_b, "get_b");
-  plb.add_fl_avg(l_rgw_get_lat, "get_initial_lat");
+  plb.add_time_avg(l_rgw_get_lat, "get_initial_lat");
   plb.add_u64_counter(l_rgw_put, "put");
   plb.add_u64_counter(l_rgw_put_b, "put_b");
-  plb.add_fl_avg(l_rgw_put_lat, "put_initial_lat");
+  plb.add_time_avg(l_rgw_put_lat, "put_initial_lat");
   plb.add_u64(l_rgw_qlen, "qlen");
   plb.add_u64(l_rgw_qactive, "qactive");

src/rgw/rgw_op.cc

@@ -388,7 +388,7 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, RGWObjEnt& ent, RGWAc
     cur_ofs += len;
     ofs += len;
     ret = 0;
-    perfcounter->finc(l_rgw_get_lat,
+    perfcounter->tinc(l_rgw_get_lat,
                       (ceph_clock_now(s->cct) - start_time));
     send_response_data(bl);
@@ -449,7 +449,7 @@ int RGWGetObj::iterate_user_manifest_parts(rgw_bucket& bucket, string& obj_prefi
       found_end = true;
     }
-    perfcounter->finc(l_rgw_get_lat,
+    perfcounter->tinc(l_rgw_get_lat,
                       (ceph_clock_now(s->cct) - start_time));
     if (found_start) {
@@ -588,7 +588,7 @@ void RGWGetObj::execute()
     ofs += len;
     ret = 0;
-    perfcounter->finc(l_rgw_get_lat,
+    perfcounter->tinc(l_rgw_get_lat,
                       (ceph_clock_now(s->cct) - start_time));
     ret = send_response_data(bl);
     bl.clear();
@@ -1369,7 +1369,7 @@ void RGWPutObj::execute()
   ret = processor->complete(etag, attrs);
 done:
   dispose_processor(processor);
-  perfcounter->finc(l_rgw_put_lat,
+  perfcounter->tinc(l_rgw_put_lat,
                     (ceph_clock_now(s->cct) - s->time));
 }

src/test/perf_counters.cc

@@ -91,8 +91,8 @@ static PerfCounters* setup_test_perfcounters1(CephContext *cct)
   PerfCountersBuilder bld(cct, "test_perfcounter_1",
           TEST_PERFCOUNTERS1_ELEMENT_FIRST, TEST_PERFCOUNTERS1_ELEMENT_LAST);
   bld.add_u64(TEST_PERFCOUNTERS1_ELEMENT_1, "element1");
-  bld.add_fl(TEST_PERFCOUNTERS1_ELEMENT_2, "element2");
-  bld.add_fl_avg(TEST_PERFCOUNTERS1_ELEMENT_3, "element3");
+  bld.add_time(TEST_PERFCOUNTERS1_ELEMENT_2, "element2");
+  bld.add_time_avg(TEST_PERFCOUNTERS1_ELEMENT_3, "element3");
   return bld.create_perf_counters();
 }
@@ -104,18 +104,18 @@ TEST(PerfCounters, SinglePerfCounters) {
   std::string msg;
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
   ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,"
-            "'element2':0,'element3':{'avgcount':0,'sum':0}}}"), msg);
+            "'element2':0.000000000,'element3':{'avgcount':0,'sum':0.000000000}}}"), msg);
   fake_pf->inc(TEST_PERFCOUNTERS1_ELEMENT_1);
-  fake_pf->fset(TEST_PERFCOUNTERS1_ELEMENT_2, 0.5);
-  fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 100.0);
+  fake_pf->tset(TEST_PERFCOUNTERS1_ELEMENT_2, utime_t(0, 500000000));
+  fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t(100, 0));
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
   ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,"
-            "'element2':0.5,'element3':{'avgcount':1,'sum':100}}}"), msg);
-  fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 0.0);
-  fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 25.0);
+            "'element2':0.500000000,'element3':{'avgcount':1,'sum':100.000000000}}}"), msg);
+  fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t());
+  fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t(25,0));
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
-  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,'element2':0.5,"
-            "'element3':{'avgcount':3,'sum':125}}}"), msg);
+  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,'element2':0.500000000,"
+            "'element3':{'avgcount':3,'sum':125.000000000}}}"), msg);
 }
 enum {
@@ -130,7 +130,7 @@ static PerfCounters* setup_test_perfcounter2(CephContext *cct)
   PerfCountersBuilder bld(cct, "test_perfcounter_2",
           TEST_PERFCOUNTERS2_ELEMENT_FIRST, TEST_PERFCOUNTERS2_ELEMENT_LAST);
   bld.add_u64(TEST_PERFCOUNTERS2_ELEMENT_FOO, "foo");
-  bld.add_fl(TEST_PERFCOUNTERS2_ELEMENT_BAR, "bar");
+  bld.add_time(TEST_PERFCOUNTERS2_ELEMENT_BAR, "bar");
   return bld.create_perf_counters();
 }
@@ -145,19 +145,19 @@ TEST(PerfCounters, MultiplePerfCounters) {
   std::string msg;
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
-  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,'element2':0,'element3':"
-            "{'avgcount':0,'sum':0}},'test_perfcounter_2':{'foo':0,'bar':0}}"), msg);
+  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,'element2':0.000000000,'element3':"
+            "{'avgcount':0,'sum':0.000000000}},'test_perfcounter_2':{'foo':0,'bar':0.000000000}}"), msg);
   fake_pf1->inc(TEST_PERFCOUNTERS1_ELEMENT_1);
   fake_pf1->inc(TEST_PERFCOUNTERS1_ELEMENT_1, 5);
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
-  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0,'element3':"
-            "{'avgcount':0,'sum':0}},'test_perfcounter_2':{'foo':0,'bar':0}}"), msg);
+  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0.000000000,'element3':"
+            "{'avgcount':0,'sum':0.000000000}},'test_perfcounter_2':{'foo':0,'bar':0.000000000}}"), msg);
   coll->remove(fake_pf2);
   ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
-  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0,"
-            "'element3':{'avgcount':0,'sum':0}}}"), msg);
+  ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0.000000000,"
+            "'element3':{'avgcount':0,'sum':0.000000000}}}"), msg);
   ASSERT_EQ("", client.do_request("perfcounters_schema", &msg));
   ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':{'type':2},"
             "'element2':{'type':1},'element3':{'type':5}}}"), msg);