common/ceph_common: add heartbeat perf counters unconditionally

rgw and rbd_mirror were enabling a perfcounter monitoring the heartbeatmap.
Enable this unconditionally for all ccts.  This is simpler and more
useful!

Add a CINIT_FLAG to avoid starting them up. This is only used by the unit
tests which do not expect all the cct crap and we don't want to
break every time we add a new cct counter.  That means removing
the CephContextPerfCounters test.

Signed-off-by: Sage Weil <sage@redhat.com>
This commit is contained in:
Sage Weil 2017-05-04 15:35:47 -05:00
parent 0d166f49dc
commit 941930e5cf
6 changed files with 44 additions and 54 deletions

View File

@@ -44,6 +44,12 @@
using ceph::bufferlist;
using ceph::HeartbeatMap;
// for CINIT_FLAGS
#include "common/common_init.h"
#include <iostream>
#include <pthread.h>
#ifdef WITH_SEASTAR
CephContext::CephContext()
: _conf{ceph::common::local_conf()},
@@ -195,7 +201,7 @@ public:
_cct->_heartbeat_map->check_touch_file();
// refresh the perf counters
_cct->refresh_perf_values();
_cct->_refresh_perf_values();
}
return NULL;
}
@@ -603,8 +609,7 @@ CephContext::CephContext(uint32_t module_type_,
_crypto_aes(NULL),
_plugin_registry(NULL),
_lockdep_obs(NULL),
crush_location(this),
_cct_perf(NULL)
crush_location(this)
{
_log = new ceph::logging::Log(&_conf->subsys);
_log->start();
@@ -744,6 +749,9 @@ void CephContext::start_service_thread()
_service_thread->create("service");
}
if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
_enable_perf_counter();
// make logs flush on_exit()
if (_conf->log_flush_on_exit)
_log->set_flush_on_exit();
@@ -780,6 +788,9 @@ void CephContext::join_service_thread()
thread->exit_thread();
thread->join();
delete thread;
if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
_disable_perf_counter();
}
uint32_t CephContext::get_module_type() const
@@ -802,34 +813,28 @@ PerfCountersCollection *CephContext::get_perfcounters_collection()
return _perf_counters_collection;
}
void CephContext::enable_perf_counter()
void CephContext::_enable_perf_counter()
{
assert(!_cct_perf);
PerfCountersBuilder plb(this, "cct", l_cct_first, l_cct_last);
plb.add_u64(l_cct_total_workers, "total_workers", "Total workers");
plb.add_u64(l_cct_unhealthy_workers, "unhealthy_workers", "Unhealthy workers");
PerfCounters *perf_tmp = plb.create_perf_counters();
std::unique_lock<ceph::spinlock> lg(_cct_perf_lock);
ceph_assert(_cct_perf == NULL);
_cct_perf = perf_tmp;
lg.unlock();
_cct_perf = plb.create_perf_counters();
_perf_counters_collection->add(_cct_perf);
}
void CephContext::disable_perf_counter()
void CephContext::_disable_perf_counter()
{
if (!_cct_perf) {
return;
}
_perf_counters_collection->remove(_cct_perf);
std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
delete _cct_perf;
_cct_perf = NULL;
_cct_perf = nullptr;
}
void CephContext::refresh_perf_values()
void CephContext::_refresh_perf_values()
{
std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
if (_cct_perf) {
_cct_perf->set(l_cct_total_workers, _heartbeat_map->get_total_workers());
_cct_perf->set(l_cct_unhealthy_workers, _heartbeat_map->get_unhealthy_workers());

View File

@@ -137,22 +137,6 @@ public:
return _heartbeat_map;
}
/**
* Enable the performance counter, currently we only have counter for the
* number of total/unhealthy workers.
*/
void enable_perf_counter();
/**
* Disable the performance counter.
*/
void disable_perf_counter();
/**
* Refresh perf counter values.
*/
void refresh_perf_values();
/**
* Get the admin socket associated with this CephContext.
*
@@ -333,8 +317,22 @@ private:
l_cct_unhealthy_workers,
l_cct_last
};
PerfCounters *_cct_perf;
ceph::spinlock _cct_perf_lock;
PerfCounters *_cct_perf = nullptr;
/**
* Enable the performance counters.
*/
void _enable_perf_counter();
/**
* Disable the performance counter.
*/
void _disable_perf_counter();
/**
* Refresh perf counter values.
*/
void _refresh_perf_values();
friend class CephContextObs;
};

View File

@@ -39,6 +39,9 @@ enum common_init_flags_t {
// don't contact mons for config
CINIT_FLAG_NO_MON_CONFIG = 0x20,
// don't expose default cct perf counters
CINIT_FLAG_NO_CCT_PERF_COUNTERS = 0x40,
};
#ifndef WITH_SEASTAR

View File

@@ -274,9 +274,6 @@ int main(int argc, const char **argv)
init_timer.add_event_after(g_conf()->rgw_init_timeout, new C_InitTimeout);
mutex.Unlock();
// Enable the perf counter before starting the service thread
g_ceph_context->enable_perf_counter();
common_init_finish(g_ceph_context);
init_async_signal_handler();

View File

@@ -52,7 +52,8 @@ int main(int argc, char **argv) {
std::vector<const char*> args;
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
CINIT_FLAG_NO_CCT_PERF_COUNTERS);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
@@ -188,19 +189,6 @@ TEST(PerfCounters, MultiplePerfCounters) {
ASSERT_EQ("{}", msg);
}
TEST(PerfCounters, CephContextPerfCounters) {
// Enable the perf counter
g_ceph_context->enable_perf_counter();
AdminSocketClient client(get_rand_socket_path());
std::string msg;
ASSERT_EQ("", client.do_request("{ \"prefix\": \"perf dump\", \"format\": \"json\" }", &msg));
ASSERT_EQ(sd("{\"cct\":{\"total_workers\":0,\"unhealthy_workers\":0}}"), msg);
// Restore to avoid impact to other test cases
g_ceph_context->disable_perf_counter();
}
TEST(PerfCounters, ResetPerfCounters) {
AdminSocketClient client(get_rand_socket_path());
std::string msg;

View File

@@ -49,7 +49,6 @@ int main(int argc, const char **argv)
if (g_conf()->daemonize) {
global_init_daemonize(g_ceph_context);
}
g_ceph_context->enable_perf_counter();
common_init_finish(g_ceph_context);