Merge remote-tracking branch 'gh/wip-7988'

Conflicts:
	src/mds/RecoveryQueue.cc

Reviewed-by: Sage Weil <sage@redhat.com>
Sage Weil 2014-08-29 16:10:16 -07:00
commit 21e25e77c2
38 changed files with 1057 additions and 282 deletions


@ -15,6 +15,7 @@
#include "include/types.h"
#include "include/str_map.h"
#include "msg/Messenger.h"
#include "msg/Message.h"
@ -38,6 +39,30 @@
#include "common/config.h"
#define dout_subsys ceph_subsys_monc
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
static ostream& _prefix(std::ostream *_dout, LogClient *logc) {
return *_dout << "log_client ";
}
static ostream& _prefix(std::ostream *_dout, LogChannel *lc) {
return *_dout << "log_channel(" << lc->get_log_channel() << ") ";
}
LogChannel::LogChannel(CephContext *cct, LogClient *lc, const string &channel)
: cct(cct), parent(lc), channel_lock("LogChannel::channel_lock"),
log_channel(channel), log_to_syslog(false), log_to_monitors(false)
{
}
LogChannel::LogChannel(CephContext *cct, LogClient *lc,
const string &channel, const string &facility,
const string &prio)
: cct(cct), parent(lc), channel_lock("LogChannel::channel_lock"),
log_channel(channel), log_prio(prio), syslog_facility(facility),
log_to_syslog(false), log_to_monitors(false)
{
}
LogClient::LogClient(CephContext *cct, Messenger *m, MonMap *mm,
enum logclient_flag_t flags)
@ -46,7 +71,7 @@ LogClient::LogClient(CephContext *cct, Messenger *m, MonMap *mm,
{
}
LogClientTemp::LogClientTemp(clog_type type_, LogClient &parent_)
LogClientTemp::LogClientTemp(clog_type type_, LogChannel &parent_)
: type(type_), parent(parent_)
{
}
@ -63,45 +88,40 @@ LogClientTemp::~LogClientTemp()
parent.do_log(type, ss);
}
void LogClient::do_log(clog_type type, std::stringstream& ss)
void LogChannel::do_log(clog_type prio, std::stringstream& ss)
{
while (!ss.eof()) {
string s;
getline(ss, s);
if (!s.empty())
do_log(type, s);
do_log(prio, s);
}
}
void LogClient::do_log(clog_type type, const std::string& s)
void LogChannel::do_log(clog_type prio, const std::string& s)
{
Mutex::Locker l(log_lock);
int lvl = (type == CLOG_ERROR ? -1 : 0);
ldout(cct,lvl) << "log " << type << " : " << s << dendl;
Mutex::Locker l(channel_lock);
int lvl = (prio == CLOG_ERROR ? -1 : 0);
ldout(cct,lvl) << "log " << prio << " : " << s << dendl;
LogEntry e;
e.who = messenger->get_myinst();
// who will be set when we queue the entry on LogClient
//e.who = messenger->get_myinst();
e.stamp = ceph_clock_now(cct);
e.seq = ++last_log;
e.type = type;
// seq will be set when we queue the entry on LogClient
// e.seq = ++last_log;
e.prio = prio;
e.msg = s;
e.channel = get_log_channel();
// log to syslog?
if (cct->_conf->clog_to_syslog) {
e.log_to_syslog(cct->_conf->clog_to_syslog_level,
cct->_conf->clog_to_syslog_facility);
if (do_log_to_syslog()) {
ldout(cct,0) << __func__ << " log to syslog" << dendl;
e.log_to_syslog(get_log_prio(), get_syslog_facility());
}
// log to monitor?
if (cct->_conf->clog_to_monitors) {
log_queue.push_back(e);
// if we are a monitor, queue for ourselves, synchronously
if (is_mon) {
assert(messenger->get_myname().is_mon());
ldout(cct,10) << "send_log to self" << dendl;
Message *log = _get_mon_log_message();
messenger->get_loopback_connection()->send_message(log);
}
if (log_to_monitors) {
parent->queue(e);
}
}
@ -169,7 +189,31 @@ Message *LogClient::_get_mon_log_message()
return log;
}
void LogClient::handle_log_ack(MLogAck *m)
void LogClient::_send_to_mon()
{
assert(log_lock.is_locked());
assert(is_mon);
assert(messenger->get_myname().is_mon());
ldout(cct,10) << __func__ << " log to self" << dendl;
Message *log = _get_mon_log_message();
messenger->get_loopback_connection()->send_message(log);
}
version_t LogClient::queue(LogEntry &entry)
{
Mutex::Locker l(log_lock);
entry.seq = ++last_log;
entry.who = messenger->get_myinst();
log_queue.push_back(entry);
if (is_mon) {
_send_to_mon();
}
return entry.seq;
}
bool LogClient::handle_log_ack(MLogAck *m)
{
Mutex::Locker l(log_lock);
ldout(cct,10) << "handle_log_ack " << *m << dendl;
@ -184,6 +228,6 @@ void LogClient::handle_log_ack(MLogAck *m)
ldout(cct,10) << " logged " << entry << dendl;
q = log_queue.erase(q);
}
m->put();
return true;
}


@ -29,10 +29,12 @@ class MonMap;
class Message;
struct Connection;
class LogChannel;
class LogClientTemp
{
public:
LogClientTemp(clog_type type_, LogClient &parent_);
LogClientTemp(clog_type type_, LogChannel &parent_);
LogClientTemp(const LogClientTemp &rhs);
~LogClientTemp();
@ -44,22 +46,28 @@ public:
private:
clog_type type;
LogClient &parent;
LogChannel &parent;
stringstream ss;
};
class LogClient
/** Manage where we output to and at which priority
*
* Not to be confused with the LogClient, which is the almighty coordinator
* of channels. We just deal with the boring part of the logging: send to
* syslog, send to file, generate LogEntry and queue it for the LogClient.
*
* Past queueing the LogEntry, the LogChannel is done with the whole thing.
* LogClient will deal with sending and handling of LogEntries.
*/
class LogChannel
{
public:
enum logclient_flag_t {
NO_FLAGS = 0,
FLAG_MON = 0x1,
};
LogClient(CephContext *cct, Messenger *m, MonMap *mm,
enum logclient_flag_t flags);
void handle_log_ack(MLogAck *m);
LogChannel(CephContext *cct, LogClient *lc, const std::string &channel);
LogChannel(CephContext *cct, LogClient *lc,
const std::string &channel,
const std::string &facility,
const std::string &prio);
LogClientTemp debug() {
return LogClientTemp(CLOG_DEBUG, *this);
@ -92,14 +100,106 @@ public:
do_log(CLOG_SEC, s);
}
void set_log_to_monitors(bool v) {
log_to_monitors = v;
}
void set_log_to_syslog(bool v) {
log_to_syslog = v;
}
void set_log_channel(const std::string& v) {
log_channel = v;
}
void set_log_prio(const std::string& v) {
log_prio = v;
}
void set_syslog_facility(const std::string& v) {
syslog_facility = v;
}
std::string get_log_prio() { return log_prio; }
std::string get_log_channel() { return log_channel; }
std::string get_syslog_facility() { return syslog_facility; }
bool must_log_to_syslog() { return log_to_syslog; }
/**
* Do we want to log to syslog?
*
* @return true if log_to_syslog is true and both channel and prio
* are not empty; false otherwise.
*/
bool do_log_to_syslog() {
return must_log_to_syslog() &&
!log_prio.empty() && !log_channel.empty();
}
bool must_log_to_monitors() { return log_to_monitors; }
typedef shared_ptr<LogChannel> Ref;
private:
void do_log(clog_type prio, std::stringstream& ss);
void do_log(clog_type prio, const std::string& s);
CephContext *cct;
LogClient *parent;
Mutex channel_lock;
std::string log_channel;
std::string log_prio;
std::string syslog_facility;
bool log_to_syslog;
bool log_to_monitors;
friend class LogClientTemp;
};
typedef LogChannel::Ref LogChannelRef;
class LogClient
{
public:
enum logclient_flag_t {
NO_FLAGS = 0,
FLAG_MON = 0x1,
};
LogClient(CephContext *cct, Messenger *m, MonMap *mm,
enum logclient_flag_t flags);
virtual ~LogClient() {
channels.clear();
}
bool handle_log_ack(MLogAck *m);
void reset_session();
Message *get_mon_log_message();
bool are_pending();
LogChannelRef create_channel() {
return create_channel(CLOG_CHANNEL_DEFAULT);
}
LogChannelRef create_channel(const std::string& name) {
LogChannelRef c;
if (channels.count(name))
c = channels[name];
else {
c = LogChannelRef(new LogChannel(cct, this, name));
channels[name] = c;
}
return c;
}
void destroy_channel(const std::string& name) {
if (channels.count(name))
channels.erase(name);
}
void shutdown() {
channels.clear();
}
version_t queue(LogEntry &entry);
private:
void do_log(clog_type type, std::stringstream& ss);
void do_log(clog_type type, const std::string& s);
Message *_get_mon_log_message();
void _send_to_mon();
CephContext *cct;
Messenger *messenger;
@ -110,7 +210,7 @@ private:
version_t last_log;
std::deque<LogEntry> log_queue;
friend class LogClientTemp;
};
std::map<std::string, LogChannelRef> channels;
};
#endif
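
A minimal usage sketch of the LogClient/LogChannel split documented above, mirroring how MDS.cc and Monitor.cc wire it up later in this diff. It is an illustration only, not part of the commit; the function name, variables and include paths are assumptions.

// Illustrative sketch only -- not part of this commit.  One LogClient per
// daemon owns the queue and the monitor session; LogChannels are cheap,
// named views that build LogEntries and hand them to the client.
#include "common/LogClient.h"
#include "common/LogEntry.h"   // CLOG_CHANNEL_AUDIT

static void example_log_setup(CephContext *cct, Messenger *msgr, MonMap *monmap)
{
  LogClient log_client(cct, msgr, monmap, LogClient::NO_FLAGS);

  LogChannelRef clog = log_client.create_channel();                     // "default"
  LogChannelRef audit = log_client.create_channel(CLOG_CHANNEL_AUDIT);  // "audit"

  // Per-channel routing, normally derived from the clog_* options.
  audit->set_log_to_syslog(true);
  audit->set_syslog_facility("local0");
  audit->set_log_prio("info");

  // The channel queues the entry on the LogClient, which sends it to the mons.
  clog->info() << "message on the default channel";
  audit->info() << "message on the audit channel";
}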


@ -130,38 +130,66 @@ int string_to_syslog_facility(string s)
return LOG_USER;
}
string clog_type_to_string(clog_type t)
{
switch (t) {
case CLOG_DEBUG:
return "debug";
case CLOG_INFO:
return "info";
case CLOG_WARN:
return "warn";
case CLOG_ERROR:
return "err";
case CLOG_SEC:
return "crit";
default:
assert(0);
return 0;
}
}
void LogEntry::log_to_syslog(string level, string facility)
{
int min = string_to_syslog_level(level);
int l = clog_type_to_syslog_level(type);
int l = clog_type_to_syslog_level(prio);
if (l <= min) {
int f = string_to_syslog_facility(facility);
syslog(l | f, "%s", stringify(*this).c_str());
syslog(l | f, "%s %lu : %s",
stringify(who).c_str(),
seq,
msg.c_str());
}
}
void LogEntry::encode(bufferlist& bl) const
{
ENCODE_START(2, 2, bl);
__u16 t = type;
ENCODE_START(3, 2, bl);
__u16 t = prio;
::encode(who, bl);
::encode(stamp, bl);
::encode(seq, bl);
::encode(t, bl);
::encode(msg, bl);
::encode(channel, bl);
ENCODE_FINISH(bl);
}
void LogEntry::decode(bufferlist::iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
__u16 t;
::decode(who, bl);
::decode(stamp, bl);
::decode(seq, bl);
::decode(t, bl);
type = (clog_type)t;
prio = (clog_type)t;
::decode(msg, bl);
if (struct_v >= 3) {
::decode(channel, bl);
} else {
channel = CLOG_CHANNEL_DEFAULT;
}
DECODE_FINISH(bl);
}
@ -170,7 +198,8 @@ void LogEntry::dump(Formatter *f) const
f->dump_stream("who") << who;
f->dump_stream("stamp") << stamp;
f->dump_unsigned("seq", seq);
f->dump_stream("type") << type;
f->dump_string("channel", channel);
f->dump_stream("priority") << prio;
f->dump_string("message", msg);
}


@ -32,6 +32,11 @@ typedef enum {
CLOG_ERROR = 4,
} clog_type;
static const std::string CLOG_CHANNEL_NONE = "none";
static const std::string CLOG_CHANNEL_DEFAULT = "default";
static const std::string CLOG_CHANNEL_CLUSTER = "cluster";
static const std::string CLOG_CHANNEL_AUDIT = "audit";
/*
* Given a clog log_type, return the equivalent syslog priority
*/
@ -40,6 +45,8 @@ int clog_type_to_syslog_level(clog_type t);
int string_to_syslog_level(string s);
int string_to_syslog_facility(string s);
string clog_type_to_string(clog_type t);
struct LogEntryKey {
entity_inst_t who;
@ -64,8 +71,9 @@ struct LogEntry {
entity_inst_t who;
utime_t stamp;
uint64_t seq;
clog_type type;
clog_type prio;
string msg;
string channel;
LogEntryKey key() const { return LogEntryKey(who, stamp, seq); }
@ -125,7 +133,8 @@ inline ostream& operator<<(ostream& out, clog_type t)
inline ostream& operator<<(ostream& out, const LogEntry& e)
{
return out << e.stamp << " " << e.who << " " << e.seq << " : " << e.type << " " << e.msg;
return out << e.stamp << " " << e.who << " " << e.seq << " : "
<< e.channel << " " << e.prio << " " << e.msg;
}
#endif


@ -44,15 +44,21 @@ OPTION(err_to_syslog, OPT_BOOL, false)
OPTION(log_flush_on_exit, OPT_BOOL, true) // default changed by common_preinit()
OPTION(log_stop_at_utilization, OPT_FLOAT, .97) // stop logging at (near) full
OPTION(clog_to_monitors, OPT_BOOL, true)
OPTION(clog_to_syslog, OPT_BOOL, false)
OPTION(clog_to_syslog_level, OPT_STR, "info") // this level and above
OPTION(clog_to_syslog_facility, OPT_STR, "daemon")
// options will take k/v pairs, or single-item that will be assumed as general
// default for all, regardless of channel.
// e.g., "info" would be taken as the same as "default=info"
// also, "default=daemon audit=local0" would mean
// "default all to 'daemon', override 'audit' with 'local0'
OPTION(clog_to_monitors, OPT_STR, "default=true")
OPTION(clog_to_syslog, OPT_STR, "false")
OPTION(clog_to_syslog_level, OPT_STR, "info") // this level and above
OPTION(clog_to_syslog_facility, OPT_STR, "default=daemon audit=local0")
OPTION(mon_cluster_log_to_syslog, OPT_BOOL, false)
OPTION(mon_cluster_log_to_syslog, OPT_STR, "default=false")
OPTION(mon_cluster_log_to_syslog_level, OPT_STR, "info") // this level and above
OPTION(mon_cluster_log_to_syslog_facility, OPT_STR, "daemon")
OPTION(mon_cluster_log_file, OPT_STR, "/var/log/ceph/$cluster.log")
OPTION(mon_cluster_log_file, OPT_STR,
"default=/var/log/ceph/$cluster.$channel.log cluster=/var/log/ceph/$cluster.log")
OPTION(mon_cluster_log_file_level, OPT_STR, "info")
DEFAULT_SUBSYS(0, 5)
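
A hedged illustration of how the channel-aware option strings above are consumed, using the str_map helpers added later in this diff and the lookup pattern from Monitor::update_log_client(); the standalone main() and include paths are assumptions.

// Illustrative sketch only -- not part of this commit.
#include <iostream>
#include <map>
#include <string>
#include "include/str_map.h"
#include "common/LogEntry.h"   // CLOG_CHANNEL_DEFAULT

int main()
{
  std::map<std::string, std::string> facility;
  get_str_map("default=daemon audit=local0", &facility);

  // "audit" has an explicit override; every other channel falls back to
  // the "default" entry.
  std::cout << get_str_map_key(facility, "audit",   &CLOG_CHANNEL_DEFAULT) << "\n";  // local0
  std::cout << get_str_map_key(facility, "cluster", &CLOG_CHANNEL_DEFAULT) << "\n";  // daemon
  return 0;
}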


@ -23,9 +23,11 @@
using namespace std;
int get_str_map(const string &str,
ostream &ss,
map<string,string> *str_map)
int get_json_str_map(
const string &str,
ostream &ss,
map<string,string> *str_map,
bool fallback_to_plain)
{
json_spirit::mValue json;
try {
@ -46,23 +48,83 @@ int get_str_map(const string &str,
++i) {
(*str_map)[i->first] = i->second.get_str();
}
} catch (json_spirit::Error_position &e) {
// fallback to key=value format
list<string> pairs;
get_str_list(str, "\t\n ", pairs);
for (list<string>::iterator i = pairs.begin(); i != pairs.end(); ++i) {
size_t equal = i->find('=');
if (equal == string::npos)
(*str_map)[*i] = string();
else {
const string key = i->substr(0, equal);
equal++;
const string value = i->substr(equal);
(*str_map)[key] = value;
}
if (fallback_to_plain) {
// fallback to key=value format
get_str_map(str, "\t\n ", str_map);
} else {
return -EINVAL;
}
}
return 0;
}
int get_str_map(
const string &str,
const char *delims,
map<string,string> *str_map)
{
list<string> pairs;
get_str_list(str, delims, pairs);
for (list<string>::iterator i = pairs.begin(); i != pairs.end(); ++i) {
size_t equal = i->find('=');
if (equal == string::npos)
(*str_map)[*i] = string();
else {
const string key = i->substr(0, equal);
equal++;
const string value = i->substr(equal);
(*str_map)[key] = value;
}
}
return 0;
}
int get_str_map(
const string &str,
map<string,string> *str_map)
{
const char *delims = ",;\t\n ";
return get_str_map(str, delims, str_map);
}
string get_str_map_value(
const map<string,string> &str_map,
const string &key,
const string *def_val)
{
map<string,string>::const_iterator p = str_map.find(key);
// key exists in str_map
if (p != str_map.end()) {
// but value is empty
if (p->second.empty())
return p->first;
// and value is not empty
return p->second;
}
// key DNE in str_map and def_val was specified
if (def_val != NULL)
return *def_val;
// key DNE in str_map, no def_val was specified
return string();
}
string get_str_map_key(
const map<string,string> &str_map,
const string &key,
const string *fallback_key)
{
map<string,string>::const_iterator p = str_map.find(key);
if (p != str_map.end())
return p->second;
if (fallback_key != NULL) {
p = str_map.find(*fallback_key);
if (p != str_map.end())
return p->second;
}
return string();
}


@ -31,29 +31,107 @@
* string, integer etc. ), -EINVAL is returned and **ss** is set to
* a human readable error message.
*
* If **str** is no valid JSON, it is assumed to be a string
* containing white space separated key=value pairs. A white space is
* either space, tab or newline. The value is optional, in which case
* it defaults to an empty string. For example:
* If **str** is not valid JSON and **fallback_to_plain** is set to true
* (default: true) it is assumed to be a string containing white space
* separated key=value pairs. A white space is either space, tab or newline.
* Function **get_str_map** will be leveraged to parse the plain-text
* key/value pairs.
*
* insert your own=political statement=here
* @param [in] str JSON or plain text key/value pairs
* @param [out] ss human readable message on error
* @param [out] str_map key/value pairs read from str
* @param [in] fallback_to_plain attempt parsing as plain-text if json fails
* @return **0** on success or a -EINVAL on error.
*/
extern int get_json_str_map(
const std::string &str,
std::ostream &ss,
std::map<std::string,std::string> *str_map,
bool fallback_to_plain = true);
/**
* Parse **str** and set **str_map** with the key/value pairs read from
* it. The format of **str** is a number of custom key[=value] pairs in
* plain text format.
*
* The string will be parsed taking **delims** as field delimiters for
* key/values. The value is optional resulting in an empty string when
* not provided. For example, using white space as delimiters:
*
* insert your own=political/ideological statement=here
*
* will be parsed into:
*
* { "insert": "",
* "your": "",
* "own": "policital",
* "own": "political/ideological",
* "statement": "here" }
*
* Returns 0 on success.
* Alternative delimiters may be provided. For instance, specifying
* "white space and slash", for the above statement, would be parsed
* into:
*
* @param [in] str JSON or plain text key/value pairs
* @param [out] ss human readable message on error
* @param [out] str_map key/value pairs read from str
* @return **0** on success or a -EINVAL on error.
* { "insert": "",
* "your": "",
* "own": "political",
* "ideological": "",
* "statement": "here" }
*
* See how adding '/' to the delimiters field will spawn a new key without
* a set value.
*
* Always returns 0, as there is no condition for failure.
*
* @param [in] str plain text key/value pairs
* @param [out] str_map key/value pairs parsed from str
* @param [in] delims field delimiters to be used for parsing str
* @return **0**
*/
extern int get_str_map(const std::string &str,
std::ostream &ss,
std::map<std::string,std::string> *str_map);
extern int get_str_map(
const std::string &str,
const char *delims,
std::map<std::string,std::string> *str_map);
extern int get_str_map(
const std::string &str,
std::map<std::string,std::string> *str_map);
/**
* Returns the value of **key** in **str_map** if available.
*
* If **key** is present in **str_map**, returns its value, or **key**
* itself if that value is an empty string.
*
* If **key** is not present and **def_val** is not NULL, returns
* **def_val**; otherwise returns an empty string.
*
* @param[in] str_map Map to obtain **key** from
* @param[in] key The key to search for in the map
* @param[in] def_val The value to return in case **key** is not present
*/
extern std::string get_str_map_value(
const std::map<std::string,std::string> &str_map,
const std::string &key,
const std::string *def_val = NULL);
/**
* Returns the value of **key** in **str_map** if available.
*
* If **key** is available in **str_map** returns the value of **key**.
*
* If **key** is not available in **str_map**, and if **def_key**
* is not-NULL and available in **str_map**, then returns the value
* of **def_key**.
*
* Otherwise returns an empty string.
*
* @param[in] str_map Map to obtain **key** or **def_key** from
* @param[in] key Key to obtain the value of from **str_map**
* @param[in] def_key Key to fallback to if **key** is not present
* in **str_map**
*/
extern std::string get_str_map_key(
const std::map<std::string,std::string> &str_map,
const std::string &key,
const std::string *fallback_key = NULL);
#endif
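
A small sketch exercising the documented behaviour of these helpers; the values mirror the examples in the comments above, and the Ceph tree is assumed for include/str_map.h.

// Illustrative sketch only -- not part of this commit.
#include <cassert>
#include <map>
#include <string>
#include "include/str_map.h"

int main()
{
  std::map<std::string, std::string> m;

  // Adding '/' to the delimiters splits "political/ideological" into two
  // keys, matching the example in the get_str_map() comment above.
  get_str_map("insert your own=political/ideological statement=here",
              "\t\n /", &m);
  assert(m.count("ideological") == 1 && m["ideological"].empty());

  m.clear();
  get_str_map("default=daemon audit=", &m);  // default delimiters

  // get_str_map_value(): empty value -> the key itself; absent key -> *def_val.
  const std::string def = "none";
  assert(get_str_map_value(m, "audit") == "audit");
  assert(get_str_map_value(m, "cluster", &def) == "none");

  // get_str_map_key(): absent key -> value of the fallback key instead.
  const std::string fallback = "default";
  assert(get_str_map_key(m, "cluster", &fallback) == "daemon");
  return 0;
}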


@ -870,10 +870,10 @@ void librados::RadosClient::handle_log(MLog *m)
for (std::deque<LogEntry>::iterator it = m->entries.begin(); it != m->entries.end(); ++it) {
LogEntry e = *it;
ostringstream ss;
ss << e.stamp << " " << e.who.name << " " << e.type << " " << e.msg;
ss << e.stamp << " " << e.who.name << " " << e.prio << " " << e.msg;
string line = ss.str();
string who = stringify(e.who);
string level = stringify(e.type);
string level = stringify(e.prio);
struct timespec stamp;
e.stamp.to_timespec(&stamp);


@ -1393,7 +1393,7 @@ void CDir::_tmap_fetch(const string& want_dn)
void CDir::_tmap_fetched(bufferlist& bl, const string& want_dn, int r)
{
LogClient &clog = cache->mds->clog;
LogChannelRef clog = cache->mds->clog;
dout(10) << "_tmap_fetched " << bl.length() << " bytes for " << *this
<< " want_dn=" << want_dn << dendl;
@ -1412,7 +1412,7 @@ void CDir::_tmap_fetched(bufferlist& bl, const string& want_dn, int r)
::decode(omap, p);
if (!p.end()) {
clog.warn() << "tmap buffer of dir " << dirfrag() << " has "
clog->warn() << "tmap buffer of dir " << dirfrag() << " has "
<< bl.length() - p.get_off() << " extra bytes\n";
}
bl.clear();
@ -1452,7 +1452,7 @@ void CDir::_omap_fetch(const string& want_dn)
void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
const string& want_dn, int r)
{
LogClient &clog = cache->mds->clog;
LogChannelRef clog = cache->mds->clog;
dout(10) << "_fetched header " << hdrbl.length() << " bytes "
<< omap.size() << " keys for " << *this
<< " want_dn=" << want_dn << dendl;
@ -1469,7 +1469,7 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
}
dout(0) << "_fetched missing object for " << *this << dendl;
clog.error() << "dir " << dirfrag() << " object missing on disk; some files may be lost\n";
clog->error() << "dir " << dirfrag() << " object missing on disk; some files may be lost\n";
log_mark_dirty();
@ -1488,7 +1488,7 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
bufferlist::iterator p = hdrbl.begin();
::decode(got_fnode, p);
if (!p.end()) {
clog.warn() << "header buffer of dir " << dirfrag() << " has "
clog->warn() << "header buffer of dir " << dirfrag() << " has "
<< hdrbl.length() - p.get_off() << " extra bytes\n";
}
}
@ -1668,7 +1668,7 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
string dirpath, inopath;
this->inode->make_path_string(dirpath);
in->make_path_string(inopath);
clog.error() << "loaded dup inode " << inode_data.inode.ino
clog->error() << "loaded dup inode " << inode_data.inode.ino
<< " [" << first << "," << last << "] v" << inode_data.inode.version
<< " at " << dirpath << "/" << dname
<< ", but inode " << in->vino() << " v" << in->inode.version


@ -1797,7 +1797,7 @@ void CInode::_finish_frag_update(CDir *dir, MutationRef& mut)
/* for more info on scatterlocks, see comments by Locker::scatter_writebehind */
void CInode::finish_scatter_gather_update(int type)
{
LogClient &clog = mdcache->mds->clog;
LogChannelRef clog = mdcache->mds->clog;
dout(10) << "finish_scatter_gather_update " << type << " on " << *this << dendl;
assert(is_auth());
@ -1838,7 +1838,7 @@ void CInode::finish_scatter_gather_update(int type)
if (pf->fragstat.nfiles < 0 ||
pf->fragstat.nsubdirs < 0) {
clog.error() << "bad/negative dir size on "
clog->error() << "bad/negative dir size on "
<< dir->dirfrag() << " " << pf->fragstat << "\n";
assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);
@ -1872,7 +1872,7 @@ void CInode::finish_scatter_gather_update(int type)
break;
}
if (all) {
clog.error() << "unmatched fragstat on " << ino() << ", inode has "
clog->error() << "unmatched fragstat on " << ino() << ", inode has "
<< pi->dirstat << ", dirfrags have " << dirstat << "\n";
assert(!"unmatched fragstat" == g_conf->mds_verify_scatter);
// trust the dirfrags for now
@ -1884,7 +1884,7 @@ void CInode::finish_scatter_gather_update(int type)
if (pi->dirstat.nfiles < 0 ||
pi->dirstat.nsubdirs < 0) {
clog.error() << "bad/negative fragstat on " << ino()
clog->error() << "bad/negative fragstat on " << ino()
<< ", inode has " << pi->dirstat << "\n";
assert(!"bad/negative fragstat" == g_conf->mds_verify_scatter);
@ -1967,7 +1967,7 @@ void CInode::finish_scatter_gather_update(int type)
break;
}
if (all) {
clog.error() << "unmatched rstat on " << ino() << ", inode has "
clog->error() << "unmatched rstat on " << ino() << ", inode has "
<< pi->rstat << ", dirfrags have " << rstat << "\n";
assert(!"unmatched rstat" == g_conf->mds_verify_scatter);
// trust the dirfrag for now


@ -104,7 +104,7 @@ void InoTable::replay_alloc_id(inodeno_t id)
free.erase(id);
projected_free.erase(id);
} else {
mds->clog.error() << "journal replay alloc " << id
mds->clog->error() << "journal replay alloc " << id
<< " not in free " << free << "\n";
}
projected_version = ++version;
@ -118,7 +118,7 @@ void InoTable::replay_alloc_ids(interval_set<inodeno_t>& ids)
free.subtract(ids);
projected_free.subtract(ids);
} else {
mds->clog.error() << "journal replay alloc " << ids << ", only "
mds->clog->error() << "journal replay alloc " << ids << ", only "
<< is << " is in free " << free << "\n";
free.subtract(is);
projected_free.subtract(is);


@ -2081,7 +2081,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
if (pi->dirstat.size() < 0)
assert(!"negative dirstat size" == g_conf->mds_verify_scatter);
if (pi->dirstat.size() != pf->fragstat.size()) {
mds->clog.error() << "unmatched fragstat size on single dirfrag "
mds->clog->error() << "unmatched fragstat size on single dirfrag "
<< parent->dirfrag() << ", inode has " << pi->dirstat
<< ", dirfrag has " << pf->fragstat << "\n";
@ -2128,7 +2128,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
if (parent->get_frag() == frag_t()) { // i.e., we are the only frag
if (pi->rstat.rbytes != pf->rstat.rbytes) {
mds->clog.error() << "unmatched rstat rbytes on single dirfrag "
mds->clog->error() << "unmatched rstat rbytes on single dirfrag "
<< parent->dirfrag() << ", inode has " << pi->rstat
<< ", dirfrag has " << pf->rstat << "\n";
@ -5396,8 +5396,8 @@ void MDCache::export_remaining_imported_caps()
cap_imports.clear();
if (warn_str.peek() != EOF) {
mds->clog.warn() << "failed to reconnect caps for missing inodes:" << "\n";
mds->clog.warn(warn_str);
mds->clog->warn() << "failed to reconnect caps for missing inodes:" << "\n";
mds->clog->warn(warn_str);
}
}
@ -5561,8 +5561,8 @@ void MDCache::open_snap_parents()
++q)
warn_str << " client." << q->first << " snapid " << q->second << "\n";
}
mds->clog.warn() << "open_snap_parents has:" << "\n";
mds->clog.warn(warn_str);
mds->clog->warn() << "open_snap_parents has:" << "\n";
mds->clog->warn(warn_str);
}
assert(rejoin_waiters.empty());
assert(missing_snap_parents.empty());


@ -99,7 +99,7 @@ MDS::MDS(const std::string &n, Messenger *m, MonClient *mc) :
standby_replaying(false),
messenger(m),
monc(mc),
clog(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
log_client(m->cct, messenger, &mc->monmap, LogClient::NO_FLAGS),
op_tracker(cct, m->cct->_conf->mds_enable_op_tracker),
finisher(cct),
sessionmap(this), asok_hook(NULL) {
@ -109,6 +109,8 @@ MDS::MDS(const std::string &n, Messenger *m, MonClient *mc) :
last_tid = 0;
clog = log_client.create_channel();
monc->set_messenger(messenger);
mdsmap = new MDSMap;
@ -583,7 +585,7 @@ int MDS::init(MDSMap::DaemonState wanted_state)
finisher.start();
// tell monc about log_client so it will know about mon session resets
monc->set_log_client(&clog);
monc->set_log_client(&log_client);
int r = monc->authenticate();
if (r < 0) {
@ -757,7 +759,7 @@ void MDS::check_ops_in_flight()
for (vector<string>::iterator i = warnings.begin();
i != warnings.end();
++i) {
clog.warn() << *i;
clog->warn() << *i;
}
}
return;
@ -961,17 +963,17 @@ void MDS::handle_command(MMonCommand *m)
else if (m->cmd[0] == "cpu_profiler") {
ostringstream ss;
cpu_profiler_handle_command(m->cmd, ss);
clog.info() << ss.str();
clog->info() << ss.str();
}
else if (m->cmd[0] == "heap") {
if (!ceph_using_tcmalloc())
clog.info() << "tcmalloc not enabled, can't use heap profiler commands\n";
clog->info() << "tcmalloc not enabled, can't use heap profiler commands\n";
else {
ostringstream ss;
vector<std::string> cmdargs;
cmdargs.insert(cmdargs.begin(), m->cmd.begin()+1, m->cmd.end());
ceph_heap_profiler_handle_command(cmdargs, ss);
clog.info() << ss.str();
clog->info() << ss.str();
}
} else dout(0) << "unrecognized command! " << m->cmd << dendl;
m->put();


@ -160,7 +160,8 @@ class MDS : public Dispatcher, public md_config_obs_t {
MDSMap *mdsmap;
Objecter *objecter;
Filer *filer; // for reading/writing to/from osds
LogClient clog;
LogClient log_client;
LogChannelRef clog;
// sub systems
Server *server;


@ -79,8 +79,8 @@ void RecoveryQueue::_start(CInode *in)
// blech
if (pi->client_ranges.size() && !pi->get_max_size()) {
mds->clog.warn() << "bad client_range " << pi->client_ranges
<< " on ino " << pi->ino << "\n";
mds->clog->warn() << "bad client_range " << pi->client_ranges
<< " on ino " << pi->ino << "\n";
}
if (pi->client_ranges.size() && pi->get_max_size()) {


@ -536,7 +536,7 @@ void Server::find_idle_sessions()
utime_t age = now;
age -= session->last_cap_renew;
mds->clog.info() << "closing stale session " << session->info.inst
mds->clog->info() << "closing stale session " << session->info.inst
<< " after " << age << "\n";
dout(10) << "autoclosing stale session " << session->info.inst << " last " << session->last_cap_renew << dendl;
kill_session(session, NULL);
@ -640,7 +640,7 @@ void Server::handle_client_reconnect(MClientReconnect *m)
if (!mds->is_reconnect()) {
// XXX maybe in the future we can do better than this?
dout(1) << " no longer in reconnect state, ignoring reconnect, sending close" << dendl;
mds->clog.info() << "denied reconnect attempt (mds is "
mds->clog->info() << "denied reconnect attempt (mds is "
<< ceph_mds_state_name(mds->get_state())
<< ") from " << m->get_source_inst()
<< " after " << delay << " (allowed interval " << g_conf->mds_reconnect_timeout << ")\n";
@ -651,7 +651,7 @@ void Server::handle_client_reconnect(MClientReconnect *m)
if (session->is_closed()) {
dout(1) << " session is closed, ignoring reconnect, sending close" << dendl;
mds->clog.info() << "denied reconnect attempt (mds is "
mds->clog->info() << "denied reconnect attempt (mds is "
<< ceph_mds_state_name(mds->get_state())
<< ") from " << m->get_source_inst() << " (session is closed)\n";
m->get_connection()->send_message(new MClientSession(CEPH_SESSION_CLOSE));
@ -661,7 +661,7 @@ void Server::handle_client_reconnect(MClientReconnect *m)
// notify client of success with an OPEN
m->get_connection()->send_message(new MClientSession(CEPH_SESSION_OPEN));
mds->clog.debug() << "reconnect by " << session->info.inst << " after " << delay << "\n";
mds->clog->debug() << "reconnect by " << session->info.inst << " after " << delay << "\n";
// snaprealms
for (vector<ceph_mds_snaprealm_reconnect>::iterator p = m->realms.begin();
@ -1989,7 +1989,7 @@ CInode* Server::prepare_new_inode(MDRequestRef& mdr, CDir *dir, inodeno_t useino
if (useino && useino != in->inode.ino) {
dout(0) << "WARNING: client specified " << useino << " and i allocated " << in->inode.ino << dendl;
mds->clog.error() << mdr->client_request->get_source()
mds->clog->error() << mdr->client_request->get_source()
<< " specified ino " << useino
<< " but mds." << mds->whoami << " allocated " << in->inode.ino << "\n";
//assert(0); // just for now.


@ -1242,7 +1242,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
ss << "EMetaBlob.replay FIXME had dentry linked to wrong inode " << *dn
<< " " << *dn->get_linkage()->get_inode() << " should be " << p->inode.ino;
dout(0) << ss.str() << dendl;
mds->clog.warn(ss);
mds->clog->warn(ss);
}
dir->unlink_inode(dn);
}
@ -1265,7 +1265,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
ss << "EMetaBlob.replay FIXME had dentry linked to wrong inode " << *dn
<< " " << *dn->get_linkage()->get_inode() << " should be " << p->inode.ino;
dout(0) << ss.str() << dendl;
mds->clog.warn(ss);
mds->clog->warn(ss);
}
dir->unlink_inode(dn);
}
@ -1476,7 +1476,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// [repair bad inotable updates]
if (inotablev > mds->inotable->get_version()) {
mds->clog.error() << "journal replay inotablev mismatch "
mds->clog->error() << "journal replay inotablev mismatch "
<< mds->inotable->get_version() << " -> " << inotablev << "\n";
mds->inotable->force_replay_version(inotablev);
}
@ -1500,13 +1500,13 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
if (used_preallocated_ino) {
if (session->info.prealloc_inos.empty()) {
// HRM: badness in the journal
mds->clog.warn() << " replayed op " << client_reqs << " on session for "
mds->clog->warn() << " replayed op " << client_reqs << " on session for "
<< client_name << " with empty prealloc_inos\n";
} else {
inodeno_t next = session->next_ino();
inodeno_t i = session->take_ino(used_preallocated_ino);
if (next != i)
mds->clog.warn() << " replayed op " << client_reqs << " used ino " << i
mds->clog->warn() << " replayed op " << client_reqs << " used ino " << i
<< " but session next is " << next << "\n";
assert(i == used_preallocated_ino);
session->info.used_inos.clear();
@ -1526,7 +1526,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
}
assert(sessionmapv == mds->sessionmap.version);
} else {
mds->clog.error() << "journal replay sessionmap v " << sessionmapv
mds->clog->error() << "journal replay sessionmap v " << sessionmapv
<< " -(1|2) > table " << mds->sessionmap.version << "\n";
assert(g_conf->mds_wipe_sessions);
mds->sessionmap.wipe();
@ -1627,7 +1627,7 @@ void ESession::replay(MDS *mds)
dout(10) << " reset session " << session->info.inst << " (they reconnected)" << dendl;
}
} else {
mds->clog.error() << "replayed stray Session close event for " << client_inst
mds->clog->error() << "replayed stray Session close event for " << client_inst
<< " from time " << stamp << ", ignoring";
}
}
@ -2488,20 +2488,20 @@ void ESubtreeMap::replay(MDS *mds)
++p) {
CDir *dir = mds->mdcache->get_dirfrag(p->first);
if (!dir) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree root " << p->first << " not in cache";
++errors;
continue;
}
if (!mds->mdcache->is_subtree(dir)) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree root " << p->first << " not a subtree in cache";
++errors;
continue;
}
if (dir->get_dir_auth().first != mds->whoami) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree root " << p->first
<< " is not mine in cache (it's " << dir->get_dir_auth() << ")";
++errors;
@ -2516,13 +2516,13 @@ void ESubtreeMap::replay(MDS *mds)
for (vector<dirfrag_t>::iterator q = p->second.begin(); q != p->second.end(); ++q) {
CDir *b = mds->mdcache->get_dirfrag(*q);
if (!b) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree " << p->first << " bound " << *q << " not in cache";
++errors;
continue;
}
if (bounds.count(b) == 0) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree " << p->first << " bound " << *q << " not a bound in cache";
++errors;
continue;
@ -2530,20 +2530,20 @@ void ESubtreeMap::replay(MDS *mds)
bounds.erase(b);
}
for (set<CDir*>::iterator q = bounds.begin(); q != bounds.end(); ++q) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree " << p->first << " has extra bound in cache " << (*q)->dirfrag();
++errors;
}
if (ambiguous_subtrees.count(p->first)) {
if (!mds->mdcache->have_ambiguous_import(p->first)) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree " << p->first << " is ambiguous but is not in our cache";
++errors;
}
} else {
if (mds->mdcache->have_ambiguous_import(p->first)) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " subtree " << p->first << " is not ambiguous but is in our cache";
++errors;
}
@ -2557,7 +2557,7 @@ void ESubtreeMap::replay(MDS *mds)
if (dir->get_dir_auth().first != mds->whoami)
continue;
if (subtrees.count(dir->dirfrag()) == 0) {
mds->clog.error() << " replayed ESubtreeMap at " << get_start_off()
mds->clog->error() << " replayed ESubtreeMap at " << get_start_off()
<< " does not include cache subtree " << dir->dirfrag();
++errors;
}


@ -21,7 +21,8 @@ class MLogAck : public Message {
public:
uuid_d fsid;
version_t last;
std::string channel;
MLogAck() : Message(MSG_LOGACK) {}
MLogAck(uuid_d& f, version_t l) : Message(MSG_LOGACK), fsid(f), last(l) {}
private:
@ -36,11 +37,14 @@ public:
void encode_payload(uint64_t features) {
::encode(fsid, payload);
::encode(last, payload);
::encode(channel, payload);
}
void decode_payload() {
bufferlist::iterator p = payload.begin();
::decode(fsid, p);
::decode(last, p);
if (!p.end())
::decode(channel, p);
}
};


@ -226,7 +226,7 @@ void DataHealthService::service_tick()
// already low available disk space.
if (ours.latest_avail_percent <= g_conf->mon_data_avail_warn) {
if (ours.latest_avail_percent != last_warned_percent)
mon->clog.warn()
mon->clog->warn()
<< "reached concerning levels of available space on local monitor storage"
<< " (" << ours.latest_avail_percent << "% free)\n";
last_warned_percent = ours.latest_avail_percent;


@ -30,6 +30,7 @@
#include "common/config.h"
#include "include/assert.h"
#include "include/str_list.h"
#include "include/str_map.h"
#include "include/compat.h"
#define dout_subsys ceph_subsys_mon
@ -84,7 +85,7 @@ void LogMonitor::create_initial()
LogEntry e;
memset(&e.who, 0, sizeof(e.who));
e.stamp = ceph_clock_now(g_ceph_context);
e.type = CLOG_INFO;
e.prio = CLOG_INFO;
std::stringstream ss;
ss << "mkfs " << mon->monmap->get_fsid();
e.msg = ss.str();
@ -102,7 +103,7 @@ void LogMonitor::update_from_paxos(bool *need_bootstrap)
return;
assert(version >= summary.version);
bufferlist blog;
map<string,bufferlist> channel_blog;
version_t latest_full = get_version_latest_full();
dout(10) << __func__ << " latest full " << latest_full << dendl;
@ -131,17 +132,40 @@ void LogMonitor::update_from_paxos(bool *need_bootstrap)
le.decode(p);
dout(7) << "update_from_paxos applying incremental log " << summary.version+1 << " " << le << dendl;
if (g_conf->mon_cluster_log_to_syslog) {
le.log_to_syslog(g_conf->mon_cluster_log_to_syslog_level,
g_conf->mon_cluster_log_to_syslog_facility);
string channel = le.channel;
if (channel.empty()) // keep retrocompatibility
channel = CLOG_CHANNEL_CLUSTER;
if (channels.do_log_to_syslog(channel)) {
string level = channels.get_level(channel);
string facility = channels.get_facility(channel);
if (level.empty() || facility.empty()) {
derr << __func__ << " unable to log to syslog -- level or facility"
<< " not defined (level: " << level << ", facility: "
<< facility << ")" << dendl;
continue;
}
le.log_to_syslog(channels.get_level(channel),
channels.get_facility(channel));
}
if (g_conf->mon_cluster_log_file.length()) {
int min = string_to_syslog_level(g_conf->mon_cluster_log_file_level);
int l = clog_type_to_syslog_level(le.type);
string log_file = channels.get_log_file(channel);
if (!log_file.empty()) {
string log_file_level = channels.get_log_file_level(channel);
if (log_file_level.empty()) {
dout(1) << __func__ << " warning: log file level not defined for"
<< " channel '" << channel << "' yet a log file is configured --"
<< " will assume lowest level possible" << dendl;
}
int min = string_to_syslog_level(log_file_level);
int l = clog_type_to_syslog_level(le.prio);
if (l <= min) {
stringstream ss;
ss << le << "\n";
blog.append(ss.str());
// init entry if DNE
bufferlist &blog = channel_blog[channel];
blog.append(ss.str());
}
}
@ -151,17 +175,30 @@ void LogMonitor::update_from_paxos(bool *need_bootstrap)
summary.version++;
}
dout(10) << __func__ << " logging for "
<< channel_blog.size() << " channels" << dendl;
for(map<string,bufferlist>::iterator p = channel_blog.begin();
p != channel_blog.end(); ++p) {
if (!p->second.length()) {
dout(15) << __func__ << " channel '" << p->first
<< "': nothing to log" << dendl;
continue;
}
if (blog.length()) {
int fd = ::open(g_conf->mon_cluster_log_file.c_str(), O_WRONLY|O_APPEND|O_CREAT, 0600);
dout(15) << __func__ << " channel '" << p->first
<< "' logging " << p->second.length() << " bytes" << dendl;
string log_file = channels.get_log_file(p->first);
int fd = ::open(log_file.c_str(), O_WRONLY|O_APPEND|O_CREAT, 0600);
if (fd < 0) {
int err = -errno;
dout(1) << "unable to write to " << g_conf->mon_cluster_log_file << ": " << cpp_strerror(err) << dendl;
dout(1) << "unable to write to '" << log_file << "' for channel '"
<< p->first << "': " << cpp_strerror(err) << dendl;
} else {
int err = blog.write_fd(fd);
int err = p->second.write_fd(fd);
if (err < 0) {
dout(1) << "error writing to " << g_conf->mon_cluster_log_file
<< ": " << cpp_strerror(err) << dendl;
dout(1) << "error writing to '" << log_file << "' for channel '"
<< p->first << "': " << cpp_strerror(err) << dendl;
}
VOID_TEMP_FAILURE_RETRY(::close(fd));
}
@ -378,7 +415,7 @@ bool LogMonitor::prepare_command(MMonCommand *m)
le.who = m->get_orig_source_inst();
le.stamp = m->get_recv_stamp();
le.seq = 0;
le.type = CLOG_INFO;
le.prio = CLOG_INFO;
le.msg = str_join(logtext, " ");
pending_summary.add(le);
pending_log.insert(pair<utime_t,LogEntry>(le.stamp, le));
@ -483,7 +520,7 @@ bool LogMonitor::_create_sub_summary(MLog *mlog, int level)
list<LogEntry>::reverse_iterator it = summary.tail.rbegin();
for (; it != summary.tail.rend(); ++it) {
LogEntry e = *it;
if (e.type < level)
if (e.prio < level)
continue;
mlog->entries.push_back(e);
@ -512,7 +549,7 @@ void LogMonitor::_create_sub_incremental(MLog *mlog, int level, version_t sv)
<< " to first_committed " << get_first_committed() << dendl;
LogEntry le;
le.stamp = ceph_clock_now(NULL);
le.type = CLOG_WARN;
le.prio = CLOG_WARN;
ostringstream ss;
ss << "skipped log messages from " << sv << " to " << get_first_committed();
le.msg = ss.str();
@ -533,9 +570,9 @@ void LogMonitor::_create_sub_incremental(MLog *mlog, int level, version_t sv)
LogEntry le;
le.decode(p);
if (le.type < level) {
if (le.prio < level) {
dout(20) << __func__ << " requested " << level
<< " entry " << le.type << dendl;
<< " entry " << le.prio << dendl;
continue;
}
@ -548,3 +585,92 @@ void LogMonitor::_create_sub_incremental(MLog *mlog, int level, version_t sv)
<< mlog->entries.size() << " entries)" << dendl;
}
void LogMonitor::update_log_channels()
{
ostringstream oss;
channels.clear();
int r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog,
oss, &channels.log_to_syslog,
CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'mon_cluster_log_to_syslog'" << dendl;
return;
}
r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog_level,
oss, &channels.syslog_level,
CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'mon_cluster_log_to_syslog_level'"
<< dendl;
return;
}
r = get_conf_str_map_helper(g_conf->mon_cluster_log_to_syslog_facility,
oss, &channels.syslog_facility,
CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'mon_cluster_log_to_syslog_facility'"
<< dendl;
return;
}
r = get_conf_str_map_helper(g_conf->mon_cluster_log_file, oss,
&channels.log_file,
CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'mon_cluster_log_file'" << dendl;
return;
}
r = get_conf_str_map_helper(g_conf->mon_cluster_log_file_level, oss,
&channels.log_file_level,
CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'mon_cluster_log_file_level'"
<< dendl;
return;
}
}
void LogMonitor::log_channel_info::expand_channel_meta(map<string,string> &m)
{
generic_dout(10) << __func__ << " expand map: " << m << dendl;
for (map<string,string>::iterator p = m.begin(); p != m.end(); ++p) {
m[p->first] = expand_channel_meta(p->second, p->first);
}
generic_dout(10) << __func__ << " expanded map: " << m << dendl;
}
string LogMonitor::log_channel_info::expand_channel_meta(
const string &input,
const string &change_to)
{
size_t pos = string::npos;
string s(input);
while ((pos = s.find(LOG_META_CHANNEL)) != string::npos) {
string tmp = s.substr(0, pos) + change_to;
if (pos+LOG_META_CHANNEL.length() < s.length())
tmp += s.substr(pos+LOG_META_CHANNEL.length());
s = tmp;
}
generic_dout(20) << __func__ << " from '" << input
<< "' to '" << s << "'" << dendl;
return s;
}
void LogMonitor::handle_conf_change(const struct md_config_t *conf,
const std::set<std::string> &changed)
{
if (changed.count("mon_cluster_log_to_syslog") ||
changed.count("mon_cluster_log_to_syslog_level") ||
changed.count("mon_cluster_log_to_syslog_facility") ||
changed.count("mon_cluster_log_file") ||
changed.count("mon_cluster_log_file_level")) {
update_log_channels();
}
}


@ -28,11 +28,75 @@ using namespace std;
class MMonCommand;
class LogMonitor : public PaxosService {
static const string LOG_META_CHANNEL = "$channel";
class LogMonitor : public PaxosService,
public md_config_obs_t {
private:
multimap<utime_t,LogEntry> pending_log;
LogSummary pending_summary, summary;
struct log_channel_info {
map<string,string> log_to_syslog;
map<string,string> syslog_level;
map<string,string> syslog_facility;
map<string,string> log_file;
map<string,string> log_file_level;
void clear() {
log_to_syslog.clear();
syslog_level.clear();
syslog_facility.clear();
log_file.clear();
log_file_level.clear();
}
void expand_channel_meta() {
expand_channel_meta(log_to_syslog);
expand_channel_meta(syslog_level);
expand_channel_meta(syslog_facility);
expand_channel_meta(log_file);
expand_channel_meta(log_file_level);
}
void expand_channel_meta(map<string,string> &m);
string expand_channel_meta(const string &input,
const string &change_to);
bool do_log_to_syslog(const string &channel) {
return (get_str_map_key(log_to_syslog, channel,
&CLOG_CHANNEL_DEFAULT) == "true");
}
string get_facility(const string &channel) {
return get_str_map_key(syslog_facility, channel,
&CLOG_CHANNEL_DEFAULT);
}
string get_level(const string &channel) {
return get_str_map_key(syslog_level, channel,
&CLOG_CHANNEL_DEFAULT);
}
string get_log_file(const string &channel) {
string fname;
if (log_file.count(channel) == 0) {
log_file[channel] = expand_channel_meta(
get_str_map_key(log_file, channel,
&CLOG_CHANNEL_DEFAULT),
channel);
}
return log_file[channel];
}
string get_log_file_level(const string &channel) {
return get_str_map_key(log_file_level, channel,
&CLOG_CHANNEL_DEFAULT);
}
} channels;
void update_log_channels();
void create_initial();
void update_from_paxos(bool *need_bootstrap);
void create_pending(); // prepare a new pending
@ -80,6 +144,12 @@ private:
public:
LogMonitor(Monitor *mn, Paxos *p, const string& service_name)
: PaxosService(mn, p, service_name) { }
void init() {
generic_dout(10) << "LogMonitor::init" << dendl;
g_conf->add_observer(this);
update_log_channels();
}
void tick(); // check state, take actions
@ -94,6 +164,22 @@ private:
*/
int sub_name_to_id(const string& n);
};
void on_shutdown() {
g_conf->remove_observer(this);
}
const char **get_tracked_conf_keys() const {
static const char* KEYS[] = {
"mon_cluster_log_to_syslog",
"mon_cluster_log_to_syslog_level",
"mon_cluster_log_to_syslog_facility",
"mon_cluster_log_file",
"mon_cluster_log_file_level",
NULL
};
return KEYS;
}
void handle_conf_change(const struct md_config_t *conf,
const std::set<std::string> &changed);
};
#endif
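
The $channel placeholder (LOG_META_CHANNEL) above is what lets a single mon_cluster_log_file value fan out into one log file per channel. Below is a standalone re-implementation of just that substitution, for illustration only; the real code is expand_channel_meta() in LogMonitor.cc, and $cluster is expanded separately by the config machinery.

// Illustrative sketch only -- not part of this commit.
#include <iostream>
#include <string>

static std::string expand_channel(std::string s, const std::string &channel)
{
  const std::string meta = "$channel";   // LOG_META_CHANNEL
  size_t pos;
  while ((pos = s.find(meta)) != std::string::npos)
    s.replace(pos, meta.size(), channel);
  return s;
}

int main()
{
  // With the default
  //   mon_cluster_log_file = "default=/var/log/ceph/$cluster.$channel.log
  //                           cluster=/var/log/ceph/$cluster.log"
  // the "cluster" channel keeps the old path and every other channel gets
  // its own per-channel file:
  std::cout << expand_channel("/var/log/ceph/$cluster.log", "cluster") << "\n";
  // -> /var/log/ceph/$cluster.log
  std::cout << expand_channel("/var/log/ceph/$cluster.$channel.log", "audit") << "\n";
  // -> /var/log/ceph/$cluster.audit.log
  return 0;
}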


@ -525,7 +525,7 @@ bool MDSMonitor::should_propose(double& delay)
void MDSMonitor::_updated(MMDSBeacon *m)
{
dout(10) << "_updated " << m->get_orig_source() << " " << *m << dendl;
mon->clog.info() << m->get_orig_source_inst() << " "
mon->clog->info() << m->get_orig_source_inst() << " "
<< ceph_mds_state_name(m->get_state()) << "\n";
if (m->get_state() == MDSMap::STATE_STOPPED) {
@ -542,7 +542,7 @@ void MDSMonitor::on_active()
update_logger();
if (mon->is_leader())
mon->clog.info() << "mdsmap " << mdsmap << "\n";
mon->clog->info() << "mdsmap " << mdsmap << "\n";
}
void MDSMonitor::get_health(list<pair<health_status_t, string> >& summary,


@ -292,6 +292,7 @@ bool MonClient::ms_dispatch(Message *m)
case MSG_LOGACK:
if (log_client) {
log_client->handle_log_ack(static_cast<MLogAck*>(m));
m->put();
if (more_log_pending) {
send_log();
}


@ -65,6 +65,7 @@
#include "include/color.h"
#include "include/ceph_fs.h"
#include "include/str_list.h"
#include "include/str_map.h"
#include "OSDMonitor.h"
#include "MDSMonitor.h"
@ -141,7 +142,7 @@ Monitor::Monitor(CephContext* cct_, string nm, MonitorDBStore *s,
has_ever_joined(false),
logger(NULL), cluster_logger(NULL), cluster_logger_registered(false),
monmap(map),
clog(cct_, messenger, monmap, LogClient::FLAG_MON),
log_client(cct_, messenger, monmap, LogClient::FLAG_MON),
key_server(cct, &keyring),
auth_cluster_required(cct,
cct->_conf->auth_supported.length() ?
@ -181,6 +182,11 @@ Monitor::Monitor(CephContext* cct_, string nm, MonitorDBStore *s,
{
rank = -1;
clog = log_client.create_channel(CLOG_CHANNEL_CLUSTER);
audit_clog = log_client.create_channel(CLOG_CHANNEL_AUDIT);
update_log_clients();
paxos = new Paxos(this, "paxos");
paxos_service[PAXOS_MDSMAP] = new MDSMonitor(this, paxos, "mdsmap");
@ -263,26 +269,44 @@ void Monitor::do_admin_command(string command, cmdmap_t& cmdmap, string format,
boost::scoped_ptr<Formatter> f(new_formatter(format));
string args;
for (cmdmap_t::iterator p = cmdmap.begin();
p != cmdmap.end(); ++p) {
if (p->first == "prefix")
continue;
if (!args.empty())
args += ", ";
args += cmd_vartype_stringify(p->second);
}
args = "[" + args + "]";
audit_clog->info() << "from='admin socket' "
<< "entity='admin socket' "
<< "cmd=" << command << " "
<< "args=" << args << ": dispatch";
if (command == "mon_status") {
get_mon_status(f.get(), ss);
if (f)
f->flush(ss);
} else if (command == "quorum_status")
} else if (command == "quorum_status") {
_quorum_status(f.get(), ss);
else if (command == "sync_force") {
} else if (command == "sync_force") {
string validate;
if ((!cmd_getval(g_ceph_context, cmdmap, "validate", validate)) ||
(validate != "--yes-i-really-mean-it")) {
ss << "are you SURE? this will mean the monitor store will be erased "
"the next time the monitor is restarted. pass "
"'--yes-i-really-mean-it' if you really do.";
return;
goto abort;
}
sync_force(f.get(), ss);
} else if (command.find("add_bootstrap_peer_hint") == 0) {
_add_bootstrap_peer_hint(command, cmdmap, ss);
if (!_add_bootstrap_peer_hint(command, cmdmap, ss))
goto abort;
} else if (command.find("osdmonitor_prepare_command") == 0) {
_osdmonitor_prepare_command(cmdmap, ss);
if (!_osdmonitor_prepare_command(cmdmap, ss))
goto abort;
} else if (command == "quorum enter") {
elector.start_participating();
start_election();
@ -291,8 +315,20 @@ void Monitor::do_admin_command(string command, cmdmap_t& cmdmap, string format,
start_election();
elector.stop_participating();
ss << "stopped responding to quorum, initiated new election";
} else
} else {
assert(0 == "bad AdminSocket command binding");
}
audit_clog->info() << "from='admin socket' "
<< "entity='admin socket' "
<< "cmd=" << command << " "
<< "args=" << args << ": finished";
return;
abort:
audit_clog->info() << "from='admin socket' "
<< "entity='admin socket' "
<< "cmd=" << command << " "
<< "args=" << args << ": aborted";
}
void Monitor::handle_signal(int signum)
@ -386,6 +422,11 @@ const char** Monitor::get_tracked_conf_keys() const
"mon_lease",
"mon_lease_renew_interval",
"mon_lease_ack_timeout",
// clog & admin clog
"clog_to_monitors",
"clog_to_syslog",
"clog_to_syslog_facility",
"clog_to_syslog_level",
NULL
};
return KEYS;
@ -395,6 +436,87 @@ void Monitor::handle_conf_change(const struct md_config_t *conf,
const std::set<std::string> &changed)
{
sanitize_options();
dout(10) << __func__ << " " << changed << dendl;
if (changed.count("clog_to_monitors") ||
changed.count("clog_to_syslog") ||
changed.count("clog_to_syslog_level") ||
changed.count("clog_to_syslog_facility")) {
update_log_clients();
}
}
void Monitor::update_log_client(
LogChannelRef lc, const string &name,
map<string,string> &log_to_monitors,
map<string,string> &log_to_syslog,
map<string,string> &log_channels,
map<string,string> &log_prios)
{
bool to_monitors = (get_str_map_key(log_to_monitors, name,
&CLOG_CHANNEL_DEFAULT) == "true");
bool to_syslog = (get_str_map_key(log_to_syslog, name,
&CLOG_CHANNEL_DEFAULT) == "true");
string syslog_facility = get_str_map_key(log_channels, name,
&CLOG_CHANNEL_DEFAULT);
string prio = get_str_map_key(log_prios, name, &CLOG_CHANNEL_DEFAULT);
lc->set_log_to_monitors(to_monitors);
lc->set_log_to_syslog(to_syslog);
lc->set_syslog_facility(syslog_facility);
lc->set_log_channel(name);
lc->set_log_prio(prio);
dout(15) << __func__ << " " << name << "("
<< " to_monitors: " << (to_monitors ? "true" : "false")
<< " to_syslog: " << (to_syslog ? "true" : "false")
<< " syslog_facility: " << syslog_facility
<< " prio: " << prio << ")" << dendl;
}
void Monitor::update_log_clients()
{
map<string,string> log_to_monitors;
map<string,string> log_to_syslog;
map<string,string> log_channel;
map<string,string> log_prio;
ostringstream oss;
int r = get_conf_str_map_helper(g_conf->clog_to_monitors, oss,
&log_to_monitors, CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'clog_to_monitors'" << dendl;
return;
}
r = get_conf_str_map_helper(g_conf->clog_to_syslog, oss,
&log_to_syslog, CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'clog_to_syslog'" << dendl;
return;
}
r = get_conf_str_map_helper(g_conf->clog_to_syslog_facility, oss,
&log_channel, CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'clog_to_syslog_facility'" << dendl;
return;
}
r = get_conf_str_map_helper(g_conf->clog_to_syslog_level, oss,
&log_prio, CLOG_CHANNEL_DEFAULT);
if (r < 0) {
derr << __func__ << " error parsing 'clog_to_syslog_level'" << dendl;
return;
}
update_log_client(clog, CLOG_CHANNEL_CLUSTER,
log_to_monitors, log_to_syslog,
log_channel, log_prio);
update_log_client(audit_clog, CLOG_CHANNEL_AUDIT,
log_to_monitors, log_to_syslog,
log_channel, log_prio);
}
int Monitor::sanitize_options()
@ -404,7 +526,7 @@ int Monitor::sanitize_options()
// mon_lease must be greater than mon_lease_renewal; otherwise we
// may incur in leases expiring before they are renewed.
if (g_conf->mon_lease <= g_conf->mon_lease_renew_interval) {
clog.error() << "mon_lease (" << g_conf->mon_lease
clog->error() << "mon_lease (" << g_conf->mon_lease
<< ") must be greater "
<< "than mon_lease_renew_interval ("
<< g_conf->mon_lease_renew_interval << ")";
@ -417,7 +539,7 @@ int Monitor::sanitize_options()
// the monitors happened to be overloaded -- or even under normal load for
// a small enough value.
if (g_conf->mon_lease_ack_timeout <= g_conf->mon_lease) {
clog.error() << "mon_lease_ack_timeout ("
clog->error() << "mon_lease_ack_timeout ("
<< g_conf->mon_lease_ack_timeout
<< ") must be greater than mon_lease ("
<< g_conf->mon_lease << ")";
@ -782,6 +904,8 @@ void Monitor::shutdown()
cluster_logger = NULL;
}
log_client.shutdown();
// unlock before msgr shutdown...
lock.Unlock();
@ -863,33 +987,37 @@ void Monitor::bootstrap()
}
}
void Monitor::_osdmonitor_prepare_command(cmdmap_t& cmdmap, ostream& ss)
bool Monitor::_osdmonitor_prepare_command(cmdmap_t& cmdmap, ostream& ss)
{
if (!is_leader()) {
ss << "mon must be a leader";
return;
return false;
}
string cmd;
cmd_getval(g_ceph_context, cmdmap, "prepare", cmd);
cmdmap["prefix"] = cmdmap["prepare"];
OSDMonitor *monitor = osdmon();
MMonCommand *m = static_cast<MMonCommand *>((new MMonCommand())->get());
if (monitor->prepare_command_impl(m, cmdmap))
bool r = true;
if (monitor->prepare_command_impl(m, cmdmap)) {
ss << "true";
else
} else {
ss << "false";
r = false;
}
m->put();
return r;
}
void Monitor::_add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss)
bool Monitor::_add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss)
{
string addrstr;
if (!cmd_getval(g_ceph_context, cmdmap, "addr", addrstr)) {
ss << "unable to parse address string value '"
<< cmd_vartype_stringify(cmdmap["addr"]) << "'";
return;
return false;
}
dout(10) << "_add_bootstrap_peer_hint '" << cmd << "' '"
<< addrstr << "'" << dendl;
@ -898,12 +1026,12 @@ void Monitor::_add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss
const char *end = 0;
if (!addr.parse(addrstr.c_str(), &end)) {
ss << "failed to parse addr '" << addrstr << "'; syntax is 'add_bootstrap_peer_hint ip[:port]'";
return;
return false;
}
if (is_leader() || is_peon()) {
ss << "mon already active; ignoring bootstrap hint";
return;
return true;
}
if (addr.get_port() == 0)
@ -911,6 +1039,7 @@ void Monitor::_add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss
extra_probe_peers.insert(addr);
ss << "adding peer " << addr << " to list: " << extra_probe_peers;
return true;
}
// called by bootstrap(), or on leader|peon -> electing
@ -1687,7 +1816,7 @@ void Monitor::start_election()
cancel_probe_timeout();
clog.info() << "mon." << name << " calling new monitor election\n";
clog->info() << "mon." << name << " calling new monitor election\n";
elector.call_election();
}
@ -1735,7 +1864,7 @@ void Monitor::win_election(epoch_t epoch, set<int>& active, uint64_t features,
quorum_features = features;
outside_quorum.clear();
clog.info() << "mon." << name << "@" << rank
clog->info() << "mon." << name << "@" << rank
<< " won leader election with quorum " << quorum << "\n";
set_leader_supported_commands(cmdset, cmdsize);
@ -2346,10 +2475,20 @@ void Monitor::handle_command(MMonCommand *m)
if (!_allowed_command(session, module, prefix, cmdmap,
param_str_map, mon_cmd)) {
dout(1) << __func__ << " access denied" << dendl;
audit_clog->info() << "from='" << session->inst << "' "
<< "entity='" << session->auth_handler->get_entity_name()
<< "' cmd=" << m->cmd << ": access denied";
reply_command(m, -EACCES, "access denied", 0);
return;
}
audit_clog->info() << "from='" << session->inst << "' "
<< "entity='"
<< (session->auth_handler ?
stringify(session->auth_handler->get_entity_name())
: "forwarded-request")
<< "' cmd=" << m->cmd << ": dispatch";
if (module == "mds" || module == "fs") {
mdsmon()->dispatch(m);
return;
@ -3115,7 +3254,8 @@ void Monitor::dispatch(MonSession *s, Message *m, const bool src_is_mon)
break;
case MSG_LOGACK:
clog.handle_log_ack((MLogAck*)m);
log_client.handle_log_ack((MLogAck*)m);
m->put();
break;
// monmap
@ -3508,9 +3648,9 @@ void Monitor::handle_timecheck_leader(MTimeCheck *m)
ostringstream ss;
health_status_t status = timecheck_status(ss, skew_bound, latency);
if (status == HEALTH_ERR)
clog.error() << other << " " << ss.str() << "\n";
clog->error() << other << " " << ss.str() << "\n";
else if (status == HEALTH_WARN)
clog.warn() << other << " " << ss.str() << "\n";
clog->warn() << other << " " << ss.str() << "\n";
dout(10) << __func__ << " from " << other << " ts " << m->timestamp
<< " delta " << delta << " skew_bound " << skew_bound
@ -3779,12 +3919,12 @@ int Monitor::scrub()
assert(is_leader());
if ((get_quorum_features() & CEPH_FEATURE_MON_SCRUB) == 0) {
clog.warn() << "scrub not supported by entire quorum\n";
clog->warn() << "scrub not supported by entire quorum\n";
return -EOPNOTSUPP;
}
if (!scrub_result.empty()) {
clog.info() << "scrub already in progress\n";
clog->info() << "scrub already in progress\n";
return -EBUSY;
}
@ -3879,13 +4019,13 @@ void Monitor::scrub_finish()
continue;
if (p->second != mine) {
++errors;
clog.error() << "scrub mismatch" << "\n";
clog.error() << " mon." << rank << " " << mine << "\n";
clog.error() << " mon." << p->first << " " << p->second << "\n";
clog->error() << "scrub mismatch" << "\n";
clog->error() << " mon." << rank << " " << mine << "\n";
clog->error() << " mon." << p->first << " " << p->second << "\n";
}
}
if (!errors)
clog.info() << "scrub ok on " << quorum << ": " << mine << "\n";
clog->info() << "scrub ok on " << quorum << ": " << mine << "\n";
scrub_reset();
}

View File

@ -52,6 +52,7 @@
#include <memory>
#include "include/memory.h"
#include "include/str_map.h"
#include <errno.h>
@ -147,7 +148,9 @@ public:
set<entity_addr_t> extra_probe_peers;
LogClient clog;
LogClient log_client;
LogChannelRef clog;
LogChannelRef audit_clog;
KeyRing keyring;
KeyServer key_server;
@ -644,8 +647,8 @@ public:
const MonCommand *this_cmd);
void get_mon_status(Formatter *f, ostream& ss);
void _quorum_status(Formatter *f, ostream& ss);
void _osdmonitor_prepare_command(cmdmap_t& cmdmap, ostream& ss);
void _add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss);
bool _osdmonitor_prepare_command(cmdmap_t& cmdmap, ostream& ss);
bool _add_bootstrap_peer_hint(string cmd, cmdmap_t& cmdmap, ostream& ss);
void handle_command(class MMonCommand *m);
void handle_route(MRoute *m);
@ -724,8 +727,31 @@ public:
C_Command(Monitor *_mm, MMonCommand *_m, int r, string s, bufferlist rd, version_t v) :
mon(_mm), m(_m), rc(r), rs(s), rdata(rd), version(v){}
void finish(int r) {
if (r >= 0)
if (r >= 0) {
ostringstream ss;
if (!m->get_connection()) {
ss << "connection dropped for command ";
} else {
MonSession *s = m->get_session();
// if the client drops, we may not have a session to draw information from.
if (s) {
ss << "from='" << s->inst << "' "
<< "entity='";
if (s->auth_handler)
ss << s->auth_handler->get_entity_name();
else
ss << "forwarded-request";
ss << "' ";
} else {
ss << "session dropped for command ";
}
}
ss << "cmd='" << m->cmd << "': finished";
mon->audit_clog->info() << ss.str();
mon->reply_command(m, rc, rs, rdata, version);
}
else if (r == -ECANCELED)
m->put();
else if (r == -EAGAIN)
@ -794,6 +820,12 @@ public:
virtual void handle_conf_change(const struct md_config_t *conf,
const std::set<std::string> &changed);
void update_log_client(LogChannelRef lc, const string &name,
map<string,string> &log_to_monitors,
map<string,string> &log_to_syslog,
map<string,string> &log_channels,
map<string,string> &log_prios);
void update_log_clients();
int sanitize_options();
int preinit();
int init();
@ -983,4 +1015,47 @@ struct MonCommand {
};
WRITE_CLASS_ENCODER(MonCommand)
// Having this here is less than optimal, but we need it in both the
// Monitor class and the LogMonitor class, and keeping a single copy
// here avoids duplicating it in both places (and the risk of the two
// copies drifting apart if the mechanism ever changes).
//
// This is just a helper for those two callers and should not be
// needed anywhere else.
//
// Its only purpose is to check whether a given map has exactly ONE
// key with an empty value (which means 'get_str_map()' parsed a bare
// 'VALUE' with no KEY/VALUE pairs) and, in that case, to reassign
// said 'VALUE' to the given 'def_key', so that we end up with a map
// of the form "m = { 'def_key' : 'VALUE' }" instead of the original
// "m = { 'VALUE' : '' }".
static inline int get_conf_str_map_helper(
const string &str,
ostringstream &oss,
map<string,string> *m,
const string &def_key)
{
int r = get_str_map(str, m);
if (r < 0) {
generic_derr << __func__ << " error: " << oss.str() << dendl;
return r;
}
if (r >= 0 && m->size() == 1) {
map<string,string>::iterator p = m->begin();
if (p->second.empty()) {
string s = p->first;
m->erase(s);
(*m)[def_key] = s;
}
}
return r;
}
#endif
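
For illustration, a minimal usage sketch of this helper (editor's example; the
literal config value and the "default" key are assumptions, not taken from this
commit, and the snippet relies on the includes already pulled in by this
header):

    map<string,string> m;
    ostringstream oss;
    // a bare value such as "true" is parsed by get_str_map() as
    // { "true" : "" }; the helper rewrites it under the default key.
    int r = get_conf_str_map_helper("true", oss, &m, "default");
    // on success r == 0 and m == { "default" : "true" }; a keyed string
    // such as "cluster=true audit=false" would be left untouched.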

View File

@ -123,7 +123,7 @@ void MonmapMonitor::on_active()
}
if (mon->is_leader())
mon->clog.info() << "monmap " << *mon->monmap << "\n";
mon->clog->info() << "monmap " << *mon->monmap << "\n";
}
bool MonmapMonitor::preprocess_query(PaxosServiceMessage *m)

View File

@ -401,7 +401,7 @@ void OSDMonitor::on_active()
}
if (mon->is_leader())
mon->clog.info() << "osdmap " << osdmap << "\n";
mon->clog->info() << "osdmap " << osdmap << "\n";
if (!mon->is_leader()) {
list<MOSDFailure*> ls;
@ -981,7 +981,7 @@ bool OSDMonitor::prepare_mark_me_down(MOSDMarkMeDown *m)
assert(osdmap.is_up(target_osd));
assert(osdmap.get_addr(target_osd) == m->get_target().addr);
mon->clog.info() << "osd." << target_osd << " marked itself down\n";
mon->clog->info() << "osd." << target_osd << " marked itself down\n";
pending_inc.new_state[target_osd] = CEPH_OSD_UP;
if (m->request_ack)
wait_for_finished_proposal(new C_AckMarkedDown(this, m));
@ -1127,7 +1127,7 @@ bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
dout(1) << " we have enough reports/reporters to mark osd." << target_osd << " down" << dendl;
pending_inc.new_state[target_osd] = CEPH_OSD_UP;
mon->clog.info() << osdmap.get_inst(target_osd) << " failed ("
mon->clog->info() << osdmap.get_inst(target_osd) << " failed ("
<< fi.num_reports << " reports from " << (int)fi.reporters.size() << " peers after "
<< failed_for << " >= grace " << grace << ")\n";
return true;
@ -1151,7 +1151,7 @@ bool OSDMonitor::prepare_failure(MOSDFailure *m)
if (m->if_osd_failed()) {
// add a report
mon->clog.debug() << m->get_target() << " reported failed by "
mon->clog->debug() << m->get_target() << " reported failed by "
<< m->get_orig_source_inst() << "\n";
failure_info_t& fi = failure_info[target_osd];
MOSDFailure *old = fi.add_report(reporter, failed_since, m);
@ -1163,7 +1163,7 @@ bool OSDMonitor::prepare_failure(MOSDFailure *m)
return check_failure(now, target_osd, fi);
} else {
// remove the report
mon->clog.debug() << m->get_target() << " failure report canceled by "
mon->clog->debug() << m->get_target() << " failure report canceled by "
<< m->get_orig_source_inst() << "\n";
if (failure_info.count(target_osd)) {
failure_info_t& fi = failure_info[target_osd];
@ -1445,7 +1445,7 @@ void OSDMonitor::_booted(MOSDBoot *m, bool logit)
<< " w " << m->sb.weight << " from " << m->sb.current_epoch << dendl;
if (logit) {
mon->clog.info() << m->get_orig_source_inst() << " boot\n";
mon->clog->info() << m->get_orig_source_inst() << " boot\n";
}
send_latest(m, m->sb.current_epoch+1);
@ -1496,7 +1496,7 @@ bool OSDMonitor::prepare_alive(MOSDAlive *m)
int from = m->get_orig_source().num();
if (0) { // we probably don't care much about these
mon->clog.debug() << m->get_orig_source_inst() << " alive\n";
mon->clog->debug() << m->get_orig_source_inst() << " alive\n";
}
dout(7) << "prepare_alive want up_thru " << m->want << " have " << m->version
@ -1968,7 +1968,7 @@ void OSDMonitor::tick()
do_propose = true;
mon->clog.info() << "osd." << o << " out (down for " << down << ")\n";
mon->clog->info() << "osd." << o << " out (down for " << down << ")\n";
} else
continue;
}
@ -2057,7 +2057,7 @@ void OSDMonitor::handle_osd_timeouts(const utime_t &now,
} else if (can_mark_down(i)) {
utime_t diff = now - t->second;
if (diff > timeo) {
mon->clog.info() << "osd." << i << " marked down after no pg stats for " << diff << "seconds\n";
mon->clog->info() << "osd." << i << " marked down after no pg stats for " << diff << "seconds\n";
derr << "no osd or pg stats from osd." << i << " since " << t->second << ", " << diff
<< " seconds ago. marking down" << dendl;
pending_inc.new_state[i] = CEPH_OSD_UP;
@ -3014,7 +3014,7 @@ bool OSDMonitor::update_pools_status()
if (pool_is_full)
continue;
mon->clog.info() << "pool '" << pool_name
mon->clog->info() << "pool '" << pool_name
<< "' no longer full; removing FULL flag";
update_pool_flags(it->first, pool.get_flags() & ~pg_pool_t::FLAG_FULL);
@ -3025,12 +3025,12 @@ bool OSDMonitor::update_pools_status()
if (pool.quota_max_bytes > 0 &&
(uint64_t)sum.num_bytes >= pool.quota_max_bytes) {
mon->clog.warn() << "pool '" << pool_name << "' is full"
mon->clog->warn() << "pool '" << pool_name << "' is full"
<< " (reached quota's max_bytes: "
<< si_t(pool.quota_max_bytes) << ")";
} else if (pool.quota_max_objects > 0 &&
(uint64_t)sum.num_objects >= pool.quota_max_objects) {
mon->clog.warn() << "pool '" << pool_name << "' is full"
mon->clog->warn() << "pool '" << pool_name << "' is full"
<< " (reached quota's max_objects: "
<< pool.quota_max_objects << ")";
} else {
@ -3294,9 +3294,9 @@ int OSDMonitor::parse_erasure_code_profile(const vector<string> &erasure_code_pr
map<string,string> *erasure_code_profile_map,
stringstream &ss)
{
int r = get_str_map(g_conf->osd_pool_default_erasure_code_profile,
ss,
erasure_code_profile_map);
int r = get_json_str_map(g_conf->osd_pool_default_erasure_code_profile,
ss,
erasure_code_profile_map);
if (r)
return r;
assert((*erasure_code_profile_map).count("plugin"));

View File

@ -80,7 +80,7 @@ void PGMonitor::on_active()
update_logger();
if (mon->is_leader())
mon->clog.info() << "pgmap " << pg_map << "\n";
mon->clog->info() << "pgmap " << pg_map << "\n";
}
void PGMonitor::update_logger()

View File

@ -995,7 +995,7 @@ void Paxos::warn_on_future_time(utime_t t, entity_name_t from)
utime_t warn_diff = now - last_clock_drift_warn;
if (warn_diff >
pow(g_conf->mon_clock_drift_warn_backoff, clock_drift_warned)) {
mon->clog.warn() << "message from " << from << " was stamped " << diff
mon->clog->warn() << "message from " << from << " was stamped " << diff
<< "s in the future, clocks not synchronized";
last_clock_drift_warn = ceph_clock_now(g_ceph_context);
++clock_drift_warned;

View File

@ -179,7 +179,8 @@ CompatSet OSD::get_osd_compat_set() {
OSDService::OSDService(OSD *osd) :
osd(osd),
cct(osd->cct),
whoami(osd->whoami), store(osd->store), clog(osd->clog),
whoami(osd->whoami), store(osd->store),
log_client(osd->log_client), clog(osd->clog),
pg_recovery_stats(osd->pg_recovery_stats),
infos_oid(OSD::make_infos_oid()),
cluster_messenger(osd->cluster_messenger),
@ -611,9 +612,9 @@ void OSDService::check_nearfull_warning(const osd_stat_t &osd_stat)
}
last_msg = now;
if (cur_state == FULL)
clog.error() << "OSD full dropping all updates " << (int)(ratio * 100) << "% full";
clog->error() << "OSD full dropping all updates " << (int)(ratio * 100) << "% full";
else
clog.warn() << "OSD near full (" << (int)(ratio * 100) << "%)";
clog->warn() << "OSD near full (" << (int)(ratio * 100) << "%)";
}
bool OSDService::check_failsafe_full()
@ -1257,7 +1258,7 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op)
}
dout(7) << *pg << " misdirected op in " << m->get_map_epoch() << dendl;
clog.warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
clog->warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
<< " pg " << m->get_pg()
<< " to osd." << whoami
<< " not " << pg->acting
@ -1634,7 +1635,8 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_,
logger(NULL),
recoverystate_perf(NULL),
store(store_),
clog(cct, client_messenger, &mc->monmap, LogClient::NO_FLAGS),
log_client(cct, client_messenger, &mc->monmap, LogClient::NO_FLAGS),
clog(log_client.create_channel()),
whoami(id),
dev_path(dev), journal_path(jdev),
dispatch_running(false),
@ -2008,7 +2010,7 @@ int OSD::init()
goto out;
// tell monc about log_client so it will know about mon session resets
monc->set_log_client(&clog);
monc->set_log_client(&log_client);
osd_tp.start();
osd_op_tp.start();
@ -4044,7 +4046,7 @@ void OSD::check_ops_in_flight()
for (vector<string>::iterator i = warnings.begin();
i != warnings.end();
++i) {
clog.warn() << *i;
clog->warn() << *i;
}
}
return;
@ -5313,7 +5315,7 @@ void OSD::do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, buffe
rs = ss.str();
odata.append(ds);
dout(0) << "do_command r=" << r << " " << rs << dendl;
clog.info() << rs << "\n";
clog->info() << rs << "\n";
if (con) {
MCommandReply *reply = new MCommandReply(r, rs);
reply->set_tid(tid);
@ -6306,25 +6308,25 @@ void OSD::handle_osd_map(MOSDMap *m)
if (service.is_preparing_to_stop() || service.is_stopping()) {
service.got_stop_ack();
} else {
clog.warn() << "map e" << osdmap->get_epoch()
clog->warn() << "map e" << osdmap->get_epoch()
<< " wrongly marked me down";
}
}
else if (!osdmap->get_addr(whoami).probably_equals(client_messenger->get_myaddr()))
clog.error() << "map e" << osdmap->get_epoch()
clog->error() << "map e" << osdmap->get_epoch()
<< " had wrong client addr (" << osdmap->get_addr(whoami)
<< " != my " << client_messenger->get_myaddr() << ")";
else if (!osdmap->get_cluster_addr(whoami).probably_equals(cluster_messenger->get_myaddr()))
clog.error() << "map e" << osdmap->get_epoch()
clog->error() << "map e" << osdmap->get_epoch()
<< " had wrong cluster addr (" << osdmap->get_cluster_addr(whoami)
<< " != my " << cluster_messenger->get_myaddr() << ")";
else if (!osdmap->get_hb_back_addr(whoami).probably_equals(hb_back_server_messenger->get_myaddr()))
clog.error() << "map e" << osdmap->get_epoch()
clog->error() << "map e" << osdmap->get_epoch()
<< " had wrong hb back addr (" << osdmap->get_hb_back_addr(whoami)
<< " != my " << hb_back_server_messenger->get_myaddr() << ")";
else if (osdmap->get_hb_front_addr(whoami) != entity_addr_t() &&
!osdmap->get_hb_front_addr(whoami).probably_equals(hb_front_server_messenger->get_myaddr()))
clog.error() << "map e" << osdmap->get_epoch()
clog->error() << "map e" << osdmap->get_epoch()
<< " had wrong hb front addr (" << osdmap->get_hb_front_addr(whoami)
<< " != my " << hb_front_server_messenger->get_myaddr() << ")";
@ -8093,7 +8095,7 @@ void OSD::handle_op(OpRequestRef& op, OSDMapRef& osdmap)
}
if (!send_map->have_pg_pool(pgid.pool())) {
dout(7) << "dropping request; pool did not exist" << dendl;
clog.warn() << m->get_source_inst() << " invalid " << m->get_reqid()
clog->warn() << m->get_source_inst() << " invalid " << m->get_reqid()
<< " pg " << m->get_pg()
<< " to osd." << whoami
<< " in e" << osdmap->get_epoch()
@ -8103,7 +8105,7 @@ void OSD::handle_op(OpRequestRef& op, OSDMapRef& osdmap)
return;
} else if (send_map->get_pg_acting_role(pgid.pgid, whoami) < 0) {
dout(7) << "we are invalid target" << dendl;
clog.warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
clog->warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
<< " pg " << m->get_pg()
<< " to osd." << whoami
<< " in e" << osdmap->get_epoch()
@ -8520,12 +8522,12 @@ void OSD::check_config()
{
// some sanity checks
if (g_conf->osd_map_cache_size <= g_conf->osd_map_max_advance + 2) {
clog.warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
clog->warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
<< " is not > osd_map_max_advance ("
<< g_conf->osd_map_max_advance << ")";
}
if (g_conf->osd_map_cache_size <= (int)g_conf->osd_pg_epoch_persisted_max_stale + 2) {
clog.warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
clog->warn() << "osd_map_cache_size (" << g_conf->osd_map_cache_size << ")"
<< " is not > osd_pg_epoch_persisted_max_stale ("
<< g_conf->osd_pg_epoch_persisted_max_stale << ")";
}

View File

@ -312,7 +312,8 @@ public:
SharedPtrRegistry<spg_t, DeletingState> deleting_pgs;
const int whoami;
ObjectStore *&store;
LogClient &clog;
LogClient &log_client;
LogChannelRef clog;
PGRecoveryStats &pg_recovery_stats;
hobject_t infos_oid;
private:
@ -924,7 +925,8 @@ protected:
PerfCounters *recoverystate_perf;
ObjectStore *store;
LogClient clog;
LogClient log_client;
LogChannelRef clog;
int whoami;
std::string dev_path, journal_path;

View File

@ -2587,7 +2587,7 @@ int OSDMap::get_erasure_code_profile_default(CephContext *cct,
map<string,string> &profile_map,
ostream *ss)
{
int r = get_str_map(cct->_conf->osd_pool_default_erasure_code_profile,
int r = get_json_str_map(cct->_conf->osd_pool_default_erasure_code_profile,
*ss,
&profile_map);
profile_map["directory"] =

View File

@ -1542,7 +1542,7 @@ void PG::activate(ObjectStore::Transaction& t,
* behind.
*/
// backfill
osd->clog.info() << info.pgid << " restarting backfill on osd." << peer
osd->clog->info() << info.pgid << " restarting backfill on osd." << peer
<< " from (" << pi.log_tail << "," << pi.last_update << "] " << pi.last_backfill
<< " to " << info.last_update;
@ -2890,7 +2890,7 @@ void PG::read_state(ObjectStore *store, bufferlist &bl)
assert(!r);
}
if (oss.str().length())
osd->clog.error() << oss;
osd->clog->error() << oss;
// log any weirdness
log_weirdness();
@ -2899,12 +2899,12 @@ void PG::read_state(ObjectStore *store, bufferlist &bl)
void PG::log_weirdness()
{
if (pg_log.get_tail() != info.log_tail)
osd->clog.error() << info.pgid
osd->clog->error() << info.pgid
<< " info mismatch, log.tail " << pg_log.get_tail()
<< " != info.log_tail " << info.log_tail
<< "\n";
if (pg_log.get_head() != info.last_update)
osd->clog.error() << info.pgid
osd->clog->error() << info.pgid
<< " info mismatch, log.head " << pg_log.get_head()
<< " != info.last_update " << info.last_update
<< "\n";
@ -2912,7 +2912,7 @@ void PG::log_weirdness()
if (!pg_log.get_log().empty()) {
// sloppy check
if ((pg_log.get_log().log.begin()->version <= pg_log.get_tail()))
osd->clog.error() << info.pgid
osd->clog->error() << info.pgid
<< " log bound mismatch, info (" << pg_log.get_tail() << ","
<< pg_log.get_head() << "]"
<< " actual ["
@ -2922,7 +2922,7 @@ void PG::log_weirdness()
}
if (pg_log.get_log().caller_ops.size() > pg_log.get_log().log.size()) {
osd->clog.error() << info.pgid
osd->clog->error() << info.pgid
<< " caller_ops.size " << pg_log.get_log().caller_ops.size()
<< " > log size " << pg_log.get_log().log.size()
<< "\n";
@ -3356,7 +3356,7 @@ void PG::_scan_rollback_obs(
i != rollback_obs.end();
++i) {
if (i->generation < trimmed_to.version) {
osd->clog.error() << "osd." << osd->whoami
osd->clog->error() << "osd." << osd->whoami
<< " pg " << info.pgid
<< " found obsolete rollback obj "
<< *i << " generation < trimmed_to "
@ -3416,7 +3416,7 @@ void PG::_scan_snaps(ScrubMap &smap)
<< dendl;
assert(0);
}
osd->clog.error() << "osd." << osd->whoami
osd->clog->error() << "osd." << osd->whoami
<< " found snap mapper error on pg "
<< info.pgid
<< " oid " << hoid << " snaps in mapper: "
@ -3424,7 +3424,7 @@ void PG::_scan_snaps(ScrubMap &smap)
<< oi_snaps
<< "...repaired";
} else {
osd->clog.error() << "osd." << osd->whoami
osd->clog->error() << "osd." << osd->whoami
<< " found snap mapper error on pg "
<< info.pgid
<< " oid " << hoid << " snaps missing in mapper"
@ -4087,7 +4087,7 @@ void PG::scrub_compare_maps()
dout(2) << ss.str() << dendl;
if (!authoritative.empty() || !scrubber.inconsistent_snapcolls.empty()) {
osd->clog.error(ss);
osd->clog->error(ss);
}
for (map<hobject_t, pg_shard_t>::iterator i = authoritative.begin();
@ -4137,7 +4137,7 @@ void PG::scrub_process_inconsistent()
<< scrubber.missing.size() << " missing, "
<< scrubber.inconsistent.size() << " inconsistent objects\n";
dout(2) << ss.str() << dendl;
osd->clog.error(ss);
osd->clog->error(ss);
if (repair) {
state_clear(PG_STATE_CLEAN);
for (map<hobject_t, pair<ScrubMap::object, pg_shard_t> >::iterator i =
@ -4233,9 +4233,9 @@ void PG::scrub_finish()
oss << ", " << scrubber.fixed << " fixed";
oss << "\n";
if (total_errors)
osd->clog.error(oss);
osd->clog->error(oss);
else
osd->clog.info(oss);
osd->clog->info(oss);
}
// finish up
@ -4402,7 +4402,7 @@ void PG::fulfill_log(
dout(10) << " sending info+missing+log since " << query.since
<< dendl;
if (query.since != eversion_t() && query.since < pg_log.get_tail()) {
osd->clog.error() << info.pgid << " got broken pg_query_t::LOG since " << query.since
osd->clog->error() << info.pgid << " got broken pg_query_t::LOG since " << query.since
<< " when my log.tail is " << pg_log.get_tail()
<< ", sending full log instead\n";
mlog->log = pg_log.get_log(); // primary should not have requested this!!
@ -6292,11 +6292,11 @@ boost::statechart::result PG::RecoveryState::Active::react(const ActMap&)
if (unfound > 0 &&
pg->all_unfound_are_queried_or_lost(pg->get_osdmap())) {
if (pg->cct->_conf->osd_auto_mark_unfound_lost) {
pg->osd->clog.error() << pg->info.pgid << " has " << unfound
pg->osd->clog->error() << pg->info.pgid << " has " << unfound
<< " objects unfound and apparently lost, would automatically marking lost but NOT IMPLEMENTED\n";
//pg->mark_all_unfound_lost(*context< RecoveryMachine >().get_cur_transaction());
} else
pg->osd->clog.error() << pg->info.pgid << " has " << unfound << " objects unfound and apparently lost\n";
pg->osd->clog->error() << pg->info.pgid << " has " << unfound << " objects unfound and apparently lost\n";
}
if (!pg->snap_trimq.empty() &&

View File

@ -1381,7 +1381,7 @@ void ReplicatedPG::do_op(OpRequestRef& op)
if (m->get_object_locator() != oloc) {
dout(10) << " provided locator " << m->get_object_locator()
<< " != object's " << obc->obs.oi.soid << dendl;
osd->clog.warn() << "bad locator " << m->get_object_locator()
osd->clog->warn() << "bad locator " << m->get_object_locator()
<< " on object " << oloc
<< " op " << *m << "\n";
}
@ -3244,7 +3244,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
r = pgbackend->objects_read_sync(
soid, last, len, &t);
if (!t.is_zero()) {
osd->clog.error() << coll << " " << soid << " sparse-read found data in hole "
osd->clog->error() << coll << " " << soid << " sparse-read found data in hole "
<< last << "~" << len << "\n";
}
}
@ -3272,7 +3272,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
r = pgbackend->objects_read_sync(
soid, last, len, &t);
if (!t.is_zero()) {
osd->clog.error() << coll << " " << soid << " sparse-read found data in hole "
osd->clog->error() << coll << " " << soid << " sparse-read found data in hole "
<< last << "~" << len << "\n";
}
}
@ -3675,7 +3675,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
map<snapid_t, interval_set<uint64_t> >::const_iterator coi;
coi = ssc->snapset.clone_overlap.find(ci.cloneid);
if (coi == ssc->snapset.clone_overlap.end()) {
osd->clog.error() << "osd." << osd->whoami << ": inconsistent clone_overlap found for oid "
osd->clog->error() << "osd." << osd->whoami << ": inconsistent clone_overlap found for oid "
<< soid << " clone " << *clone_iter;
result = -EINVAL;
break;
@ -3690,7 +3690,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
map<snapid_t, uint64_t>::const_iterator si;
si = ssc->snapset.clone_size.find(ci.cloneid);
if (si == ssc->snapset.clone_size.end()) {
osd->clog.error() << "osd." << osd->whoami << ": inconsistent clone_size found for oid "
osd->clog->error() << "osd." << osd->whoami << ": inconsistent clone_size found for oid "
<< soid << " clone " << *clone_iter;
result = -EINVAL;
break;
@ -9947,7 +9947,7 @@ bool ReplicatedPG::start_recovery_ops(
if (missing.num_missing() > 0) {
// this shouldn't happen!
osd->clog.error() << info.pgid << " recovery ending with " << missing.num_missing()
osd->clog->error() << info.pgid << " recovery ending with " << missing.num_missing()
<< ": " << missing.missing << "\n";
return work_in_progress;
}
@ -9955,7 +9955,7 @@ bool ReplicatedPG::start_recovery_ops(
if (needs_recovery()) {
// this shouldn't happen!
// We already checked num_missing() so we must have missing replicas
osd->clog.error() << info.pgid << " recovery ending with missing replicas\n";
osd->clog->error() << info.pgid << " recovery ending with missing replicas\n";
return work_in_progress;
}
@ -10183,9 +10183,9 @@ int ReplicatedPG::prep_object_replica_pushes(
}
}
if (uhoh)
osd->clog.error() << info.pgid << " missing primary copy of " << soid << ", unfound\n";
osd->clog->error() << info.pgid << " missing primary copy of " << soid << ", unfound\n";
else
osd->clog.error() << info.pgid << " missing primary copy of " << soid
osd->clog->error() << info.pgid << " missing primary copy of " << soid
<< ", will try copies on " << missing_loc.get_locations(soid)
<< "\n";
return 0;
@ -11285,7 +11285,7 @@ void ReplicatedPG::agent_setup()
}
if (info.stats.stats_invalid) {
osd->clog.warn() << "pg " << info.pgid << " has invalid (post-split) stats; must scrub before tier agent can activate";
osd->clog->warn() << "pg " << info.pgid << " has invalid (post-split) stats; must scrub before tier agent can activate";
}
agent_choose_mode();
@ -11961,7 +11961,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
if (soid.snap == CEPH_SNAPDIR ||
soid.snap == CEPH_NOSNAP) {
if (p->second.attrs.count(SS_ATTR) == 0) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " no '" << SS_ATTR << "' attr";
++scrubber.shallow_errors;
continue;
@ -11974,7 +11974,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
// did we finish the last oid?
if (head != hobject_t() &&
!pool.info.allow_incomplete_clones()) {
osd->clog.error() << mode << " " << info.pgid << " " << head
osd->clog->error() << mode << " " << info.pgid << " " << head
<< " missing clones";
++scrubber.shallow_errors;
}
@ -11992,7 +11992,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
// basic checks.
if (p->second.attrs.count(OI_ATTR) == 0) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " no '" << OI_ATTR << "' attr";
++scrubber.shallow_errors;
continue;
@ -12002,7 +12002,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
object_info_t oi(bv);
if (pgbackend->be_get_ondisk_size(oi.size) != p->second.size) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " on disk size (" << p->second.size
<< ") does not match object info size ("
<< oi.size << ") adjusted for ondisk to ("
@ -12054,19 +12054,19 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
}
}
if (!next_clone.is_min() && next_clone != soid) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " expected clone " << next_clone;
++scrubber.shallow_errors;
}
if (soid.snap == CEPH_NOSNAP || soid.snap == CEPH_SNAPDIR) {
if (soid.snap == CEPH_NOSNAP && !snapset.head_exists) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " snapset.head_exists=false, but head exists";
++scrubber.shallow_errors;
}
if (soid.snap == CEPH_SNAPDIR && snapset.head_exists) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " snapset.head_exists=true, but snapdir exists";
++scrubber.shallow_errors;
}
@ -12081,7 +12081,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
stat.num_object_clones++;
if (head == hobject_t()) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " found clone without head";
++scrubber.shallow_errors;
continue;
@ -12092,7 +12092,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
}
if (oi.size != snapset.clone_size[*curclone]) {
osd->clog.error() << mode << " " << info.pgid << " " << soid
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " size " << oi.size << " != clone_size "
<< snapset.clone_size[*curclone];
++scrubber.shallow_errors;
@ -12123,7 +12123,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
if (!next_clone.is_min() &&
!pool.info.allow_incomplete_clones()) {
osd->clog.error() << mode << " " << info.pgid
osd->clog->error() << mode << " " << info.pgid
<< " expected clone " << next_clone;
++scrubber.shallow_errors;
}
@ -12169,7 +12169,7 @@ void ReplicatedPG::_scrub_finish()
!info.stats.hitset_stats_invalid) ||
scrub_cstat.sum.num_whiteouts != info.stats.stats.sum.num_whiteouts ||
scrub_cstat.sum.num_bytes != info.stats.stats.sum.num_bytes) {
osd->clog.error() << info.pgid << " " << mode
osd->clog->error() << info.pgid << " " << mode
<< " stat mismatch, got "
<< scrub_cstat.sum.num_objects << "/" << info.stats.stats.sum.num_objects << " objects, "
<< scrub_cstat.sum.num_object_clones << "/" << info.stats.stats.sum.num_object_clones << " clones, "

View File

@ -417,7 +417,7 @@ public:
ceph_tid_t get_tid() { return osd->get_tid(); }
LogClientTemp clog_error() { return osd->clog.error(); }
LogClientTemp clog_error() { return osd->clog->error(); }
/*
* Capture all object state associated with an in-progress read or write.

View File

@ -131,8 +131,16 @@ public:
config_option *opt = config_optionsp + i;
if (opt->type == OPT_STR) {
std::string *str = (std::string *)opt->conf_ptr(this);
if (str->find("$") != string::npos)
after_count++;
size_t pos = 0;
while ((pos = str->find("$", pos)) != string::npos) {
if (str->substr(pos, 8) != "$channel") {
std::cout << "unexpected meta-variable found at pos " << pos
<< " of '" << *str << "'" << std::endl;
after_count++;
}
pos++;
}
}
}
ASSERT_EQ(0, after_count);

View File

@ -25,10 +25,10 @@ TEST(str_map, json) {
map<string,string> str_map;
stringstream ss;
// well formatted
ASSERT_EQ(0, get_str_map("{\"key\": \"value\"}", ss, &str_map));
ASSERT_EQ(0, get_json_str_map("{\"key\": \"value\"}", ss, &str_map));
ASSERT_EQ("value", str_map["key"]);
// well formatted but not a JSON object
ASSERT_EQ(-EINVAL, get_str_map("\"key\"", ss, &str_map));
ASSERT_EQ(-EINVAL, get_json_str_map("\"key\"", ss, &str_map));
ASSERT_NE(string::npos, ss.str().find("must be a JSON object"));
}
@ -37,7 +37,7 @@ TEST(str_map, plaintext) {
{
map<string,string> str_map;
ASSERT_EQ(0, get_str_map(" foo=bar\t\nfrob=nitz yeah right= \n\t",
ss, &str_map));
&str_map));
ASSERT_EQ(4u, str_map.size());
ASSERT_EQ("bar", str_map["foo"]);
ASSERT_EQ("nitz", str_map["frob"]);
@ -46,15 +46,15 @@ TEST(str_map, plaintext) {
}
{
map<string,string> str_map;
ASSERT_EQ(0, get_str_map("that", ss, &str_map));
ASSERT_EQ(0, get_str_map("that", &str_map));
ASSERT_EQ(1u, str_map.size());
ASSERT_EQ("", str_map["that"]);
}
{
map<string,string> str_map;
ASSERT_EQ(0, get_str_map(" \t \n ", ss, &str_map));
ASSERT_EQ(0, get_str_map(" \t \n ", &str_map));
ASSERT_EQ(0u, str_map.size());
ASSERT_EQ(0, get_str_map("", ss, &str_map));
ASSERT_EQ(0, get_str_map("", &str_map));
ASSERT_EQ(0u, str_map.size());
}
}

View File

@ -706,7 +706,7 @@ class OSDStub : public TestStub
e.who = messenger->get_myinst();
e.stamp = now;
e.seq = seq++;
e.type = CLOG_DEBUG;
e.prio = CLOG_DEBUG;
e.msg = "OSDStub::op_log";
m->entries.push_back(e);
}