Mirror of https://github.com/ceph/ceph, synced 2025-03-30 23:40:09 +00:00
Merge pull request #3605 from ceph/wip-da-SCA-20150129

Various SCA and smaller bugfixes

Reviewed-by: Sage Weil <sage@redhat.com>

commit f08e163776

src/.gitignore (vendored): 8 changed lines
@@ -30,6 +30,7 @@ Makefile
/ceph_erasure_code
/ceph_erasure_code_benchmark
/ceph_erasure_code_non_regression
+/ceph_perf_objectstore
/ceph_psim
/ceph_radosacl
/ceph_rgw_jsonparser
@@ -51,6 +52,7 @@ Makefile
/ceph-kvstore-tool
/ceph_ver.h
/dev
+/get_command_descriptions
/init-ceph
/keyring
/librados-config
@@ -69,6 +71,8 @@ Makefile
/rbd-replay-prep
/rest-bench
/sample.fetch_config
+/simple_client
+/simple_server
/TAGS
/tags
/testmsgr
@@ -76,8 +80,8 @@ Makefile
/test-suite.log
/cls_test_*
/unittest_*
-/get_command_descriptions
-/ceph_perf_objectstore
+/xio_client
+/xio_server

# old dir, may in use by older branches
/leveldb
@@ -33,7 +33,7 @@ struct cls_log_list_op {
  int max_entries; /* upperbound to returned num of entries
                      might return less than that and still be truncated */

-  cls_log_list_op() {}
+  cls_log_list_op() : max_entries(0) {}

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
@@ -366,7 +366,7 @@ static void decode_list_index_key(const string& index_key, cls_rgw_obj_key *key,

  list<string>::iterator iter = vals.begin();
  key->name = *iter;
-  iter++;
+  ++iter;

  assert(iter != vals.end());

@@ -34,7 +34,7 @@ struct cls_statelog_list_op {
  int max_entries; /* upperbound to returned num of entries
                      might return less than that and still be truncated */

-  cls_statelog_list_op() {}
+  cls_statelog_list_op() : max_entries(0) {}

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
@@ -119,7 +119,7 @@ struct cls_statelog_check_state_op {
  string object;
  uint32_t state;

-  cls_statelog_check_state_op() {}
+  cls_statelog_check_state_op() : state(0) {}

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
@@ -16,7 +16,7 @@ struct cls_statelog_entry {
  bufferlist data;
  uint32_t state; /* user defined state */

-  cls_statelog_entry() {}
+  cls_statelog_entry() : state(0) {}

  void encode(bufferlist& bl) const {
    ENCODE_START(1, 1, bl);
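The cls_log and cls_statelog hunks above all apply the same fix: a default constructor that left a plain integer member uninitialized now value-initializes it, so a default-constructed op never encodes garbage. A minimal sketch of the idea, using an invented struct rather than the real Ceph types:

#include <cstdint>
#include <vector>

// Illustrative stand-in for a cls op struct; not the actual Ceph type.
struct list_op {
  int max_entries;                 // plain int: indeterminate unless initialized
  list_op() : max_entries(0) {}    // the pattern applied in the hunks above

  // Encoding reads every member, so an uninitialized max_entries would
  // serialize whatever happened to be on the stack.
  void encode(std::vector<unsigned char>& out) const {
    const unsigned char* p = reinterpret_cast<const unsigned char*>(&max_entries);
    out.insert(out.end(), p, p + sizeof(max_entries));
  }
};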
@@ -57,7 +57,7 @@ void Cycles::init()
  // After 10ms have elapsed, take the ratio between these readings.

  struct timeval start_time, stop_time;
-  uint64_t start_cycles, micros;
+  uint64_t micros;
  double old_cycles;

  // There is one tricky aspect, which is that we could get interrupted
@@ -70,7 +70,7 @@ void Cycles::init()
    if (gettimeofday(&start_time, NULL) != 0) {
      assert(0 == "couldn't read clock");
    }
-    start_cycles = rdtsc();
+    uint64_t start_cycles = rdtsc();
    while (1) {
      if (gettimeofday(&stop_time, NULL) != 0) {
        assert(0 == "couldn't read clock");
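The Cycles::init() change narrows start_cycles to the point where it is first assigned, inside the calibration loop. A rough sketch of the same calibration idea, assuming an x86 TSC; __rdtsc() is a stand-in for the source's rdtsc() helper, and the 10 ms threshold comes from the comment in the hunk above:

#include <sys/time.h>
#include <x86intrin.h>   // __rdtsc(); x86-only stand-in for the source's rdtsc()
#include <cassert>
#include <cstdint>

// Pair a TSC read with gettimeofday(), wait ~10 ms of wall time,
// then take the ratio to estimate cycles per second.
static double estimate_cycles_per_second()
{
  struct timeval start_time, stop_time;
  if (gettimeofday(&start_time, NULL) != 0)
    assert(0 == "couldn't read clock");
  // Declared at the point of first use, mirroring the fix above.
  uint64_t start_cycles = __rdtsc();
  while (true) {
    if (gettimeofday(&stop_time, NULL) != 0)
      assert(0 == "couldn't read clock");
    int64_t micros =
      (int64_t)(stop_time.tv_sec - start_time.tv_sec) * 1000000 +
      (stop_time.tv_usec - start_time.tv_usec);
    if (micros >= 10000) {                      // ~10 ms elapsed
      uint64_t stop_cycles = __rdtsc();
      return (double)(stop_cycles - start_cycles) * 1e6 / micros;
    }
  }
}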
@@ -22,7 +22,6 @@
 */

/* Static string length */
-// cppcheck-suppress sizeofDivisionMemfunc
#define SSTRL(x) ((sizeof(x)/sizeof(x[0])) - 1)

#define LESS_THAN_XESCAPE "&lt;"
@@ -77,22 +76,27 @@ void escape_xml_attr(const char *buf, char *out)
    unsigned char c = *b;
    switch (c) {
    case '<':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, LESS_THAN_XESCAPE, SSTRL(LESS_THAN_XESCAPE));
      o += SSTRL(LESS_THAN_XESCAPE);
      break;
    case '&':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, AMPERSAND_XESCAPE, SSTRL(AMPERSAND_XESCAPE));
      o += SSTRL(AMPERSAND_XESCAPE);
      break;
    case '>':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, GREATER_THAN_XESCAPE, SSTRL(GREATER_THAN_XESCAPE));
      o += SSTRL(GREATER_THAN_XESCAPE);
      break;
    case '\'':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, SGL_QUOTE_XESCAPE, SSTRL(SGL_QUOTE_XESCAPE));
      o += SSTRL(SGL_QUOTE_XESCAPE);
      break;
    case '"':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, DBL_QUOTE_XESCAPE, SSTRL(DBL_QUOTE_XESCAPE));
      o += SSTRL(DBL_QUOTE_XESCAPE);
      break;
@@ -166,22 +170,27 @@ void escape_json_attr(const char *buf, int src_len, char *out)
    unsigned char c = *b;
    switch (c) {
    case '"':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, DBL_QUOTE_JESCAPE, SSTRL(DBL_QUOTE_JESCAPE));
      o += SSTRL(DBL_QUOTE_JESCAPE);
      break;
    case '\\':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, BACKSLASH_JESCAPE, SSTRL(BACKSLASH_JESCAPE));
      o += SSTRL(BACKSLASH_JESCAPE);
      break;
    case '/':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, SLASH_JESCAPE, SSTRL(SLASH_JESCAPE));
      o += SSTRL(SLASH_JESCAPE);
      break;
    case '\t':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, TAB_JESCAPE, SSTRL(TAB_JESCAPE));
      o += SSTRL(TAB_JESCAPE);
      break;
    case '\n':
+      // cppcheck-suppress sizeofDivisionMemfunc
      memcpy(o, NEWLINE_JESCAPE, SSTRL(NEWLINE_JESCAPE));
      o += SSTRL(NEWLINE_JESCAPE);
      break;
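These escape hunks drop the single cppcheck suppression that sat on the SSTRL macro definition and instead add one above each line that expands the macro, because cppcheck inline suppressions silence a warning only on the line immediately following the comment, i.e. at the expansion site. A standalone illustration of the mechanism; the SSTRL macro and the warning id follow the source, while the helper function and the escape constant's value are for illustration:

#include <cstring>

/* Static string length: characters in a string literal, excluding the NUL. */
#define SSTRL(x) ((sizeof(x)/sizeof(x[0])) - 1)
#define LESS_THAN_XESCAPE "&lt;"

// Hypothetical helper just to show where the suppression belongs: cppcheck
// reads "// cppcheck-suppress <id>" and mutes that warning on the very next
// line, so the comment has to sit at the use site, not at the #define.
static char *append_lt(char *o)
{
  // cppcheck-suppress sizeofDivisionMemfunc
  std::memcpy(o, LESS_THAN_XESCAPE, SSTRL(LESS_THAN_XESCAPE));
  return o + SSTRL(LESS_THAN_XESCAPE);
}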
@@ -81,7 +81,7 @@ namespace CrushTreeDumper {
      if (root == roots.end())
        return false;
      push_back(Item(*root, 0, crush->get_bucket_weightf(*root)));
-      root++;
+      ++root;
    }

    qi = front();
@@ -147,7 +147,7 @@ namespace CrushTreeDumper {
      f->open_array_section("children");
      for (list<int>::const_iterator i = qi.children.begin();
           i != qi.children.end();
-           i++) {
+           ++i) {
        f->dump_int("child", *i);
      }
      f->close_section();
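A large share of the hunks in this PR, here and in the PG, ReplicatedPG and transaction hunks below, replace postfix i++ with prefix ++i in iterator loops. Postfix increment has to create a copy of the iterator's old value that the loop never uses; prefix increment avoids that temporary. A small sketch of the preferred loop shape, with an illustrative container:

#include <iostream>
#include <list>

int main()
{
  std::list<int> children = {1, 2, 3};

  // Prefix increment: no temporary copy of the iterator is created.
  for (std::list<int>::const_iterator i = children.begin();
       i != children.end();
       ++i) {
    std::cout << *i << '\n';
  }
  return 0;
}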
@@ -2951,8 +2951,7 @@ bool MDS::ms_verify_authorizer(Connection *con, int peer_type,

    dout(10) << __func__ << ": parsing auth_cap_str='" << auth_cap_str << "'" << dendl;
    std::ostringstream errstr;
-    int parse_success = s->auth_caps.parse(auth_cap_str, &errstr);
-    if (parse_success == false) {
+    if (!s->auth_caps.parse(auth_cap_str, &errstr)) {
      dout(1) << __func__ << ": auth cap parse error: " << errstr.str()
              << " parsing '" << auth_cap_str << "'" << dendl;
    }
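The MDS change drops an intermediate result variable that was declared as int and compared against false, and tests the parse call directly instead. A minimal illustration; parse() here is an invented stand-in for the cap parser:

#include <iostream>
#include <string>

// Hypothetical stand-in for the cap parser: returns true on success.
static bool parse(const std::string& caps, std::string *err)
{
  if (caps.empty()) {
    *err = "empty cap string";
    return false;
  }
  return true;
}

int main()
{
  std::string err;
  // Instead of: int ok = parse(...); if (ok == false) { ... }
  if (!parse("", &err))
    std::cerr << "auth cap parse error: " << err << '\n';
  return 0;
}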
@@ -23,16 +23,15 @@
#include <map>
using namespace std;

#include "auth/AuthSessionHandler.h"
#include "common/Mutex.h"
#include "include/buffer.h"

#include "auth/AuthSessionHandler.h"
#include "include/buffer.h"
#include "msg/Connection.h"
#include "net_handler.h"
#include "Event.h"
#include "msg/Messenger.h"

#include "Event.h"
#include "net_handler.h"

class AsyncMessenger;

/*
@@ -50,8 +50,8 @@ int KqueueDriver::add_event(int fd, int cur_mask, int add_mask)
                 << "add_mask" << add_mask << dendl;
  struct kevent ke;
  int filter = 0;
-  filter |= add_mask & EVENT_READABLE ? EVFILT_READ : 0;
-  filter |= add_mask & EVENT_WRITABLE ? EVFILT_WRITE : 0;
+  filter |= (add_mask & EVENT_READABLE) ? EVFILT_READ : 0;
+  filter |= (add_mask & EVENT_WRITABLE) ? EVFILT_WRITE : 0;

  if (filter) {
    EV_SET(&ke, fd, filter, EV_ADD, 0, 0, NULL);
@@ -72,8 +72,8 @@ void KqueueDriver::del_event(int fd, int cur_mask, int delmask)
  struct kevent ee;
  struct kevent ke;
  int filter = 0;
-  filter |= delmask & EVENT_READABLE ? EVFILT_READ : 0;
-  filter |= delmask & EVENT_WRITABLE ? EVFILT_WRITE : 0;
+  filter |= (delmask & EVENT_READABLE) ? EVFILT_READ : 0;
+  filter |= (delmask & EVENT_WRITABLE) ? EVFILT_WRITE : 0;

  if (filter) {
    EV_SET(&ke, fd, filter, EV_DELETE, 0, 0, NULL);
@@ -93,8 +93,6 @@ int KqueueDriver::event_wait(vector<FiredFileEvent> &fired_events, struct timeva
{
  int retval, numevents = 0;
  struct timespec timeout;
-  timeout.tv_sec = tvp->tv_sec;
-  timeout.tv_nsec = tvp->tv_usec * 1000;

  if (tvp != NULL) {
    timeout.tv_sec = tvp->tv_sec;
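Two separate cleanups in the kqueue driver: the filter assignments gain parentheses so the mask-then-select reading is explicit (bitwise & already binds tighter than ?:, so behaviour is unchanged), and event_wait() stops dereferencing tvp before its NULL check. A condensed, platform-neutral sketch of both patterns; the flag values and helper functions are invented, only the shape follows the driver code:

#include <sys/time.h>
#include <ctime>
#include <cstddef>

// Invented flag values standing in for EVENT_READABLE/EVENT_WRITABLE and
// EVFILT_READ/EVFILT_WRITE; the real driver uses the kqueue constants.
static const int EVENT_READABLE = 0x1;
static const int EVENT_WRITABLE = 0x2;
static const int FILT_READ      = 0x10;
static const int FILT_WRITE     = 0x20;

static int build_filter(int add_mask)
{
  int filter = 0;
  // Parentheses spell out the intent: mask first, then choose the filter.
  filter |= (add_mask & EVENT_READABLE) ? FILT_READ : 0;
  filter |= (add_mask & EVENT_WRITABLE) ? FILT_WRITE : 0;
  return filter;
}

static bool make_timeout(const struct timeval *tvp, struct timespec *out)
{
  // Dereference tvp only after the NULL check; NULL means "block forever".
  if (tvp != NULL) {
    out->tv_sec  = tvp->tv_sec;
    out->tv_nsec = tvp->tv_usec * 1000;
    return true;     // a finite timeout was produced
  }
  return false;      // caller should wait indefinitely
}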
@@ -29,7 +29,7 @@ class KqueueDriver : public EventDriver {
  int size;

 public:
-  KqueueDriver(CephContext *c): kqfd(-1), events(NULL), cct(c) {}
+  KqueueDriver(CephContext *c): kqfd(-1), events(NULL), cct(c), size(0) {}
  virtual ~KqueueDriver() {
    if (kqfd != -1)
      close(kqfd);
@@ -194,7 +194,7 @@ static inline XioDispatchHook* pool_alloc_xio_dispatch_hook(
                         sizeof(XioDispatchHook), &mp_mem);
  if (!!e)
    return NULL;
-  XioDispatchHook *xhook = (XioDispatchHook*) mp_mem.addr;
+  XioDispatchHook *xhook = static_cast<XioDispatchHook*>(mp_mem.addr);
  new (xhook) XioDispatchHook(xcon, m, msg_seq, mp_mem);
  return xhook;
}
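This and the following Xio hunks replace C-style casts with static_cast or reinterpret_cast when turning raw pool memory into a typed pointer, followed by placement new to construct the object in place. A self-contained sketch of that pattern; the pool allocation is faked with malloc here, whereas the real code uses its messenger mempool:

#include <cstdlib>
#include <new>
#include <string>

struct Hook {
  std::string name;
  explicit Hook(const std::string& n) : name(n) {}
};

int main()
{
  // Stand-in for a pool allocation that returns untyped memory.
  void *addr = std::malloc(sizeof(Hook));
  if (addr == NULL)
    return 1;

  // static_cast documents the void* -> object-pointer conversion and lets
  // the compiler reject conversions a C-style cast would silently accept.
  Hook *hook = static_cast<Hook*>(addr);
  new (hook) Hook("example");    // construct the object in the pool memory

  hook->~Hook();                 // placement new requires an explicit destructor call
  std::free(addr);
  return 0;
}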
@@ -639,8 +639,9 @@ xio_place_buffers(buffer::list& bl, XioMsg *xmsg, struct xio_msg*& req,
int XioMessenger::bind(const entity_addr_t& addr)
{
  const entity_addr_t *a = &addr;
  struct entity_addr_t _addr = *a;

  if (a->is_blank_ip()) {
    struct entity_addr_t _addr = *a;
    a = &_addr;
    std::vector <std::string> my_sections;
    g_conf->get_my_sections(my_sections);
@@ -717,7 +718,7 @@ static inline XioMsg* pool_alloc_xio_msg(Message *m, XioConnection *xcon,
  int e = xpool_alloc(xio_msgr_noreg_mpool, sizeof(XioMsg), &mp_mem);
  if (!!e)
    return NULL;
-  XioMsg *xmsg = (XioMsg*) mp_mem.addr;
+  XioMsg *xmsg = reinterpret_cast<XioMsg*>(mp_mem.addr);
  assert(!!xmsg);
  new (xmsg) XioMsg(m, xcon, mp_mem, ex_cnt);
  return xmsg;
@@ -25,7 +25,7 @@ int XioDispatchHook::release_msgs()
  cl_flag = true;

  /* queue for release */
-  xrsp = (XioRsp *) rsp_pool.alloc(sizeof(XioRsp));
+  xrsp = static_cast<XioRsp *>(rsp_pool.alloc(sizeof(XioRsp)));
  new (xrsp) XioRsp(xcon, this);

  /* merge with portal traffic */
@@ -230,7 +230,7 @@ public:
  }

  void alloc_trailers(int cnt) {
-    req_arr = (xio_msg_ex*) malloc(cnt * sizeof(xio_msg_ex));
+    req_arr = static_cast<xio_msg_ex*>(malloc(cnt * sizeof(xio_msg_ex)));
    for (int ix = 0; ix < cnt; ++ix) {
      xio_msg_ex* xreq = &(req_arr[ix]);
      new (xreq) xio_msg_ex(this);
@@ -623,13 +623,13 @@ public:
    list<bufferptr> list = bl.buffers();
    std::list<bufferptr>::iterator p;

-    for(p = list.begin(); p != list.end(); p++) {
+    for(p = list.begin(); p != list.end(); ++p) {
      assert(p->length() % sizeof(Op) == 0);

      char* raw_p = p->c_str();
      char* raw_end = raw_p + p->length();
      while (raw_p < raw_end) {
-        _update_op((Op*)raw_p, cm, om);
+        _update_op(reinterpret_cast<Op*>(raw_p), cm, om);
        raw_p += sizeof(Op);
      }
    }
@@ -655,7 +655,7 @@ public:
    map<coll_t, __le32>::iterator coll_index_p;
    for (coll_index_p = other.coll_index.begin();
         coll_index_p != other.coll_index.end();
-         coll_index_p++) {
+         ++coll_index_p) {
      cm[coll_index_p->second] = _get_coll_id(coll_index_p->first);
    }

@@ -663,7 +663,7 @@ public:
    map<ghobject_t, __le32>::iterator object_index_p;
    for (object_index_p = other.object_index.begin();
         object_index_p != other.object_index.end();
-         object_index_p++) {
+         ++object_index_p) {
      om[object_index_p->second] = _get_object_id(object_index_p->first);
    }

@@ -792,14 +792,14 @@ public:
    map<coll_t, __le32>::iterator coll_index_p;
    for (coll_index_p = t->coll_index.begin();
         coll_index_p != t->coll_index.end();
-         coll_index_p++) {
+         ++coll_index_p) {
      colls[coll_index_p->second] = coll_index_p->first;
    }

    map<ghobject_t, __le32>::iterator object_index_p;
    for (object_index_p = t->object_index.begin();
         object_index_p != t->object_index.end();
-         object_index_p++) {
+         ++object_index_p) {
      objects[object_index_p->second] = object_index_p->first;
    }
  }
@@ -814,7 +814,7 @@ public:
    Op* decode_op() {
      assert(ops > 0);

-      Op* op = (Op*)op_buffer_p;
+      Op* op = reinterpret_cast<Op*>(op_buffer_p);
      op_buffer_p += sizeof(Op);
      ops--;

@@ -880,7 +880,7 @@ private:
      op_ptr.set_offset(op_ptr.offset() + sizeof(Op));

      char* p = ptr.c_str();
-      return (Op*)p;
+      return reinterpret_cast<Op*>(p);
    }
    __le32 _get_coll_id(const coll_t& coll) {
      map<coll_t, __le32>::iterator c = coll_index.find(coll);
@@ -1596,7 +1596,6 @@ public:
      uint32_t _largest_data_off = 0;
      uint32_t _largest_data_off_in_tbl = 0;
      uint32_t _fadvise_flags = 0;
-      bool tolerate_collection_add_enoent = false;

      ::decode(_ops, bl);
      ::decode(_pad_unused_bytes, bl);
@@ -1607,6 +1606,7 @@ public:
      }
      ::decode(tbl, bl);
      if (struct_v >= 7) {
+        bool tolerate_collection_add_enoent = false;
        ::decode(tolerate_collection_add_enoent, bl);
      }
      if (struct_v >= 8) {
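The hunks above combine three of the recurring patterns: prefix increment for map iterators, reinterpret_cast instead of (Op*) when walking a buffer that holds a packed array of fixed-size Op records, and moving a decode-time temporary into the narrowest scope that uses it. A small sketch of the buffer-walking part; the Op struct and the byte buffer are simplified stand-ins for the encoded transaction data:

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for the fixed-size Op record stored in the buffer.
struct Op {
  uint32_t op;
  uint32_t len;
};

static void visit_ops(std::vector<char>& buf)
{
  assert(buf.size() % sizeof(Op) == 0);
  char* raw_p   = buf.data();
  char* raw_end = raw_p + buf.size();
  while (raw_p < raw_end) {
    // reinterpret_cast makes the type pun explicit instead of hiding it
    // behind a C-style cast; the layout assumption is unchanged.
    Op* op = reinterpret_cast<Op*>(raw_p);
    (void)op;                  // inspect or update *op here
    raw_p += sizeof(Op);
  }
}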
@@ -4,7 +4,6 @@
#include "include/types.h"

#include "Ager.h"
#include "os/ObjectStore.h"

#include "common/Clock.h"
#include "common/debug.h"
@@ -3672,7 +3672,7 @@ void PG::repair_object(
  list<pair<ScrubMap::object, pg_shard_t> >::iterator i;
  for (i = ok_peers->begin();
       i != ok_peers->end();
-       i++)
+       ++i)
    missing_loc.add_location(soid, i->second);

  pg_log.set_last_requested(0);
@@ -4199,7 +4199,7 @@ void PG::scrub_compare_maps()
      list<pair<ScrubMap::object, pg_shard_t> > good_peers;
      for (list<pg_shard_t>::const_iterator j = i->second.begin();
           j != i->second.end();
-           j++) {
+           ++j) {
        good_peers.push_back(make_pair(maps[*j]->objects[i->first], *j));
      }
      scrubber.authoritative.insert(
@@ -6496,7 +6496,7 @@ void ReplicatedPG::process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r)
    // cancel and requeue proxy reads on this object
    kick_proxy_read_blocked(cobc->obs.oi.soid);
    for (map<ceph_tid_t, ProxyReadOpRef>::iterator it = proxyread_ops.begin();
-        it != proxyread_ops.end(); it++) {
+        it != proxyread_ops.end(); ++it) {
      if (it->second->soid == cobc->obs.oi.soid) {
        cancel_proxy_read(it->second);
      }
@@ -128,7 +128,7 @@ enumerate_images(struct rbd_image_data *data)
        ((strlen(mount_image_name) > 0) &&
         (strcmp(ip, mount_image_name) == 0))) {
      fprintf(stderr, "%s, ", ip);
-      im = (rbd_image*) malloc(sizeof(*im));
+      im = static_cast<rbd_image*>(malloc(sizeof(*im)));
      im->image_name = ip;
      im->next = *head;
      *head = im;
@@ -7535,7 +7535,7 @@ int RGWRados::cls_bucket_list(rgw_bucket& bucket, rgw_obj_key& start, const stri
    if (vcurrents[i] != vends[i])
      *is_truncated = true;
  }
-  if (m.size())
+  if (!m.empty())
    *last_entry = m.rbegin()->first;

  return 0;
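This hunk and the two below replace tests like if (container.size()) or size() > 0 with !container.empty(). empty() states the question being asked, and it is guaranteed constant time, whereas before C++11 std::list::size() was allowed to be linear. A trivial illustration:

#include <map>
#include <string>

int main()
{
  std::map<std::string, int> m;
  m["first"] = 1;

  std::string last_entry;
  // Preferred: empty() asks "is there anything here?" directly.
  if (!m.empty())
    last_entry = m.rbegin()->first;
  return last_entry.empty() ? 1 : 0;
}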
@@ -286,7 +286,7 @@ void RGWListBucket_ObjStore_S3::send_versioned_response()
      dump_owner(s, iter->owner, iter->owner_display_name);
      s->formatter->close_section();
    }
-    if (common_prefixes.size() > 0) {
+    if (!common_prefixes.empty()) {
      map<string, bool>::iterator pref_iter;
      for (pref_iter = common_prefixes.begin(); pref_iter != common_prefixes.end(); ++pref_iter) {
        s->formatter->open_array_section("CommonPrefixes");
@@ -64,11 +64,11 @@ class DumbBackend : public Backend {

  public:
    WriteQueue(
-      DumbBackend *backend,
+      DumbBackend *_backend,
      time_t ti,
      ThreadPool *tp) :
      ThreadPool::WorkQueue<write_item>("DumbBackend::queue", ti, ti*10, tp),
-      backend(backend) {}
+      backend(_backend) {}
    bool _enqueue(write_item *item) {
      item_queue.push_back(item);
      return true;
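Here the constructor parameter shadowed the member of the same name. backend(backend) does initialize the member from the parameter correctly, but the shadowing is easy to misread and is what static analysis flags, so the parameter is renamed. A reduced example of the pattern, with illustrative class names:

class Backend;   // forward declaration, as in the test backend above

class WriteQueue {
  Backend *backend;
 public:
  // Renaming the parameter removes the member/parameter shadowing that
  // tools such as cppcheck and -Wshadow complain about.
  explicit WriteQueue(Backend *_backend) : backend(_backend) {}
  Backend *get() const { return backend; }
};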
@@ -471,7 +471,7 @@ TEST_F(IsaErasureCodeTest, isa_vandermonde_exhaustive)
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
-    bool err = true;
+    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
@@ -598,7 +598,7 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_exhaustive)
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
-    bool err = true;
+    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
@@ -725,7 +725,7 @@ TEST_F(IsaErasureCodeTest, isa_cauchy_cache_trash)
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
-    bool err = true;
+    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
@@ -851,7 +851,7 @@ TEST_F(IsaErasureCodeTest, isa_xor_codec)
  for (int l1 = 0; l1 < (k + m); l1++) {
    map<int, bufferlist> degraded = encoded;
    set<int> want_to_decode;
-    bool err = true;
+    bool err;
    degraded.erase(l1);
    want_to_decode.insert(l1);
    err = DecodeAndVerify(Isa, degraded, want_to_decode, enc, length);
@@ -336,6 +336,7 @@ int test_ls(rados_ioctx_t io_ctx, size_t num_expected, ...)
      image_names.erase(it);
    } else {
      ADD_FAILURE() << "Unable to find image " << expected;
+      va_end(ap);
      return -ENOENT;
    }
  }
@@ -393,6 +394,7 @@ int test_ls_pp(librbd::RBD& rbd, librados::IoCtx& io_ctx, size_t num_expected, .
    vector<string>::iterator listed_name = find(names.begin(), names.end(), string(expected));
    if (listed_name == names.end()) {
      ADD_FAILURE() << "Unable to find image " << expected;
+      va_end(ap);
      return -ENOENT;
    }
    names.erase(listed_name);
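Both test helpers walk a va_list and can bail out early when an expected image is missing; the fix adds va_end(ap) on that early-return path, since every va_start must be matched by va_end before the function returns. A minimal variadic sketch of the same rule; the helper and its arguments are invented:

#include <cstdarg>
#include <cstdio>

// Returns the index of the first negative argument, or -1 if none.
// 'count' tells the function how many ints follow.
static int first_negative(int count, ...)
{
  va_list ap;
  va_start(ap, count);
  for (int i = 0; i < count; ++i) {
    int v = va_arg(ap, int);
    if (v < 0) {
      va_end(ap);    // clean up on *every* return path, not just the last one
      return i;
    }
  }
  va_end(ap);
  return -1;
}

int main()
{
  std::printf("%d\n", first_negative(3, 5, -2, 7));   // prints 1
  return 0;
}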
@@ -122,7 +122,7 @@ void EventOutput::summary() const
  }

  std::cout << "Errors: " << scan.errors.size() << std::endl;
-  if (scan.errors.size()) {
+  if (!scan.errors.empty()) {
    for (JournalScanner::ErrorMap::const_iterator i = scan.errors.begin();
         i != scan.errors.end(); ++i) {
      std::cout << " 0x" << std::hex << i->first << std::dec