Merge pull request #3881 from ceph/wip-da-SCA-20150304

SCA fixes

Reviewed-by: Kefu Chai <kchai@redhat.com>
Kefu Chai 2015-03-11 23:52:16 +08:00
commit fdf9e1cd6e
19 changed files with 88 additions and 88 deletions

View File

@@ -86,11 +86,6 @@ int ErasureCodeShec::minimum_to_decode(const set<int> &want_to_decode,
const set<int> &available_chunks,
set<int> *minimum_chunks)
{
- int erased[k + m];
- int avails[k + m];
- int minimum[k + m];
- int dm_ids[k];
if (!minimum_chunks) return -EINVAL;
for (set<int>::iterator it = available_chunks.begin(); it != available_chunks.end(); ++it){
@@ -101,6 +96,11 @@ int ErasureCodeShec::minimum_to_decode(const set<int> &want_to_decode,
want_to_decode.begin(), want_to_decode.end())) {
*minimum_chunks = want_to_decode;
} else {
+ int erased[k + m];
+ int avails[k + m];
+ int minimum[k + m];
+ int dm_ids[k];
for (int i = 0; i < k + m; i++) {
erased[i] = 0;
if (available_chunks.find(i) == available_chunks.end()) {
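The hunk above narrows the scope of the decode work buffers to the branch that actually needs them, the pattern static analyzers report as "the scope of the variable can be reduced". A minimal standalone sketch of the idea, with hypothetical names and std::vector in place of the Ceph types:

```cpp
#include <algorithm>
#include <set>
#include <vector>

// Hypothetical example of scope reduction: the buffers are only needed on the
// slow path, so they are declared inside that branch instead of at the top.
int pick_minimum(const std::set<int>& want, const std::set<int>& avail, int k, int m) {
  if (avail.empty())
    return -1;                        // early exit never touches the buffers
  if (std::includes(avail.begin(), avail.end(), want.begin(), want.end()))
    return 0;                         // fast path: everything wanted is available
  std::vector<int> erased(k + m, 0);  // declared only where they are used
  std::vector<int> minimum(k + m, 0);
  // ... fill erased/minimum and compute the answer ...
  return 0;
}
```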

View File

@@ -52,11 +52,11 @@ int ObjectMap::lock()
}
}
- int r;
bool broke_lock = false;
CephContext *cct = m_image_ctx.cct;
std::string oid(object_map_name(m_image_ctx.id, CEPH_NOSNAP));
while (true) {
+ int r;
ldout(cct, 10) << &m_image_ctx << " locking object map" << dendl;
r = rados::cls::lock::lock(&m_image_ctx.md_ctx, oid,
RBD_LOCK_NAME, LOCK_EXCLUSIVE, "", "", "",
@@ -73,9 +73,9 @@ int ObjectMap::lock()
lockers_t lockers;
ClsLockType lock_type;
std::string lock_tag;
- int r = rados::cls::lock::get_lock_info(&m_image_ctx.md_ctx, oid,
- RBD_LOCK_NAME, &lockers,
- &lock_type, &lock_tag);
+ r = rados::cls::lock::get_lock_info(&m_image_ctx.md_ctx, oid,
+ RBD_LOCK_NAME, &lockers,
+ &lock_type, &lock_tag);
if (r == -ENOENT) {
continue;
} else if (r < 0) {

View File

@@ -1886,9 +1886,9 @@ reprotect_and_return_err:
vector<uint8_t> snap_protection;
vector<uint64_t> snap_flags;
{
- int r;
RWLock::WLocker l(ictx->snap_lock);
{
+ int r;
RWLock::WLocker l2(ictx->parent_lock);
ictx->lockers.clear();
if (ictx->old_format) {
@@ -2512,11 +2512,11 @@ reprotect_and_return_err:
}
uint64_t object_size;
- uint64_t overlap;
uint64_t overlap_objects;
::SnapContext snapc;
{
+ uint64_t overlap;
RWLock::RLocker l(ictx->snap_lock);
RWLock::RLocker l2(ictx->parent_lock);

View File

@@ -572,63 +572,66 @@ void MDS::set_up_admin_socket()
asok_hook = new MDSSocketHook(this);
r = admin_socket->register_command("status", "status", asok_hook,
"high-level status of MDS");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("dump_ops_in_flight",
"dump_ops_in_flight", asok_hook,
"show the ops currently in flight");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("ops",
"ops", asok_hook,
"show the ops currently in flight");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops",
asok_hook,
"show slowest recent ops");
+ assert(r == 0);
r = admin_socket->register_command("scrub_path",
"scrub_path name=path,type=CephString",
asok_hook,
"scrub an inode and output results");
+ assert(r == 0);
r = admin_socket->register_command("flush_path",
"flush_path name=path,type=CephString",
asok_hook,
"flush an inode (and its dirfrags)");
+ assert(r == 0);
r = admin_socket->register_command("export dir",
"export dir "
"name=path,type=CephString "
"name=rank,type=CephInt",
asok_hook,
"migrate a subtree to named MDS");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("session evict",
"session evict name=client_id,type=CephString",
asok_hook,
"Evict a CephFS client");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("osdmap barrier",
"osdmap barrier name=target_epoch,type=CephInt",
asok_hook,
"Wait until the MDS has this OSD map epoch");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("session ls",
"session ls",
asok_hook,
"Enumerate connected CephFS clients");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("flush journal",
"flush journal",
asok_hook,
"Flush the journal to the backing store");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("force_readonly",
"force_readonly",
asok_hook,
"Force MDS to read-only mode");
- assert(0 == r);
+ assert(r == 0);
r = admin_socket->register_command("get subtrees",
"get subtrees",
asok_hook,
"Return the subtree map");
- assert(0 == r);
+ assert(r == 0);
}
void MDS::clean_up_admin_socket()
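The hunk above switches the existing assertions to the `assert(r == 0)` operand order and adds asserts after the `register_command()` calls whose return value had been ignored, which is what the analyzer flags as an unchecked return. A reduced sketch with a hypothetical registration function (not the AdminSocket API):

```cpp
#include <cassert>

// Hypothetical stand-in for a registration call that returns 0 on success.
int register_command(const char* name) { return name ? 0 : -1; }

void set_up_commands() {
  int r = register_command("status");
  assert(r == 0);          // check every registration, consistent operand order
  r = register_command("ops");
  assert(r == 0);          // previously some calls had no check at all
}
```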

View File

@@ -138,8 +138,8 @@ static int on_msg(struct xio_session *session,
ldout(cct,25) << "on_msg session " << session << " xcon " << xcon << dendl;
- static uint32_t nreqs;
if (unlikely(XioPool::trace_mempool)) {
+ static uint32_t nreqs;
if (unlikely((++nreqs % 65536) == 0)) {
xp_stats.dump(__func__, nreqs);
}
@@ -525,7 +525,7 @@ xio_count_buffers(buffer::list& bl, int& req_size, int& msg_off, int& req_off)
const std::list<buffer::ptr>& buffers = bl.buffers();
list<bufferptr>::const_iterator pb;
- size_t size, off, count;
+ size_t size, off;
int result;
int first = 1;
@@ -541,7 +541,7 @@ xio_count_buffers(buffer::list& bl, int& req_size, int& msg_off, int& req_off)
size = pb->length();
first = 0;
}
- count = size - off;
+ size_t count = size - off;
if (!count) continue;
if (req_size + count > MAX_XIO_BUF_SIZE) {
count = MAX_XIO_BUF_SIZE - req_size;
@@ -573,7 +573,7 @@ xio_place_buffers(buffer::list& bl, XioMsg *xmsg, struct xio_msg*& req,
const std::list<buffer::ptr>& buffers = bl.buffers();
list<bufferptr>::const_iterator pb;
struct xio_iovec_ex* iov;
- size_t size, off, count;
+ size_t size, off;
const char *data = NULL;
int first = 1;
@@ -589,7 +589,7 @@ xio_place_buffers(buffer::list& bl, XioMsg *xmsg, struct xio_msg*& req,
data = pb->c_str(); // is c_str() efficient?
first = 0;
}
- count = size - off;
+ size_t count = size - off;
if (!count) continue;
if (req_size + count > MAX_XIO_BUF_SIZE) {
count = MAX_XIO_BUF_SIZE - req_size;
@@ -755,9 +755,9 @@ int XioMessenger::_send_message_impl(Message* m, XioConnection* xcon)
{
int code = 0;
- static uint32_t nreqs;
Mutex::Locker l(xcon->lock);
if (unlikely(XioPool::trace_mempool)) {
+ static uint32_t nreqs;
if (unlikely((++nreqs % 65536) == 0)) {
xp_stats.dump(__func__, nreqs);
}

View File

@@ -208,11 +208,10 @@ public:
// and push them in FIFO order to front of the input queue,
// and mark the connection as flow-controlled
XioSubmit::Queue requeue_q;
- XioSubmit *xs;
- XioMsg *xmsg;
while (q_iter != send_q.end()) {
- xs = &(*q_iter);
+ XioSubmit *xs = &(*q_iter);
// skip retires and anything for other connections
if ((xs->type != XioSubmit::OUTGOING_MSG) ||
(xs->xcon != xcon))
@@ -424,20 +423,18 @@ public:
void shutdown()
{
- XioPortal *portal;
int nportals = portals.size();
for (int p_ix = 0; p_ix < nportals; ++p_ix) {
- portal = portals[p_ix];
+ XioPortal *portal = portals[p_ix];
portal->shutdown();
}
}
void join()
{
- XioPortal *portal;
int nportals = portals.size();
for (int p_ix = 0; p_ix < nportals; ++p_ix) {
- portal = portals[p_ix];
+ XioPortal *portal = portals[p_ix];
portal->join();
}
}

View File

@@ -7528,7 +7528,7 @@ void ReplicatedPG::eval_repop(RepGather *repop)
waiting_for_ack[repop->v].begin();
i != waiting_for_ack[repop->v].end();
++i) {
- MOSDOp *m = (MOSDOp*)i->first->get_req();
+ MOSDOp *m = static_cast<MOSDOp*>(i->first->get_req());
MOSDOpReply *reply = new MOSDOpReply(m, 0, get_osdmap()->get_epoch(), 0, true);
reply->set_reply_versions(repop->ctx->at_version,
i->second);
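This hunk, and most of the rgw hunks below, replace C-style casts with static_cast. The benefit is that static_cast is checked at compile time: it only compiles between related types, whereas a C-style cast silently degrades to reinterpret_cast when the types are unrelated. A small sketch with hypothetical message types, not the Ceph classes:

```cpp
// Hypothetical type hierarchy, for illustration only.
struct Message { virtual ~Message() {} };
struct OpMessage : Message { int op_code = 0; };

int op_code_of(Message* req) {
  // Compiles only because OpMessage derives from Message; an unrelated target
  // type here would be a compile error, unlike with a C-style cast.
  OpMessage* m = static_cast<OpMessage*>(req);
  return m->op_code;
}
```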

View File

@@ -1068,10 +1068,9 @@ int main(int argc, const char **argv)
g_conf->rgw_enable_gc_threads, g_conf->rgw_enable_quota_threads);
if (!store) {
derr << "Couldn't init storage provider (RADOS)" << dendl;
- r = EIO;
+ return EIO;
}
- if (!r)
- r = rgw_perf_start(g_ceph_context);
+ r = rgw_perf_start(g_ceph_context);
rgw_rest_init(g_ceph_context, store->region);
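The change above replaces the `r = EIO` assignment and the `if (!r)` guard with a direct `return EIO`, so the error path exits immediately and the success path no longer needs the guard. A sketch of the resulting control flow, with hypothetical init helpers standing in for the RGW setup calls:

```cpp
#include <cerrno>

// Hypothetical helpers, for illustration only.
void* init_store();
int start_perf_counters();

int run_frontend() {
  void* store = init_store();
  if (!store) {
    return EIO;                    // fail fast instead of setting r and falling through
  }
  int r = start_perf_counters();   // unconditional now that the error path returned
  return r;
}
```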

View File

@@ -254,13 +254,13 @@ static int get_policy_from_attr(CephContext *cct, RGWRados *store, void *ctx,
return get_bucket_policy_from_attr(cct, store, ctx, bucket_info, bucket_attrs,
policy, instance_obj);
}
- return get_obj_policy_from_attr(cct, store, *(RGWObjectCtx *)ctx, bucket_info, bucket_attrs,
+ return get_obj_policy_from_attr(cct, store, *static_cast<RGWObjectCtx *>(ctx), bucket_info, bucket_attrs,
policy, obj);
}
static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
{
- RGWRados::Object op_target(store, s->bucket_info, *(RGWObjectCtx *)s->obj_ctx, obj);
+ RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
@@ -272,7 +272,7 @@ static int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map
static int get_system_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs,
uint64_t *obj_size, RGWObjVersionTracker *objv_tracker)
{
- RGWRados::SystemObject src(store, *(RGWObjectCtx *)s->obj_ctx, obj);
+ RGWRados::SystemObject src(store, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::SystemObject::Read rop(&src);
rop.stat_params.attrs = &attrs;
@@ -339,7 +339,7 @@ static int rgw_build_policies(RGWRados *store, struct req_state *s, bool only_bu
int ret = 0;
rgw_obj_key obj;
RGWUserInfo bucket_owner_info;
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
if (!bi.empty()) {
@@ -523,6 +523,12 @@ int RGWOp::init_quota()
static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
uint8_t flags = 0;
+ if (!req_meth) {
+ dout(5) << "req_meth is null" << dendl;
+ return false;
+ }
if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET;
else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST;
else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT;
@@ -628,11 +634,12 @@ bool RGWOp::generate_cors_headers(string& origin, string& method, string& header
req_meth = s->info.method;
}
- if (req_meth)
+ if (req_meth) {
method = req_meth;
- /* CORS 6.2.5. */
- if (!validate_cors_rule_method(rule, req_meth)) {
- return false;
+ /* CORS 6.2.5. */
+ if (!validate_cors_rule_method(rule, req_meth)) {
+ return false;
+ }
}
/* CORS 6.2.4. */
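The `validate_cors_rule_method` hunk above adds a null check before the method string is compared, since passing a null pointer to strcmp is undefined behaviour, and the caller's `if (req_meth)` body is braced so the validation only runs when a method is present. A reduced sketch of the guard, with placeholder flag values rather than the RGW_CORS_* constants:

```cpp
#include <cstdint>
#include <cstring>

// Reduced sketch of the null guard before strcmp.
bool method_to_flag(const char* req_meth, uint8_t& flag) {
  if (!req_meth) {
    return false;                  // strcmp(NULL, ...) would be undefined behaviour
  }
  if (strcmp(req_meth, "GET") == 0)  { flag = 0x1; return true; }
  if (strcmp(req_meth, "POST") == 0) { flag = 0x2; return true; }
  if (strcmp(req_meth, "PUT") == 0)  { flag = 0x4; return true; }
  return false;
}
```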
@@ -885,7 +892,7 @@ void RGWGetObj::execute()
perfcounter->inc(l_rgw_get);
int64_t new_ofs, new_end;
- RGWRados::Object op_target(store, s->bucket_info, *(RGWObjectCtx *)s->obj_ctx, obj);
+ RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
ret = get_params();
@@ -1309,7 +1316,7 @@ void RGWCreateBucket::execute()
}
/* we need to make sure we read bucket info, it's not read before for this specific request */
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
ret = store->get_bucket_info(obj_ctx, s->bucket_name_str, s->bucket_info, NULL, &s->bucket_attrs);
if (ret < 0 && ret != -ENOENT)
return;
@@ -1640,8 +1647,8 @@ RGWPutObjProcessor *RGWPutObj::select_processor(RGWObjectCtx& obj_ctx, bool *is_
if (!multipart) {
processor = new RGWPutObjProcessor_Atomic(obj_ctx, s->bucket_info, s->bucket, s->object.name, part_size, s->req_id, s->bucket_info.versioning_enabled());
- ((RGWPutObjProcessor_Atomic *)processor)->set_olh_epoch(olh_epoch);
- ((RGWPutObjProcessor_Atomic *)processor)->set_version_id(version_id);
+ (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_olh_epoch(olh_epoch);
+ (static_cast<RGWPutObjProcessor_Atomic *>(processor))->set_version_id(version_id);
} else {
processor = new RGWPutObjProcessor_Multipart(obj_ctx, s->bucket_info, part_size, s);
}
@@ -1772,7 +1779,7 @@ void RGWPutObj::execute()
supplied_md5[sizeof(supplied_md5) - 1] = '\0';
}
- processor = select_processor(*(RGWObjectCtx *)s->obj_ctx, &multipart);
+ processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
ret = processor->prepare(store, NULL);
if (ret < 0)
@@ -1805,7 +1812,7 @@ void RGWPutObj::execute()
/* restart processing with different oid suffix */
dispose_processor(processor);
- processor = select_processor(*(RGWObjectCtx *)s->obj_ctx, &multipart);
+ processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), &multipart);
string oid_rand;
char buf[33];
@@ -1955,7 +1962,7 @@ void RGWPostObj::execute()
goto done;
}
- processor = select_processor(*(RGWObjectCtx *)s->obj_ctx);
+ processor = select_processor(*static_cast<RGWObjectCtx *>(s->obj_ctx));
ret = processor->prepare(store, NULL);
if (ret < 0)
@@ -2169,7 +2176,7 @@ void RGWDeleteObj::execute()
ret = -EINVAL;
rgw_obj obj(s->bucket, s->object);
if (!s->object.empty()) {
- RGWObjectCtx *obj_ctx = (RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
obj_ctx->set_atomic(obj);
@@ -2251,7 +2258,7 @@ int RGWCopyObj::verify_permission()
}
map<string, bufferlist> src_attrs;
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
ret = store->get_bucket_info(obj_ctx, src_bucket_name, src_bucket_info, NULL, &src_attrs);
if (ret < 0)
@@ -2376,7 +2383,7 @@ void RGWCopyObj::execute()
rgw_obj src_obj(src_bucket, src_object);
rgw_obj dst_obj(dest_bucket, dest_object);
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
obj_ctx.set_atomic(src_obj);
obj_ctx.set_atomic(dst_obj);
@@ -2756,7 +2763,7 @@ void RGWInitMultipart::execute()
obj.set_in_extra_data(true);
obj.index_hash_source = s->object.name;
- RGWRados::Object op_target(store, s->bucket_info, *(RGWObjectCtx *)s->obj_ctx, obj);
+ RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
RGWRados::Object::Write obj_op(&op_target);
@@ -3068,11 +3075,11 @@ void RGWCompleteMultipart::execute()
store->gen_rand_obj_instance_name(&target_obj);
}
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
obj_ctx.set_atomic(target_obj);
- RGWRados::Object op_target(store, s->bucket_info, *(RGWObjectCtx *)s->obj_ctx, target_obj);
+ RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.manifest = &manifest;
@@ -3087,7 +3094,7 @@ void RGWCompleteMultipart::execute()
return;
// remove the upload obj
- int r = store->delete_obj(*(RGWObjectCtx *)s->obj_ctx, s->bucket_info, meta_obj, 0);
+ int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx), s->bucket_info, meta_obj, 0);
if (r < 0) {
ldout(store->ctx(), 0) << "WARNING: failed to remove object " << meta_obj << dendl;
}
@@ -3133,7 +3140,7 @@ void RGWAbortMultipart::execute()
int max_parts = 1000;
- RGWObjectCtx *obj_ctx = (RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
meta_obj.init_ns(s->bucket, meta_oid, mp_ns);
meta_obj.set_in_extra_data(true);
@@ -3306,7 +3313,7 @@ void RGWDeleteMultiObj::execute()
vector<rgw_obj_key>::iterator iter;
RGWMultiDelXMLParser parser;
int num_processed = 0;
- RGWObjectCtx *obj_ctx = (RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx *obj_ctx = static_cast<RGWObjectCtx *>(s->obj_ctx);
ret = get_params();
if (ret < 0) {

View File

@@ -47,7 +47,7 @@ protected:
uint64_t added_bytes;
uint64_t removed_bytes;
public:
- StatsAsyncTestSet() {}
+ StatsAsyncTestSet() : objs_delta(0), added_bytes(0), removed_bytes(0) {}
bool update(RGWQuotaCacheStats *entry) {
if (entry->async_refresh_time.sec() == 0)
return false;
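The constructor above now initializes every counter, so a freshly constructed object no longer carries indeterminate values, which is the uninitialized-member warning static analyzers raise. The LoadGenOp change near the end of this diff follows the same pattern. A minimal sketch:

```cpp
#include <cstdint>

// Sketch of constructor init-list initialization (hypothetical class name).
class StatsSketch {
  int64_t objs_delta;
  uint64_t added_bytes;
  uint64_t removed_bytes;
public:
  // Every member gets a deterministic value instead of whatever was on the stack.
  StatsSketch() : objs_delta(0), added_bytes(0), removed_bytes(0) {}
};
```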

View File

@@ -975,6 +975,8 @@ int RGWPutObjProcessor_Aio::drain_pending()
int RGWPutObjProcessor_Aio::throttle_data(void *handle, bool need_to_wait)
{
+ bool _wait = need_to_wait;
if (handle) {
struct put_obj_aio_info info;
info.handle = handle;
@@ -988,7 +990,7 @@ int RGWPutObjProcessor_Aio::throttle_data(void *handle, bool need_to_wait)
if (r < 0)
return r;
- need_to_wait = false;
+ _wait = false;
}
/* resize window in case messages are draining too fast */
@@ -997,13 +999,10 @@ int RGWPutObjProcessor_Aio::throttle_data(void *handle, bool need_to_wait)
}
/* now throttle. Note that need_to_wait should only affect the first IO operation */
- if (pending.size() > max_chunks ||
- need_to_wait) {
+ if (pending.size() > max_chunks || _wait) {
int r = wait_pending_front();
if (r < 0)
return r;
- need_to_wait = false;
}
return 0;
}
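throttle_data now copies need_to_wait into a local _wait and mutates only the copy, so the parameter keeps its original value for the whole function and the analyzer's complaint about reassigning a parameter goes away. A reduced sketch of that control flow, with the throttle loop simplified to the decisions that matter here:

```cpp
#include <cstddef>

// Sketch only: hypothetical throttle helper, not the RGW implementation.
int throttle(bool need_to_wait, std::size_t pending, std::size_t max_chunks) {
  bool wait = need_to_wait;        // mutate the copy, not the parameter
  if (pending > 0) {
    wait = false;                  // a completed request already made room
  }
  if (pending > max_chunks || wait) {
    // ... block until the front request completes ...
  }
  return 0;
}
```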
@@ -3216,8 +3215,6 @@ int RGWRados::put_system_obj_impl(rgw_obj& obj, uint64_t size, time_t *mtime,
op.mtime(&set_mtime);
op.write_full(data);
string etag;
string content_type;
bufferlist acl_bl;
for (map<string, bufferlist>::iterator iter = attrs.begin(); iter != attrs.end(); ++iter) {
@@ -4351,7 +4348,6 @@ int RGWRados::Object::Delete::delete_obj()
index_op.set_bilog_flags(params.bilog_flags);
- string tag;
r = index_op.prepare(CLS_RGW_OP_DEL);
if (r < 0)
return r;
@@ -4441,7 +4437,6 @@ int RGWRados::delete_obj_index(rgw_obj& obj)
std::string oid, key;
get_obj_bucket_and_oid_loc(obj, bucket, oid, key);
- string tag;
RGWRados::Bucket bop(this, bucket);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj, NULL);
@@ -4845,7 +4840,6 @@ int RGWRados::set_attrs(void *ctx, rgw_obj& obj,
RGWRados::Bucket bop(this, bucket);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj, state);
- string tag;
if (state) {
r = index_op.prepare(CLS_RGW_OP_ADD);
if (r < 0)
@@ -4919,7 +4913,6 @@ int RGWRados::Object::Read::prepare(int64_t *pofs, int64_t *pend)
CephContext *cct = store->ctx();
bufferlist etag;
- time_t ctime;
off_t ofs = 0;
off_t end = -1;
@@ -4955,7 +4948,7 @@ int RGWRados::Object::Read::prepare(int64_t *pofs, int64_t *pend)
/* Convert all times go GMT to make them compatible */
if (conds.mod_ptr || conds.unmod_ptr) {
- ctime = astate->mtime;
+ time_t ctime = astate->mtime;
if (conds.mod_ptr) {
ldout(cct, 10) << "If-Modified-Since: " << *conds.mod_ptr << " Last-Modified: " << ctime << dendl;

View File

@@ -276,7 +276,7 @@ void RGWOp_BILog_List::execute() {
RGWBucketInfo bucket_info;
unsigned max_entries;
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
if (bucket_name.empty() && bucket_instance.empty()) {
dout(5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
@@ -368,7 +368,7 @@ void RGWOp_BILog_Info::execute() {
bucket_instance = s->info.args.get("bucket-instance");
RGWBucketInfo bucket_info;
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
if (bucket_name.empty() && bucket_instance.empty()) {
dout(5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
@@ -422,7 +422,7 @@ void RGWOp_BILog_Delete::execute() {
RGWBucketInfo bucket_info;
- RGWObjectCtx& obj_ctx = *(RGWObjectCtx *)s->obj_ctx;
+ RGWObjectCtx& obj_ctx = *static_cast<RGWObjectCtx *>(s->obj_ctx);
http_ret = 0;
if ((bucket_name.empty() && bucket_instance.empty()) ||

View File

@@ -550,7 +550,7 @@ int authenticate_temp_url(RGWRados *store, req_state *s)
/* need to get user info of bucket owner */
RGWBucketInfo bucket_info;
- int ret = store->get_bucket_info(*(RGWObjectCtx *)s->obj_ctx, s->bucket_name_str, bucket_info, NULL);
+ int ret = store->get_bucket_info(*static_cast<RGWObjectCtx *>(s->obj_ctx), s->bucket_name_str, bucket_info, NULL);
if (ret < 0)
return -EPERM;

View File

@@ -275,9 +275,9 @@ int main(int argc, char **argv)
for (unsigned int k = 1; k <= 12; k++) {
for (unsigned int m = 1; (m <= k) && (k + m <= 20); m++) {
for (unsigned int c = 1; c <= m; c++) {
- sprintf(param[i].sk, "%d", k);
- sprintf(param[i].sm, "%d", m);
- sprintf(param[i].sc, "%d", c);
+ sprintf(param[i].sk, "%u", k);
+ sprintf(param[i].sm, "%u", m);
+ sprintf(param[i].sc, "%u", c);
param[i].k = param[i].sk;
param[i].m = param[i].sm;

View File

@@ -99,10 +99,9 @@ int main(int argc, char **argv)
void* thread1(void* pParam)
{
- TestParam* param = (TestParam*) pParam;
+ TestParam* param = static_cast<TestParam*>(pParam);
time_t start, end;
- int r;
ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
@@ -136,6 +135,7 @@ void* thread1(void* pParam)
while (kTestSec >= (end - start)) {
//init
+ int r;
ErasureCodeShec* shec = new ErasureCodeShecReedSolomonVandermonde(
tcache,
ErasureCodeShec::MULTIPLE);

View File

@@ -667,7 +667,7 @@ class SyntheticDispatcher : public Dispatcher {
Mutex::Locker l(lock);
list<uint64_t> c = conn_sent[con];
for (list<uint64_t>::iterator it = c.begin();
- it != c.end(); it++)
+ it != c.end(); ++it)
sent.erase(*it);
conn_sent.erase(con);
got_remote_reset = true;
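Pre-increment avoids constructing and discarding a temporary copy of the iterator on every pass; for a std::list iterator the copy is cheap, but the post-increment form buys nothing here. A minimal sketch of the preferred loop form:

```cpp
#include <list>

long sum_ids(const std::list<long>& ids) {
  long total = 0;
  for (std::list<long>::const_iterator it = ids.begin(); it != ids.end(); ++it) {
    total += *it;                  // ++it: no temporary iterator copy per iteration
  }
  return total;
}
```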

View File

@@ -192,9 +192,10 @@ class PerfCase {
}
uint64_t rados_write_4k(int times) {
- uint64_t start_time = 0, ticks = 0;
+ uint64_t ticks = 0;
uint64_t len = Kib *4;
for (int i = 0; i < times; i++) {
+ uint64_t start_time = 0;
{
Transaction t;
ghobject_t oid = create_object();

View File

@@ -700,14 +700,13 @@ public:
void getattrs() {
ghobject_t obj;
- int retry;
{
Mutex::Locker locker(lock);
if (!can_unlink())
return ;
wait_for_ready();
- retry = 10;
+ int retry = 10;
do {
obj = get_uniform_random_object();
if (!--retry)

View File

@@ -501,7 +501,7 @@ public:
librados::AioCompletion *completion;
LoadGenOp() {}
- LoadGenOp(LoadGen *_lg) : lg(_lg), completion(NULL) {}
+ LoadGenOp(LoadGen *_lg) : id(0), type(0), off(0), len(0), lg(_lg), completion(NULL) {}
};
int max_op;
@@ -543,6 +543,7 @@ public:
min_op_len = 1024;
target_throughput = 5 * 1024 * 1024; // B/sec
max_op_len = 2 * 1024 * 1024;
+ max_ops = 0;
max_backlog = target_throughput * 2;
run_length = 60;