Merge branch 'wip_scrub_tphandle' into next

Fixes: #5159
Reviewed-by: Sage Weil <sage@inktank.com>
Samuel Just 2013-05-23 20:08:11 -07:00
commit 8c1c2d98c6
4 changed files with 66 additions and 33 deletions

@ -423,6 +423,7 @@ OPTION(osd_scrub_min_interval, OPT_FLOAT, 60*60*24) // if load is low
OPTION(osd_scrub_max_interval, OPT_FLOAT, 7*60*60*24) // regardless of load
OPTION(osd_deep_scrub_interval, OPT_FLOAT, 60*60*24*7) // once a week
OPTION(osd_deep_scrub_stride, OPT_INT, 524288)
OPTION(osd_scan_list_ping_tp_interval, OPT_U64, 100)
OPTION(osd_auto_weight, OPT_BOOL, false)
OPTION(osd_class_dir, OPT_STR, CEPH_LIBDIR "/rados-classes") // where rados plugins are stored
OPTION(osd_check_for_log_corruption, OPT_BOOL, false)
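
The new osd_scan_list_ping_tp_interval option (default 100) sets how many omap keys a scrubbing thread may walk between heartbeat resets, so that a long key scan is not mistaken for a hung worker by the thread pool's timeout checks. A minimal sketch of the check the option drives, using a hypothetical maybe_ping() helper (the real, inlined use is in PG::_scan_list below):

    #include "common/WorkQueue.h"   // ThreadPool::TPHandle

    // Hypothetical helper, not part of this change: reset the thread-pool
    // heartbeat at most once every osd_scan_list_ping_tp_interval calls.
    // A value of 0 skips the resets entirely.
    static inline void maybe_ping(ThreadPool::TPHandle &handle, uint64_t count)
    {
      uint64_t interval = g_conf->osd_scan_list_ping_tp_interval;
      if (interval && (count % interval == 0))
        handle.reset_tp_timeout();
    }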

@ -1408,8 +1408,10 @@ protected:
osd->scrub_queue.pop_front();
return pg;
}
void _process(PG *pg) {
pg->scrub();
void _process(
PG *pg,
ThreadPool::TPHandle &handle) {
pg->scrub(handle);
pg->put("ScrubWQ");
}
void _clear() {
@ -1493,7 +1495,9 @@ protected:
rep_scrub_queue.pop_front();
return msg;
}
void _process(MOSDRepScrub *msg) {
void _process(
MOSDRepScrub *msg,
ThreadPool::TPHandle &handle) {
osd->osd_lock.Lock();
if (osd->is_stopping()) {
osd->osd_lock.Unlock();
@ -1502,7 +1506,7 @@ protected:
if (osd->_have_pg(msg->pgid)) {
PG *pg = osd->_lookup_lock_pg(msg->pgid);
osd->osd_lock.Unlock();
pg->replica_scrub(msg);
pg->replica_scrub(msg, handle);
msg->put();
pg->unlock();
} else {
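
Both scrub work queues now implement the two-argument _process(item, ThreadPool::TPHandle &) form, so a long-running item (here, a whole-PG scrub) can keep pinging the thread pool instead of having to finish inside a single timeout window. A sketch of a handler written against the new interface; "ExampleWQ" is hypothetical, while the signature, the forwarded handle, and the put() pairing mirror the ScrubWQ hunk above:

    #include "common/WorkQueue.h"   // ThreadPool::WorkQueue, ThreadPool::TPHandle

    // Sketch only: the queueing boilerplate (constructor, _enqueue, _dequeue,
    // _clear) is unchanged by this commit and omitted here.
    struct ExampleWQ : public ThreadPool::WorkQueue<PG> {
      // ...
      void _process(PG *pg, ThreadPool::TPHandle &handle) {
        // The handle is the worker thread's heartbeat token: anything the
        // scrub calls below may invoke handle.reset_tp_timeout() between
        // expensive steps rather than racing one fixed timeout.
        pg->scrub(handle);
        pg->put("ExampleWQ");  // drop the reference held while the PG was queued
      }
    };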

@ -3263,7 +3263,9 @@ void PG::sub_op_scrub_map(OpRequestRef op)
/*
* pg lock may or may not be held
*/
void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
void PG::_scan_list(
ScrubMap &map, vector<hobject_t> &ls, bool deep,
ThreadPool::TPHandle &handle)
{
dout(10) << "_scan_list scanning " << ls.size() << " objects"
<< (deep ? " deeply" : "") << dendl;
@ -3271,6 +3273,7 @@ void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
for (vector<hobject_t>::iterator p = ls.begin();
p != ls.end();
++p, i++) {
handle.reset_tp_timeout();
hobject_t poid = *p;
struct stat st;
@ -3290,6 +3293,7 @@ void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
while ( (r = osd->store->read(coll, poid, pos,
g_conf->osd_deep_scrub_stride, bl,
true)) > 0) {
handle.reset_tp_timeout();
h << bl;
pos += bl.length();
bl.clear();
@ -3319,7 +3323,14 @@ void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator(
coll, poid);
assert(iter);
uint64_t keys_scanned = 0;
for (iter->seek_to_first(); iter->valid() ; iter->next()) {
if (g_conf->osd_scan_list_ping_tp_interval &&
(keys_scanned % g_conf->osd_scan_list_ping_tp_interval == 0)) {
handle.reset_tp_timeout();
}
++keys_scanned;
dout(25) << "CRC key " << iter->key() << " value "
<< string(iter->value().c_str(), iter->value().length()) << dendl;
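
Two different reset policies appear inside _scan_list after this change: the deep-read loop pings on every osd_deep_scrub_stride-sized read, since each read is already expensive, while the new omap loop pings only every osd_scan_list_ping_tp_interval keys, presumably because individual keys are cheap and a reset per key would be needless overhead. Side by side (abbreviated from the hunks above; h is the running checksum and handle is the ThreadPool::TPHandle passed in):

    // Deep object reads: one heartbeat reset per stride-sized chunk.
    while ((r = osd->store->read(coll, poid, pos,
                                 g_conf->osd_deep_scrub_stride, bl, true)) > 0) {
      handle.reset_tp_timeout();
      h << bl;
      pos += bl.length();
      bl.clear();
    }

    // Omap keys: one heartbeat reset per osd_scan_list_ping_tp_interval keys
    // (a value of 0 skips the resets in this loop altogether).
    uint64_t keys_scanned = 0;
    for (iter->seek_to_first(); iter->valid(); iter->next()) {
      if (g_conf->osd_scan_list_ping_tp_interval &&
          (keys_scanned % g_conf->osd_scan_list_ping_tp_interval == 0))
        handle.reset_tp_timeout();
      ++keys_scanned;
      // ... feed iter->key() / iter->value() into the checksum as above ...
    }
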
@ -3596,8 +3607,10 @@ void PG::_scan_snaps(ScrubMap &smap)
* build a scrub map over a chunk without releasing the lock
* only used by chunky scrub
*/
int PG::build_scrub_map_chunk(ScrubMap &map,
hobject_t start, hobject_t end, bool deep)
int PG::build_scrub_map_chunk(
ScrubMap &map,
hobject_t start, hobject_t end, bool deep,
ThreadPool::TPHandle &handle)
{
dout(10) << "build_scrub_map" << dendl;
dout(20) << "scrub_map_chunk [" << start << "," << end << ")" << dendl;
@ -3612,7 +3625,7 @@ int PG::build_scrub_map_chunk(ScrubMap &map,
return ret;
}
_scan_list(map, ls, deep);
_scan_list(map, ls, deep, handle);
_scan_snaps(map);
// pg attrs
@ -3629,7 +3642,7 @@ int PG::build_scrub_map_chunk(ScrubMap &map,
* build a (sorted) summary of pg content for purposes of scrubbing
* called while holding pg lock
*/
void PG::build_scrub_map(ScrubMap &map)
void PG::build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle)
{
dout(10) << "build_scrub_map" << dendl;
@ -3646,7 +3659,7 @@ void PG::build_scrub_map(ScrubMap &map)
vector<hobject_t> ls;
osd->store->collection_list(coll, ls);
_scan_list(map, ls, false);
_scan_list(map, ls, false, handle);
lock();
_scan_snaps(map);
@ -3671,7 +3684,9 @@ void PG::build_scrub_map(ScrubMap &map)
* build a summary of pg content changed starting after v
* called while holding pg lock
*/
void PG::build_inc_scrub_map(ScrubMap &map, eversion_t v)
void PG::build_inc_scrub_map(
ScrubMap &map, eversion_t v,
ThreadPool::TPHandle &handle)
{
map.valid_through = last_update_applied;
map.incr_since = v;
@ -3695,7 +3710,7 @@ void PG::build_inc_scrub_map(ScrubMap &map, eversion_t v)
}
}
_scan_list(map, ls, false);
_scan_list(map, ls, false, handle);
// pg attrs
osd->store->collection_getattrs(coll, map.attrs);
@ -3743,7 +3758,9 @@ void PG::repair_object(const hobject_t& soid, ScrubMap::object *po, int bad_peer
* for pushes to complete in case of recent recovery. Build a single
* scrubmap of objects that are in the range [msg->start, msg->end).
*/
void PG::replica_scrub(MOSDRepScrub *msg)
void PG::replica_scrub(
MOSDRepScrub *msg,
ThreadPool::TPHandle &handle)
{
assert(!scrubber.active_rep_scrub);
dout(7) << "replica_scrub" << dendl;
@ -3777,7 +3794,9 @@ void PG::replica_scrub(MOSDRepScrub *msg)
return;
}
build_scrub_map_chunk(map, msg->start, msg->end, msg->deep);
build_scrub_map_chunk(
map, msg->start, msg->end, msg->deep,
handle);
} else {
if (msg->scrub_from > eversion_t()) {
@ -3792,10 +3811,10 @@ void PG::replica_scrub(MOSDRepScrub *msg)
return;
}
}
build_inc_scrub_map(map, msg->scrub_from);
build_inc_scrub_map(map, msg->scrub_from, handle);
scrubber.finalizing = 0;
} else {
build_scrub_map(map);
build_scrub_map(map, handle);
}
if (msg->map_epoch < info.history.same_interval_since) {
@ -3823,7 +3842,7 @@ void PG::replica_scrub(MOSDRepScrub *msg)
* scrub will be chunky if all OSDs in PG support chunky scrub
* scrub will fall back to classic in any other case
*/
void PG::scrub()
void PG::scrub(ThreadPool::TPHandle &handle)
{
lock();
if (deleting) {
@ -3868,9 +3887,9 @@ void PG::scrub()
}
if (scrubber.is_chunky) {
chunky_scrub();
chunky_scrub(handle);
} else {
classic_scrub();
classic_scrub(handle);
}
unlock();
@ -3915,7 +3934,7 @@ void PG::scrub()
* Flag set when we're in the finalize stage.
*
*/
void PG::classic_scrub()
void PG::classic_scrub(ThreadPool::TPHandle &handle)
{
if (!scrubber.active) {
dout(10) << "scrub start" << dendl;
@ -3946,7 +3965,7 @@ void PG::classic_scrub()
// Unlocks and relocks...
scrubber.primary_scrubmap = ScrubMap();
build_scrub_map(scrubber.primary_scrubmap);
build_scrub_map(scrubber.primary_scrubmap, handle);
if (scrubber.epoch_start != info.history.same_interval_since) {
dout(10) << "scrub pg changed, aborting" << dendl;
@ -3993,7 +4012,7 @@ void PG::classic_scrub()
if (scrubber.primary_scrubmap.valid_through != log.head) {
ScrubMap incr;
build_inc_scrub_map(incr, scrubber.primary_scrubmap.valid_through);
build_inc_scrub_map(incr, scrubber.primary_scrubmap.valid_through, handle);
scrubber.primary_scrubmap.merge_incr(incr);
}
@ -4076,7 +4095,7 @@ void PG::classic_scrub()
* scrubber.state encodes the current state of the scrub (refer to state diagram
* for details).
*/
void PG::chunky_scrub()
void PG::chunky_scrub(ThreadPool::TPHandle &handle)
{
// check for map changes
if (scrubber.is_chunky_scrub_active()) {
@ -4209,7 +4228,8 @@ void PG::chunky_scrub()
// build my own scrub map
ret = build_scrub_map_chunk(scrubber.primary_scrubmap,
scrubber.start, scrubber.end,
scrubber.deep);
scrubber.deep,
handle);
if (ret < 0) {
dout(5) << "error building scrub map: " << ret << ", aborting" << dendl;
scrub_clear_state();
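
Taken together, the PG.cc hunks thread a single ThreadPool::TPHandle from the work queue down to the innermost per-object loops; every signature in between only forwards it. The call chain, as it reads from the hunks above (arguments elided):

    // Primary side:
    //   ScrubWQ::_process(pg, handle)
    //     -> PG::scrub(handle)
    //       -> PG::chunky_scrub(handle)
    //         -> PG::build_scrub_map_chunk(..., handle)
    //           -> PG::_scan_list(..., handle)
    //             -> handle.reset_tp_timeout()   // per object, per deep-read
    //                                            // stride, every N omap keys
    //       -> PG::classic_scrub(handle)
    //         -> PG::build_scrub_map(..., handle) and build_inc_scrub_map(..., handle)
    //           -> PG::_scan_list(..., handle)
    //
    // Replica side:
    //   RepScrubWQ::_process(msg, handle)
    //     -> PG::replica_scrub(msg, handle)
    //       -> build_scrub_map_chunk / build_scrub_map / build_inc_scrub_map(..., handle)
    //         -> PG::_scan_list(..., handle)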

@ -43,6 +43,7 @@
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDPGLog.h"
#include "common/tracked_int_ptr.hpp"
#include "common/WorkQueue.h"
#include <list>
#include <memory>
@ -1041,24 +1042,29 @@ public:
map<hobject_t, int> &authoritative,
map<hobject_t, set<int> > &inconsistent_snapcolls,
ostream &errorstream);
void scrub();
void classic_scrub();
void chunky_scrub();
void scrub(ThreadPool::TPHandle &handle);
void classic_scrub(ThreadPool::TPHandle &handle);
void chunky_scrub(ThreadPool::TPHandle &handle);
void scrub_compare_maps();
void scrub_process_inconsistent();
void scrub_finalize();
void scrub_finish();
void scrub_clear_state();
bool scrub_gather_replica_maps();
void _scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep);
void _scan_list(
ScrubMap &map, vector<hobject_t> &ls, bool deep,
ThreadPool::TPHandle &handle);
void _scan_snaps(ScrubMap &map);
void _request_scrub_map_classic(int replica, eversion_t version);
void _request_scrub_map(int replica, eversion_t version,
hobject_t start, hobject_t end, bool deep);
int build_scrub_map_chunk(ScrubMap &map,
hobject_t start, hobject_t end, bool deep);
void build_scrub_map(ScrubMap &map);
void build_inc_scrub_map(ScrubMap &map, eversion_t v);
int build_scrub_map_chunk(
ScrubMap &map,
hobject_t start, hobject_t end, bool deep,
ThreadPool::TPHandle &handle);
void build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle);
void build_inc_scrub_map(
ScrubMap &map, eversion_t v, ThreadPool::TPHandle &handle);
virtual void _scrub(ScrubMap &map) { }
virtual void _scrub_clear_state() { }
virtual void _scrub_finish() { }
@ -1077,7 +1083,9 @@ public:
void reg_next_scrub();
void unreg_next_scrub();
void replica_scrub(class MOSDRepScrub *op);
void replica_scrub(
class MOSDRepScrub *op,
ThreadPool::TPHandle &handle);
void sub_op_scrub_map(OpRequestRef op);
void sub_op_scrub_reserve(OpRequestRef op);
void sub_op_scrub_reserve_reply(OpRequestRef op);