
Merge pull request from majianpeng/cache-tier

Cache tier bug fixes

Reviewed-by: Sage Weil <sage@redhat.com>
Sage Weil committed 2015-03-25 10:27:56 -07:00
commit cc3527a51e
3 changed files with 30 additions and 16 deletions
qa/workunits/cephtool
src/osdc

qa/workunits/cephtool/test.sh

@@ -335,8 +335,7 @@ function test_tiering()
   ceph osd pool create cachepool 2
   ceph osd tier add-cache datapool cachepool 1024000
   ceph osd tier cache-mode cachepool writeback
-  dd if=/dev/zero of=/tmp/add-cache bs=4K count=1
-  rados -p datapool put object /tmp/add-cache
+  rados -p datapool put object /etc/passwd
   rados -p cachepool stat object
   rados -p cachepool cache-flush object
   rados -p datapool stat object
@@ -344,7 +343,6 @@ function test_tiering()
   ceph osd tier remove datapool cachepool
   ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
   ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
-  rm -rf /tmp/add-cache

   # protection against pool removal when used as tiers
   ceph osd pool create datapool 2
@@ -362,12 +360,17 @@ function test_tiering()
   # check health check
   ceph osd pool create datapool 2
   ceph osd pool create cache4 2
-  ceph osd tier add datapool cache4
+  ceph osd tier add-cache datapool cache4 1024000
   ceph osd tier cache-mode cache4 writeback
+  tmpfile=$(mktemp|grep tmp)
+  dd if=/dev/zero of=$tmpfile bs=4K count=1
   ceph osd pool set cache4 target_max_objects 5
-  ceph osd pool set cache4 target_max_bytes 1000
+  #4096 * 5 = 20480, 20480 near/at 21000,
+  ceph osd pool set cache4 target_max_bytes 21000
   for f in `seq 1 5` ; do
-    rados -p cache4 put foo$f /etc/passwd
+    rados -p cache4 put foo$f $tmpfile
   done
+  rm -f $tmpfile
   while ! ceph df | grep cache4 | grep ' 5 ' ; do
     echo waiting for pg stats to flush
     sleep 2
@@ -375,6 +378,7 @@ function test_tiering()
   ceph health | grep WARN | grep cache4
   ceph health detail | grep cache4 | grep 'target max' | grep objects
   ceph health detail | grep cache4 | grep 'target max' | grep 'B'
+  ceph osd tier remove-overlay datapool
   ceph osd tier remove datapool cache4
   ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
   ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
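A note on the numbers in the hunks above: each object written to cache4 is now a 4 KiB zero-filled temp file, so five objects total 4096 * 5 = 20480 bytes, just under the 21000-byte target_max_bytes and exactly at the target_max_objects limit of 5. That makes both 'target max' health warnings fire deterministically, which appears to be why the test no longer relies on the variable size of /etc/passwd against a 1000-byte target.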

src/osdc/Objecter.cc

@@ -739,7 +739,7 @@ void Objecter::_linger_submit(LingerOp *info)
   // Populate Op::target
   OSDSession *s = NULL;
-  _calc_target(&info->target);
+  _calc_target(&info->target, &info->last_force_resend);

   // Create LingerOp<->OSDSession relation
   int r = _get_session(info->target.osd, &s, lc);
@@ -931,7 +931,7 @@ void Objecter::_scan_requests(OSDSession *s,
     Op *op = p->second;
     ++p;   // check_op_pool_dne() may touch ops; prevent iterator invalidation
     ldout(cct, 10) << " checking op " << op->tid << dendl;
-    int r = _calc_target(&op->target);
+    int r = _calc_target(&op->target, &op->last_force_resend);
     switch (r) {
     case RECALC_OP_TARGET_NO_ACTION:
       if (!force_resend &&
@@ -1147,7 +1147,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
        p != need_resend_linger.end(); ++p) {
     LingerOp *op = *p;
     if (!op->session) {
-      _calc_target(&op->target);
+      _calc_target(&op->target, &op->last_force_resend);
       OSDSession *s = NULL;
       int const r = _get_session(op->target.osd, &s, lc);
       assert(r == 0);
@@ -2120,7 +2120,7 @@ ceph_tid_t Objecter::_op_submit(Op *op, RWLock::Context& lc)
   assert(op->session == NULL);
   OSDSession *s = NULL;

-  bool const check_for_latest_map = _calc_target(&op->target) == RECALC_OP_TARGET_POOL_DNE;
+  bool const check_for_latest_map = _calc_target(&op->target, &op->last_force_resend) == RECALC_OP_TARGET_POOL_DNE;

   // Try to get a session, including a retry if we need to take write lock
   int r = _get_session(op->target.osd, &s, lc);
@@ -2398,7 +2398,7 @@ int64_t Objecter::get_object_pg_hash_position(int64_t pool, const string& key,
   return p->raw_hash_to_pg(p->hash_key(key, ns));
 }

-int Objecter::_calc_target(op_target_t *t, bool any_change)
+int Objecter::_calc_target(op_target_t *t, epoch_t *last_force_resend, bool any_change)
 {
   assert(rwlock.is_locked());
@@ -2414,7 +2414,11 @@ int Objecter::_calc_target(op_target_t *t, bool any_change)
   bool force_resend = false;
   bool need_check_tiering = false;
   if (osdmap->get_epoch() == pi->last_force_op_resend) {
-    force_resend = true;
+    if (last_force_resend && *last_force_resend < pi->last_force_op_resend) {
+      *last_force_resend = pi->last_force_op_resend;
+      force_resend = true;
+    } else if (last_force_resend == 0)
+      force_resend = true;
   }

   if (t->target_oid.name.empty() || force_resend) {
     t->target_oid = t->base_oid;
@@ -2658,7 +2662,7 @@ int Objecter::_recalc_linger_op_target(LingerOp *linger_op, RWLock::Context& lc)
 {
   assert(rwlock.is_wlocked());

-  int r = _calc_target(&linger_op->target, true);
+  int r = _calc_target(&linger_op->target, &linger_op->last_force_resend, true);
   if (r == RECALC_OP_TARGET_NEED_RESEND) {
     ldout(cct, 10) << "recalc_linger_op_target tid " << linger_op->linger_id
		   << " pgid " << linger_op->target.pgid

src/osdc/Objecter.h

@@ -1193,6 +1193,8 @@ public:
     int *data_offset;

+    epoch_t last_force_resend;
+
     Op(const object_t& o, const object_locator_t& ol, vector<OSDOp>& op,
        int f, Context *ac, Context *co, version_t *ov, int *offset = NULL) :
       session(NULL), incarnation(0),
@@ -1214,7 +1216,8 @@ public:
       budgeted(false),
       should_resend(true),
       ctx_budgeted(false),
-      data_offset(offset) {
+      data_offset(offset),
+      last_force_resend(0) {
       ops.swap(op);

       /* initialize out_* to match op vector */
@@ -1549,6 +1552,8 @@ public:
     ceph_tid_t ping_tid;
     epoch_t map_dne_bound;

+    epoch_t last_force_resend;
+
     void _queued_async() {
       assert(watch_lock.is_locked());
       watch_pending_async.push_back(ceph_clock_now(NULL));
@@ -1576,7 +1581,8 @@ public:
       session(NULL),
       register_tid(0),
       ping_tid(0),
-      map_dne_bound(0) {}
+      map_dne_bound(0),
+      last_force_resend(0) {}

     // no copy!
     const LingerOp &operator=(const LingerOp& r);
@@ -1731,7 +1737,7 @@ public:
   bool _osdmap_full_flag() const;

   bool target_should_be_paused(op_target_t *op);
-  int _calc_target(op_target_t *t, bool any_change=false);
+  int _calc_target(op_target_t *t, epoch_t *last_force_resend=0, bool any_change=false);
   int _map_session(op_target_t *op, OSDSession **s,
		   RWLock::Context& lc);
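Note that the new parameter defaults to 0 (a null pointer), so any _calc_target() caller that does not track a resend epoch keeps compiling and retains the old behaviour of force-resending whenever the map epoch matches the pool's last_force_op_resend. Op and LingerOp both initialize their new last_force_resend member to 0, so the first matching epoch still triggers exactly one resend per op.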