PGLog::proc_replica_log,merge_log: fix bound for last_update
If olog.tail > log.tail, olog.tail is also a bound on last_update.  See
the changed comment in the commit for details.  Also adds unit tests.

Fixes: http://tracker.ceph.com/issues/18127
Signed-off-by: Samuel Just <sjust@redhat.com>
parent 7b1293e70d
commit b03c950135
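Roughly, the bound works as follows. Below is a minimal, self-contained sketch of the idea, using simplified stand-ins for eversion_t and the log structures rather than the Ceph types; the helper names are illustrative only, not the actual implementation.

// Sketch: when no usable non-divergent entry exists, the corrected
// last_update must fall back to MAX(log.tail, olog.tail), not log.tail,
// because the event represented by either tail cannot be divergent.
#include <cassert>
#include <optional>

struct Ev {
  unsigned epoch = 0, version = 0;
};
bool operator<(const Ev &a, const Ev &b) {
  return a.epoch < b.epoch || (a.epoch == b.epoch && a.version < b.version);
}
Ev max_ev(Ev a, Ev b) { return a < b ? b : a; }

// Old rule: bound only by log.tail.
Ev last_update_old(std::optional<Ev> first_non_divergent, Ev log_tail,
                   Ev /*olog_tail*/) {
  return (!first_non_divergent || *first_non_divergent < log_tail)
             ? log_tail
             : *first_non_divergent;
}

// New rule: bound by MAX(log.tail, olog.tail).
Ev last_update_new(std::optional<Ev> first_non_divergent, Ev log_tail,
                   Ev olog_tail) {
  Ev limit = max_ev(log_tail, olog_tail);
  return (!first_non_divergent || *first_non_divergent < limit)
             ? limit
             : *first_non_divergent;
}

int main() {
  // Loosely mirrors the olog_tail_gt_log_tail_split test added below: the
  // replica's tail (15,151) is newer than the primary log's tail (10,99), and
  // the first non-divergent shared entry (15,150) sits below the replica's tail.
  Ev log_tail{10, 99}, olog_tail{15, 151}, fnd{15, 150};
  assert(last_update_old(fnd, log_tail, olog_tail).version == 150);  // too low
  assert(last_update_new(fnd, log_tail, olog_tail).version == 151);  // olog.tail
}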
@@ -176,16 +176,21 @@ void PGLog::proc_replica_log(
   }
 
   /* Because olog.head >= log.tail, we know that both pgs must at least have
-   * the event represented by log.tail.  Thus, lower_bound >= log.tail.  It's
+   * the event represented by log.tail.  Similarly, because log.head >= olog.tail,
+   * we know that the event represented by olog.tail must be common to both logs.
+   * Furthermore, the event represented by a log tail was necessarily trimmed,
+   * thus neither olog.tail nor log.tail can be divergent. It's
    * possible that olog/log contain no actual events between olog.head and
-   * log.tail, however, since they might have been split out.  Thus, if
-   * we cannot find an event e such that log.tail <= e.version <= log.head,
-   * the last_update must actually be log.tail.
+   * MAX(log.tail, olog.tail), however, since they might have been split out.
+   * Thus, if we cannot find an event e such that
+   * log.tail <= e.version <= log.head, the last_update must actually be
+   * MAX(log.tail, olog.tail).
    */
+  eversion_t limit = MAX(olog.tail, log.tail);
   eversion_t lu =
     (first_non_divergent == log.log.rend() ||
-     first_non_divergent->version < log.tail) ?
-    log.tail :
+     first_non_divergent->version < limit) ?
+    limit :
     first_non_divergent->version;
 
   IndexedLog folog(olog);
@@ -290,6 +295,7 @@ void PGLog::merge_log(ObjectStore::Transaction& t,
   // this is just filling in history.  it does not affect our
   // missing set, as that should already be consistent with our
   // current log.
+  eversion_t orig_tail = log.tail;
   if (olog.tail < log.tail) {
     dout(10) << "merge_log extending tail to " << olog.tail << dendl;
     list<pg_log_entry_t>::iterator from = olog.log.begin();
@@ -336,19 +342,20 @@ void PGLog::merge_log(ObjectStore::Transaction& t,
     // find start point in olog
     list<pg_log_entry_t>::iterator to = olog.log.end();
     list<pg_log_entry_t>::iterator from = olog.log.end();
-    eversion_t lower_bound = olog.tail;
+    eversion_t lower_bound = MAX(olog.tail, orig_tail);
     while (1) {
       if (from == olog.log.begin())
        break;
       --from;
       dout(20) << "  ? " << *from << dendl;
       if (from->version <= log.head) {
-       dout(20) << "merge_log cut point (usually last shared) is " << *from << dendl;
-       lower_bound = from->version;
+       lower_bound = MAX(lower_bound, from->version);
        ++from;
        break;
       }
     }
+    dout(20) << "merge_log cut point (usually last shared) is "
+            << lower_bound << dendl;
     mark_dirty_from(lower_bound);
 
     auto divergent = log.rewind_from_head(lower_bound);
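The merge_log hunks above apply the same principle when choosing the rewind cut point: it can never fall below either log's tail. A rough, self-contained sketch of that clamping, using simplified types rather than the Ceph IndexedLog/eversion_t structures (the helper names here are illustrative only):

// Sketch: walk olog from its head looking for the last entry shared with the
// local log (version <= log.head); bound the result below by both tails.
#include <cassert>
#include <list>

struct Ev {
  unsigned epoch = 0, version = 0;
};
bool operator<(const Ev &a, const Ev &b) {
  return a.epoch < b.epoch || (a.epoch == b.epoch && a.version < b.version);
}
Ev max_ev(Ev a, Ev b) { return a < b ? b : a; }
bool le(Ev a, Ev b) { return !(b < a); }

Ev cut_point(const std::list<Ev> &olog_entries, Ev olog_tail, Ev orig_log_tail,
             Ev log_head) {
  Ev lower_bound = max_ev(olog_tail, orig_log_tail);    // was just olog_tail
  for (auto it = olog_entries.rbegin(); it != olog_entries.rend(); ++it) {
    if (le(*it, log_head)) {
      lower_bound = max_ev(lower_bound, *it);            // was an unconditional assignment
      break;
    }
  }
  return lower_bound;
}

int main() {
  // Loosely mirrors the olog_tail_gt_log_tail_split2 test below: the local
  // log's original tail is (15,151), the authoritative log reaches back to
  // (10,99), and its last entry shared with the local head (15,153) is (15,150).
  std::list<Ev> olog = {{10, 100}, {15, 150}, {16, 155}};
  Ev got = cut_point(olog, /*olog_tail=*/{10, 99}, /*orig_log_tail=*/{15, 151},
                     /*log_head=*/{15, 153});
  // The cut point is clamped to the local log's original tail, not (15,150).
  assert(got.epoch == 15 && got.version == 151);
}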
@@ -260,7 +260,8 @@ public:
     for (list<pg_log_entry_t>::const_iterator i = tcase.auth.begin();
          i != tcase.auth.end();
          ++i) {
-      omissing.add_next_event(*i);
+      if (i->version > oinfo.last_update)
+       omissing.add_next_event(*i);
     }
     verify_missing(tcase, omissing);
   }
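The test-harness change above only counts authoritative entries strictly newer than the replica's last_update as missing, since the replica already has everything at or below that version. A minimal illustrative sketch of that rule, with simplified types rather than the Ceph pg_missing_t API (names here are hypothetical):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct Ev {
  unsigned epoch = 0, version = 0;
  bool operator>(const Ev &o) const {
    return epoch > o.epoch || (epoch == o.epoch && version > o.version);
  }
};

// Collect the object/version pairs a replica at `last_update` should be missing.
std::vector<std::pair<std::string, Ev>> expected_missing(
    const std::vector<std::pair<std::string, Ev>> &auth_log, Ev last_update) {
  std::vector<std::pair<std::string, Ev>> missing;
  for (const auto &e : auth_log)
    if (e.second > last_update)      // skip entries the replica already has
      missing.push_back(e);
  return missing;
}

int main() {
  Ev last_update{15, 151};
  std::vector<std::pair<std::string, Ev>> auth = {
      {"obj1", {10, 100}}, {"obj1", {15, 150}}, {"obj1", {15, 155}}};
  auto m = expected_missing(auth, last_update);
  assert(m.size() == 1 && m[0].second.version == 155);  // only the newer event
}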
@@ -1947,6 +1948,34 @@ TEST_F(PGLogTest, merge_log_split_missing_entries_at_head) {
   run_test_case(t);
 }
 
+TEST_F(PGLogTest, olog_tail_gt_log_tail_split) {
+  TestCase t;
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150)));
+
+  t.setup();
+  t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
+  t.set_auth_bounds(mk_evt(15, 156), mk_evt(10, 99));
+  t.final.add(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150));
+  run_test_case(t);
+}
+
+TEST_F(PGLogTest, olog_tail_gt_log_tail_split2) {
+  TestCase t;
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
+  t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(16, 155), mk_evt(15, 150)));
+  t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 153), mk_evt(15, 150)));
+
+  t.setup();
+  t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
+  t.set_auth_bounds(mk_evt(16, 156), mk_evt(10, 99));
+  t.final.add(mk_obj(1), mk_evt(16, 155), mk_evt(0, 0));
+  t.toremove.insert(mk_obj(1));
+  run_test_case(t);
+}
+
 TEST_F(PGLogTest, filter_log_1) {
   {
     clear();