Merge pull request #27914 from dzafman/wip-39304

osd: Include dups in copy_after() and copy_up_to()

Reviewed-by: Josh Durgin <jdurgin@redhat.com>
Reviewed-by: Neha Ojha <nojha@redhat.com>
David Zafman 2019-05-10 17:41:31 -07:00 committed by GitHub
commit 624d1172c3
6 changed files with 461 additions and 66 deletions
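In short: copy_after() and copy_up_to() previously copied only pg_log_t::log entries, so the log sent to replicas and backfill targets during peering carried none of the dup entries used for op dup detection. With this change both copies also carry the most recent dups, bounded by osd_pg_log_dups_tracked, via the shared _handle_dups() helper added below. A minimal standalone sketch of the selection window, using plain version numbers instead of eversion_t (illustrative only, not the committed code):

#include <cstdint>
#include <vector>

// Keep existing dups that fall inside the last `tracked` versions ending at the
// copied head, and also turn source-log entries at or below the copy's tail
// (i.e. not copied as log entries) into dups when they fall inside that window.
std::vector<uint64_t> select_dups(const std::vector<uint64_t>& dup_versions,
                                  const std::vector<uint64_t>& log_versions,
                                  uint64_t head, uint64_t tail, uint64_t tracked)
{
  const uint64_t earliest = head < tracked ? 0 : head - tracked + 1;
  std::vector<uint64_t> out;
  for (uint64_t v : dup_versions)
    if (v >= earliest)
      out.push_back(v);
  for (uint64_t v : log_versions)
    if (v <= tail && v >= earliest)
      out.push_back(v);
  return out;
}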


@ -0,0 +1,136 @@
#!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <contact@redhat.com>
#
# Author: David Zafman <dzafman@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7129" # git grep '\<7129\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function _common_test() {
local dir=$1
local extra_opts="$2"
local loglen="$3"
local dupslen="$4"
local objects="$5"
local moreobjects=${6:-0}
local OSDS=6
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
export EXTRA_OPTS=" $extra_opts"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
create_pool test 1 1
for j in $(seq 1 $objects)
do
rados -p test put obj-${j} /etc/passwd
done
# Mark out all OSDs for this pool
ceph osd out $(ceph pg dump pgs --format=json | jq '.pg_stats[0].up[]')
if [ "$moreobjects" != "0" ]; then
for j in $(seq 1 $moreobjects)
do
rados -p test put obj-more-${j} /etc/passwd
done
fi
sleep 1
wait_for_clean
newprimary=$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')
kill_daemons
ERRORS=0
_objectstore_tool_nodown $dir $newprimary --no-mon-config --pgid 1.0 --op log | tee $dir/result.log
LOGLEN=$(jq '.pg_log_t.log | length' $dir/result.log)
if [ $LOGLEN != "$loglen" ]; then
echo "FAILED: Wrong log length got $LOGLEN (expected $loglen)"
ERRORS=$(expr $ERRORS + 1)
fi
DUPSLEN=$(jq '.pg_log_t.dups | length' $dir/result.log)
if [ $DUPSLEN != "$dupslen" ]; then
echo "FAILED: Wrong dups length got $DUPSLEN (expected $dupslen)"
ERRORS=$(expr $ERRORS + 1)
fi
grep "copy_up_to\|copy_after" $dir/osd.*.log
rm -f $dir/result.log
if [ $ERRORS != "0" ]; then
echo TEST FAILED
return 1
fi
}
# Cause copy_up_to() to only partially copy logs, copy additional dups, and trim dups
function TEST_backfill_log_1() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2 --osd_pg_log_dups_tracked=10" 1 9 150
}
# Cause copy_up_to() to only partially copy logs, copy additional dups
function TEST_backfill_log_2() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2" 1 149 150
}
# Cause copy_after() to only copy logs, no dups
function TEST_recovery_1() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=50 --osd_max_pg_log_entries=50 --osd_pg_log_dups_tracked=60 --osd_pg_log_trim_min=10" 40 0 40
}
# Cause copy_after() to copy logs with dups
function TEST_recovery_2() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=150 --osd_max_pg_log_entries=150 --osd_pg_log_dups_tracked=3000 --osd_pg_log_trim_min=10" 151 10 141 20
}
main osd-backfill-recovery-log "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-recovery-log.sh"
# End:
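For reference, the new test can be run on its own as the compile-command footer above suggests; the working directory is assumed to be the build tree (hence the ../qa path):

../qa/run-standalone.sh osd-backfill-recovery-log.sh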


@ -57,8 +57,9 @@ void PGLog::IndexedLog::trim(
auto earliest_dup_version =
log.rbegin()->version.version < cct->_conf->osd_pg_log_dups_tracked
? 0u
- : log.rbegin()->version.version - cct->_conf->osd_pg_log_dups_tracked;
+ : log.rbegin()->version.version - cct->_conf->osd_pg_log_dups_tracked + 1;
+ lgeneric_subdout(cct, osd, 20) << "earliest_dup_version = " << earliest_dup_version << dendl;
while (!log.empty()) {
const pg_log_entry_t &e = *log.begin();
if (e.version > s)
@ -70,7 +71,6 @@ void PGLog::IndexedLog::trim(
unindex(e); // remove from index,
// add to dup list
lgeneric_subdout(cct, osd, 20) << "earliest_dup_version = " << earliest_dup_version << dendl;
if (e.version.version >= earliest_dup_version) {
if (write_from_dups != nullptr && *write_from_dups > e.version) {
lgeneric_subdout(cct, osd, 20) << "updating write_from_dups from " << *write_from_dups << " to " << e.version << dendl;
@ -572,12 +572,12 @@ bool PGLog::merge_log_dups(const pg_log_t& olog) {
}
// remove any dup entries that overlap with pglog
- if (!log.dups.empty() && log.dups.back().version >= log.tail) {
- dout(10) << "merge_log removed dups overlapping log entries [" <<
+ if (!log.dups.empty() && log.dups.back().version > log.tail) {
+ dout(10) << "merge_log removed dups overlapping log entries (" <<
log.tail << "," << log.dups.back().version << "]" << dendl;
changed = true;
- while (!log.dups.empty() && log.dups.back().version >= log.tail) {
+ while (!log.dups.empty() && log.dups.back().version > log.tail) {
log.unindex(log.dups.back());
mark_dirty_from_dups(log.dups.back().version);
log.dups.pop_back();
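A note on the two tweaks above, with illustrative numbers rather than values from the commit: with osd_pg_log_dups_tracked = 10 and the log head at version 107, the old trim() cutoff 107 - 10 = 97 left 11 versions (97..107) eligible to become dups, one more than configured; the corrected cutoff 107 - 10 + 1 = 98 keeps exactly 10 (98..107). Likewise, switching >= to > in merge_log_dups() keeps a dup whose version equals log.tail instead of discarding it, which is why the interval in the debug message becomes half-open, (log.tail, dups.back()]. A tiny standalone check of the cutoff arithmetic:

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t head = 107, tracked = 10;
  const uint64_t old_cutoff = head - tracked;      // 97 -> 11 versions >= cutoff
  const uint64_t new_cutoff = head - tracked + 1;  // 98 -> exactly `tracked` versions
  assert(head - old_cutoff + 1 == tracked + 1);
  assert(head - new_cutoff + 1 == tracked);
  return 0;
}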


@ -2271,7 +2271,7 @@ void PeeringState::activate(
last_peering_reset /* epoch to create pg at */);
// send some recent log, so that op dup detection works well.
- m->log.copy_up_to(pg_log.get_log(), cct->_conf->osd_min_pg_log_entries);
+ m->log.copy_up_to(cct, pg_log.get_log(), cct->_conf->osd_min_pg_log_entries);
m->info.log_tail = m->log.tail;
pi.log_tail = m->log.tail; // sigh...
@ -2284,7 +2284,7 @@ void PeeringState::activate(
get_osdmap_epoch(), info,
last_peering_reset /* epoch to create pg at */);
// send new stuff to append to replicas log
- m->log.copy_after(pg_log.get_log(), pi.last_update);
+ m->log.copy_after(cct, pg_log.get_log(), pi.last_update);
}
// share past_intervals if we are creating the pg on the replica
@ -2580,7 +2580,7 @@ void PeeringState::fulfill_log(
<< ", sending full log instead";
mlog->log = pg_log.get_log(); // primary should not have requested this!!
} else
- mlog->log.copy_after(pg_log.get_log(), query.since);
+ mlog->log.copy_after(cct, pg_log.get_log(), query.since);
}
else if (query.type == pg_query_t::FULLLOG) {
psdout(10) << " sending info+missing+full log" << dendl;


@ -4783,11 +4783,41 @@ void pg_log_t::generate_test_instances(list<pg_log_t*>& o)
o.back()->log.push_back(**p);
}
- void pg_log_t::copy_after(const pg_log_t &other, eversion_t v)
+ static void _handle_dups(CephContext* cct, pg_log_t &target, const pg_log_t &other, unsigned maxdups)
+ {
+ auto earliest_dup_version =
+ target.head.version < maxdups ? 0u : target.head.version - maxdups + 1;
+ lgeneric_subdout(cct, osd, 20) << "copy_up_to/copy_after earliest_dup_version " << earliest_dup_version << dendl;
+ for (auto d = other.dups.cbegin(); d != other.dups.cend(); ++d) {
+ if (d->version.version >= earliest_dup_version) {
+ lgeneric_subdout(cct, osd, 20)
+ << "copy_up_to/copy_after copy dup version "
+ << d->version << dendl;
+ target.dups.push_back(pg_log_dup_t(*d));
+ }
+ }
+ for (auto i = other.log.cbegin(); i != other.log.cend(); ++i) {
+ ceph_assert(i->version > other.tail);
+ if (i->version > target.tail)
+ break;
+ if (i->version.version >= earliest_dup_version) {
+ lgeneric_subdout(cct, osd, 20)
+ << "copy_up_to/copy_after copy dup from log version "
+ << i->version << dendl;
+ target.dups.push_back(pg_log_dup_t(*i));
+ }
+ }
+ }
+ void pg_log_t::copy_after(CephContext* cct, const pg_log_t &other, eversion_t v)
{
can_rollback_to = other.can_rollback_to;
head = other.head;
tail = other.tail;
+ lgeneric_subdout(cct, osd, 20) << __func__ << " v " << v << dendl;
for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) {
ceph_assert(i->version > other.tail);
if (i->version <= v) {
@ -4795,43 +4825,29 @@ void pg_log_t::copy_after(const pg_log_t &other, eversion_t v)
tail = i->version;
break;
}
+ lgeneric_subdout(cct, osd, 20) << __func__ << " copy log version " << i->version << dendl;
log.push_front(*i);
}
+ _handle_dups(cct, *this, other, cct->_conf->osd_pg_log_dups_tracked);
}
- void pg_log_t::copy_range(const pg_log_t &other, eversion_t from, eversion_t to)
- {
- can_rollback_to = other.can_rollback_to;
- auto i = other.log.crbegin();
- ceph_assert(i != other.log.rend());
- while (i->version > to) {
- ++i;
- ceph_assert(i != other.log.rend());
- }
- ceph_assert(i->version == to);
- head = to;
- for ( ; i != other.log.rend(); ++i) {
- if (i->version <= from) {
- tail = i->version;
- break;
- }
- log.push_front(*i);
- }
- }
- void pg_log_t::copy_up_to(const pg_log_t &other, int max)
+ void pg_log_t::copy_up_to(CephContext* cct, const pg_log_t &other, int max)
{
can_rollback_to = other.can_rollback_to;
int n = 0;
head = other.head;
tail = other.tail;
+ lgeneric_subdout(cct, osd, 20) << __func__ << " max " << max << dendl;
for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) {
ceph_assert(i->version > other.tail);
if (n++ >= max) {
tail = i->version;
break;
}
+ lgeneric_subdout(cct, osd, 20) << __func__ << " copy log version " << i->version << dendl;
log.push_front(*i);
}
+ _handle_dups(cct, *this, other, cct->_conf->osd_pg_log_dups_tracked);
}
ostream& pg_log_t::print(ostream& out) const


@ -4156,16 +4156,7 @@ public:
* @param other pg_log_t to copy from
* @param from copy entries after this version
*/
- void copy_after(const pg_log_t &other, eversion_t from);
- /**
- * copy a range of entries from another pg_log_t
- *
- * @param other pg_log_t to copy from
- * @param from copy entries after this version
- * @param to up to and including this version
- */
- void copy_range(const pg_log_t &other, eversion_t from, eversion_t to);
+ void copy_after(CephContext* cct, const pg_log_t &other, eversion_t from);
/**
* copy up to N entries
@ -4173,7 +4164,7 @@ public:
* @param other source log
* @param max max number of entries to copy
*/
- void copy_up_to(const pg_log_t &other, int max);
+ void copy_up_to(CephContext* cct, const pg_log_t &other, int max);
std::ostream& print(std::ostream& out) const;


@ -2548,14 +2548,14 @@ TEST_F(PGLogMergeDupsTest, AmEmptyOverlap) {
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
- EXPECT_EQ(3u, log.dups.size());
+ EXPECT_EQ(4u, log.dups.size());
- if (3 == log.dups.size()) {
+ if (4 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
- EXPECT_EQ(11u, log.dups.back().version.epoch);
- EXPECT_EQ(1u, log.dups.back().version.version);
+ EXPECT_EQ(12u, log.dups.back().version.epoch);
+ EXPECT_EQ(3u, log.dups.back().version.version);
}
check_order();
@ -2601,14 +2601,14 @@ TEST_F(PGLogMergeDupsTest, Later) {
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
- EXPECT_EQ(6u, log.dups.size());
+ EXPECT_EQ(7u, log.dups.size());
- if (6 == log.dups.size()) {
+ if (7 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
- EXPECT_EQ(15u, log.dups.back().version.epoch);
- EXPECT_EQ(11u, log.dups.back().version.version);
+ EXPECT_EQ(16u, log.dups.back().version.epoch);
+ EXPECT_EQ(14u, log.dups.back().version.version);
}
check_order();
@ -2680,19 +2680,13 @@ struct PGLogTrimTest :
CephContext *cct = g_ceph_context;
using ::testing::Test::SetUp;
- void SetUp(unsigned min_entries, unsigned max_entries, unsigned dup_track) {
+ void SetUp(unsigned dup_track) {
constexpr size_t size = 10;
- char min_entries_s[size];
- char max_entries_s[size];
char dup_track_s[size];
- snprintf(min_entries_s, size, "%u", min_entries);
- snprintf(max_entries_s, size, "%u", max_entries);
snprintf(dup_track_s, size, "%u", dup_track);
cct->_conf.set_val_or_die("osd_min_pg_log_entries", min_entries_s);
cct->_conf.set_val_or_die("osd_max_pg_log_entries", max_entries_s);
cct->_conf.set_val_or_die("osd_pg_log_dups_tracked", dup_track_s);
}
}; // struct PGLogTrimTest
@ -2700,17 +2694,15 @@ struct PGLogTrimTest :
TEST_F(PGLogTrimTest, TestMakingCephContext)
{
- SetUp(1, 2, 5);
+ SetUp(5);
- EXPECT_EQ(1u, cct->_conf->osd_min_pg_log_entries);
- EXPECT_EQ(2u, cct->_conf->osd_max_pg_log_entries);
EXPECT_EQ(5u, cct->_conf->osd_pg_log_dups_tracked);
}
TEST_F(PGLogTrimTest, TestPartialTrim)
{
- SetUp(1, 2, 20);
+ SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(24, 0);
log.skip_can_rollback_to_to_head();
@ -2735,7 +2727,7 @@ TEST_F(PGLogTrimTest, TestPartialTrim)
EXPECT_EQ(2u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups.size());
- SetUp(1, 2, 15);
+ SetUp(15);
std::set<eversion_t> trimmed2;
std::set<std::string> trimmed_dups2;
@ -2752,7 +2744,7 @@ TEST_F(PGLogTrimTest, TestPartialTrim)
TEST_F(PGLogTrimTest, TestTrimNoTrimmed) {
- SetUp(1, 2, 20);
+ SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
@ -2777,7 +2769,7 @@ TEST_F(PGLogTrimTest, TestTrimNoTrimmed) {
TEST_F(PGLogTrimTest, TestTrimNoDups)
{
- SetUp(1, 2, 10);
+ SetUp(10);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
@ -2805,7 +2797,7 @@ TEST_F(PGLogTrimTest, TestTrimNoDups)
TEST_F(PGLogTrimTest, TestNoTrim)
{
- SetUp(1, 2, 20);
+ SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(24, 0);
log.skip_can_rollback_to_to_head();
@ -2833,7 +2825,7 @@ TEST_F(PGLogTrimTest, TestNoTrim)
TEST_F(PGLogTrimTest, TestTrimAll)
{
- SetUp(1, 2, 20);
+ SetUp(20);
PGLog::IndexedLog log;
EXPECT_EQ(0u, log.dup_index.size()); // Sanity check
log.head = mk_evt(24, 0);
@ -2863,7 +2855,7 @@ TEST_F(PGLogTrimTest, TestTrimAll)
TEST_F(PGLogTrimTest, TestGetRequest) {
- SetUp(1, 2, 20);
+ SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
@ -2988,6 +2980,266 @@ TEST(pg_log_dup_t, get_key_name) {
EXPECT_EQ("dup_0000001234.00000000000000005678", a_key_name);
}
// This tests trim() to make copies of
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing from the original dups is copied.
TEST_F(PGLogTrimTest, TestTrimDups) {
SetUp(5);
PGLog::IndexedLog log;
log.head = mk_evt(21, 107);
log.skip_can_rollback_to_to_head();
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(21, 105), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(20, 103), write_from_dups) << log;
EXPECT_EQ(2u, log.log.size()) << log;
EXPECT_EQ(3u, log.dups.size()) << log;
}
// This tests trim() to make copies of
// 4 log entries (107, 106, 105, 104) and 5 additional for a total
// of 9 dups. Only 1 of the 2 existing dups is copied.
TEST_F(PGLogTrimTest, TestTrimDups2) {
SetUp(9);
PGLog::IndexedLog log;
log.head = mk_evt(21, 107);
log.skip_can_rollback_to_to_head();
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(20, 103), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(10, 100), write_from_dups) << log;
EXPECT_EQ(4u, log.log.size()) << log;
EXPECT_EQ(5u, log.dups.size()) << log;
}
// This tests copy_up_to() to make copies of
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing from the original dups is copied.
TEST_F(PGLogTrimTest, TestCopyUpTo) {
SetUp(5);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_up_to(cct, log, 2);
EXPECT_EQ(2u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy;
// Tracking 5 means 3 additional as dups
EXPECT_EQ(3u, copy.dups.size()) << copy;
}
// This tests copy_up_to() to make copies of
// 4 log entries (107, 106, 105, 104) and 5 additional for a total
// of 9 dups. Only 1 of the 2 existing dups is copied.
TEST_F(PGLogTrimTest, TestCopyUpTo2) {
SetUp(9);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_up_to(cct, log, 4);
EXPECT_EQ(4u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(20, 103)) << copy;
// Tracking 9 means 5 additional as dups
EXPECT_EQ(5u, copy.dups.size()) << copy;
}
// This tests copy_after() by specifying a version that copies
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing of the original dups is copied.
TEST_F(PGLogTrimTest, TestCopyAfter) {
SetUp(5);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_after(cct, log, mk_evt(21, 105));
EXPECT_EQ(2u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy;
// Tracking 5 means 3 additional as dups
EXPECT_EQ(3u, copy.dups.size()) << copy;
}
// This copies everything, dups and log, because of the large max dups
// and the value passed to copy_after().
TEST_F(PGLogTrimTest, TestCopyAfter2) {
SetUp(3000);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 93), mk_evt(8, 92), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 94), mk_evt(8, 93), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 95), mk_evt(8, 94), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 96), mk_evt(8, 95), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 97), mk_evt(8, 96), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_after(cct, log, mk_evt(9, 99));
EXPECT_EQ(8u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(9, 99)) << copy;
// Tracking 3000 is larger than all entries, so all dups copied
EXPECT_EQ(7u, copy.dups.size()) << copy;
}
// Local Variables:
// compile-command: "cd ../.. ; make unittest_pglog ; ./unittest_pglog --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* "
// End: