// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"path"
	"path/filepath"
	"testing"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/tsdb/chunks"
	"github.com/prometheus/tsdb/fileutil"
	"github.com/prometheus/tsdb/labels"
	"github.com/prometheus/tsdb/testutil"
)

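// TestSplitByRange checks that splitByRange groups block dirMetas into
// buckets aligned to the given time range width, preserving input order,
// and that misaligned or oversized blocks are left out of any bucket.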
func TestSplitByRange(t *testing.T) {
	cases := []struct {
		trange int64
		ranges [][2]int64
		output [][][2]int64
	}{
		{
			trange: 60,
			ranges: [][2]int64{{0, 10}},
			output: [][][2]int64{
				{{0, 10}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{0, 60}},
			output: [][][2]int64{
				{{0, 60}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{0, 10}, {9, 15}, {30, 60}},
			output: [][][2]int64{
				{{0, 10}, {9, 15}, {30, 60}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{70, 90}, {125, 130}, {130, 180}, {1000, 1001}},
			output: [][][2]int64{
				{{70, 90}},
				{{125, 130}, {130, 180}},
				{{1000, 1001}},
			},
		},
		// Mis-aligned or too-large blocks are ignored.
		{
			trange: 60,
			ranges: [][2]int64{{50, 70}, {70, 80}},
			output: [][][2]int64{
				{{70, 80}},
			},
		},
		{
			trange: 72,
			ranges: [][2]int64{{0, 144}, {144, 216}, {216, 288}},
			output: [][][2]int64{
				{{144, 216}},
				{{216, 288}},
			},
		},
		// Various awkward edge cases that are easy to hit with negative numbers.
		{
			trange: 60,
			ranges: [][2]int64{{-10, -5}},
			output: [][][2]int64{
				{{-10, -5}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{-60, -50}, {-10, -5}},
			output: [][][2]int64{
				{{-60, -50}, {-10, -5}},
			},
		},
		{
			trange: 60,
			ranges: [][2]int64{{-60, -50}, {-10, -5}, {0, 15}},
			output: [][][2]int64{
				{{-60, -50}, {-10, -5}},
				{{0, 15}},
			},
		},
	}

	for _, c := range cases {
		// Transform input range tuples into dirMetas.
		blocks := make([]dirMeta, 0, len(c.ranges))
		for _, r := range c.ranges {
			blocks = append(blocks, dirMeta{
				meta: &BlockMeta{
					MinTime: r[0],
					MaxTime: r[1],
				},
			})
		}

		// Transform output range tuples into dirMetas.
		exp := make([][]dirMeta, len(c.output))
		for i, group := range c.output {
			for _, r := range group {
				exp[i] = append(exp[i], dirMeta{
					meta: &BlockMeta{MinTime: r[0], MaxTime: r[1]},
				})
			}
		}

		testutil.Equals(t, exp, splitByRange(blocks, c.trange))
	}
}

// See https://github.com/prometheus/prometheus/issues/3064
func TestNoPanicFor0Tombstones(t *testing.T) {
	metas := []dirMeta{
		{
			dir: "1",
			meta: &BlockMeta{
				MinTime: 0,
				MaxTime: 100,
			},
		},
		{
			dir: "2",
			meta: &BlockMeta{
				MinTime: 101,
				MaxTime: 200,
			},
		},
	}

	c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil)
	testutil.Ok(t, err)

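	// plan must not panic when no block carries any tombstones.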
	c.plan(metas)
}

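// TestLeveledCompactor_plan exercises the compaction planner against
// table-driven sets of block metas and asserts which blocks it selects.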
func TestLeveledCompactor_plan(t *testing.T) {
	// This mimics our default ExponentialBlockRanges with min block size equal to 20.
	compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
		20,
		60,
		180,
		540,
		1620,
	}, nil)
	testutil.Ok(t, err)

	cases := map[string]struct {
		metas    []dirMeta
		expected []string
	}{
		"Outside Range": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
			},
			expected: nil,
		},
		"We should wait for four blocks of size 20 to appear before compacting.": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
			},
			expected: nil,
		},
		`We should wait for the next block of size 20 to appear before compacting
		the existing ones. We have three, but we ignore the fresh one from the WAL.`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
			},
			expected: nil,
		},
		"Block to fill the entire parent range appeared – should be compacted": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 80, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		`Block for the next parent range appeared with a gap of size 20. Nothing will happen in the first one
		anymore, but we still ignore the fresh one, so no compaction.`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
			},
			expected: nil,
		},
		`Block for the next parent range appeared, and we have a gap of size 20 between the second and third blocks.
		The missed gap will never be filled, so we should compact just these two.`: {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
				metaRange("4", 80, 100, nil),
			},
			expected: []string{"1", "2"},
		},
		"We have 20, 20, 20, 60, 60 range blocks. '5' is marked as the fresh one": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 120, 180, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		"We have 20, 60, 20, 60, 240 range blocks. We can compact 20 + 60 + 60": {
			metas: []dirMeta{
				metaRange("2", 20, 40, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 960, 980, nil), // Fresh one.
				metaRange("6", 120, 180, nil),
				metaRange("7", 720, 960, nil),
			},
			expected: []string{"2", "4", "6"},
		},
		"Do not select large blocks that have many tombstones when there is no fresh block": {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
			},
			expected: nil,
		},
		"Select large blocks that have many tombstones when a fresh block appears": {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
				metaRange("2", 540, 560, nil),
			},
			expected: []string{"1"},
		},
		"For small blocks, do not compact tombstones, even when a fresh block appears.": {
			metas: []dirMeta{
				metaRange("1", 0, 60, &BlockStats{
					NumSeries:     10,
					NumTombstones: 3,
				}),
				metaRange("2", 60, 80, nil),
			},
			expected: nil,
		},
		`Regression test: we were stuck in a compaction loop where we always recompacted
		the same block when its tombstone and series counts were zero.`: {
			metas: []dirMeta{
				metaRange("1", 0, 540, &BlockStats{
					NumSeries:     0,
					NumTombstones: 0,
				}),
				metaRange("2", 540, 560, nil),
			},
			expected: nil,
		},
		`Regression test: we were wrongly assuming that a new block is fresh from the WAL when its ULID is the newest.
		We need to look at the max time instead.

		With the previous, wrong approach, block "8" was ignored, so we were wrongly compacting 5 and 7 and introducing
		block overlaps.`: {
			metas: []dirMeta{
				metaRange("5", 0, 360, nil),
				metaRange("6", 540, 560, nil), // Fresh one.
				metaRange("7", 360, 420, nil),
				metaRange("8", 420, 540, nil),
			},
			expected: []string{"7", "8"},
		},
		// |--------------|
		//               |----------------|
		//                                    |--------------|
		"Overlapping blocks 1": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 19, 40, nil),
				metaRange("3", 40, 60, nil),
			},
			expected: []string{"1", "2"},
		},
		// |--------------|
		//                |--------------|
		//                          |--------------|
		"Overlapping blocks 2": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 30, 50, nil),
			},
			expected: []string{"2", "3"},
		},
		// |--------------|
		//        |---------------------|
		//                      |--------------|
		"Overlapping blocks 3": {
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 10, 40, nil),
				metaRange("3", 30, 50, nil),
			},
			expected: []string{"1", "2", "3"},
		},
		// |--------------|
		//          |--------------------------------|
		//                   |--------------|
		//                                  |--------------|
		"Overlapping blocks 4": {
			metas: []dirMeta{
				metaRange("5", 0, 360, nil),
				metaRange("6", 340, 560, nil),
				metaRange("7", 360, 420, nil),
				metaRange("8", 420, 540, nil),
			},
			expected: []string{"5", "6", "7", "8"},
		},
		// |--------------|
		//               |--------------|
		//                                       |--------------|
		//                                                      |--------------|
		"Overlapping blocks 5": {
			metas: []dirMeta{
				metaRange("1", 0, 10, nil),
				metaRange("2", 9, 20, nil),
				metaRange("3", 30, 40, nil),
				metaRange("4", 39, 50, nil),
			},
			expected: []string{"1", "2"},
		},
	}

	for title, c := range cases {
		if !t.Run(title, func(t *testing.T) {
			res, err := compactor.plan(c.metas)
			testutil.Ok(t, err)
			testutil.Equals(t, c.expected, res)
		}) {
			return
		}
	}
}

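// TestRangeWithFailedCompactionWontGetSelected checks that plan never selects
// a range containing a block whose previous compaction attempt failed.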
func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
	compactor, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{
		20,
		60,
		240,
		720,
		2160,
	}, nil)
	testutil.Ok(t, err)

	cases := []struct {
		metas []dirMeta
	}{
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 80, nil),
			},
		},
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 60, 80, nil),
				metaRange("4", 80, 100, nil),
			},
		},
		{
			metas: []dirMeta{
				metaRange("1", 0, 20, nil),
				metaRange("2", 20, 40, nil),
				metaRange("3", 40, 60, nil),
				metaRange("4", 60, 120, nil),
				metaRange("5", 120, 180, nil),
				metaRange("6", 180, 200, nil),
			},
		},
	}

	for _, c := range cases {
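		// Fail the second block's compaction so that no range containing it gets selected.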
		c.metas[1].meta.Compaction.Failed = true
		res, err := compactor.plan(c.metas)
		testutil.Ok(t, err)

		testutil.Equals(t, []string(nil), res)
	}
}

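// TestCompactionFailWillCleanUpTempDir checks that a failed compaction
// removes the temporary .tmp block directory it was writing to.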
func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
	compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{
		20,
		60,
		240,
		720,
		2160,
	}, nil)
	testutil.Ok(t, err)

	tmpdir, err := ioutil.TempDir("", "test")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()

	testutil.NotOk(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{}))
	_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + ".tmp")
	testutil.Assert(t, os.IsNotExist(err), "directory is not cleaned up")
}

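// metaRange is a test helper that builds a dirMeta with the given directory
// name, time range, and optional stats.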
func metaRange(name string, mint, maxt int64, stats *BlockStats) dirMeta {
	meta := &BlockMeta{MinTime: mint, MaxTime: maxt}
	if stats != nil {
		meta.Stats = *stats
	}
	return dirMeta{
		dir:  name,
		meta: meta,
	}
}

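// erringBReader is a BlockReader whose accessors always return an error,
// used to exercise the compactor's error paths.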
type erringBReader struct{}

func (erringBReader) Index() (IndexReader, error)          { return nil, errors.New("index") }
func (erringBReader) Chunks() (ChunkReader, error)         { return nil, errors.New("chunks") }
func (erringBReader) Tombstones() (TombstoneReader, error) { return nil, errors.New("tombstones") }
func (erringBReader) Meta() BlockMeta                      { return BlockMeta{} }

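// nopChunkWriter discards every chunk written to it, so tests can run
// populateBlock while inspecting only the index output.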
type nopChunkWriter struct{}

func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil }
func (nopChunkWriter) Close() error                            { return nil }

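// TestCompaction_populateBlock runs populateBlock over synthetic input blocks
// and verifies the merged series and chunks as well as the resulting block stats.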
func TestCompaction_populateBlock(t *testing.T) {
	var populateBlocksCases = []struct {
		title              string
		inputSeriesSamples [][]seriesSamples
		compactMinTime     int64
		compactMaxTime     int64 // When not defined, the test runner sets a default of math.MaxInt64.
		expSeriesSamples   []seriesSamples
		expErr             error
	}{
		{
			title:              "Populate block from empty input should return error.",
			inputSeriesSamples: [][]seriesSamples{},
			expErr:             errors.New("cannot populate block from no readers"),
		},
		{
			// Populate from a single block without chunks. We expect this kind of series to be ignored.
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset: map[string]string{"a": "b"},
					},
				},
			},
		},
		{
			title: "Populate from single block. We expect the same samples at the output.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
				},
			},
		},
		{
			title: "Populate from two blocks.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}},
					},
					{
						// no-chunk series should be dropped.
						lset: map[string]string{"a": "empty"},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 40}, {t: 45}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
				},
				{
					lset:   map[string]string{"a": "c"},
					chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
				},
			},
		},
		{
			title: "Populate from two blocks showing that order is maintained.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
					{
						lset:   map[string]string{"a": "c"},
						chunks: [][]sample{{{t: 40}, {t: 45}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 0}, {t: 10}}, {{t: 11}, {t: 20}}, {{t: 21}, {t: 30}}},
				},
				{
					lset:   map[string]string{"a": "c"},
					chunks: [][]sample{{{t: 1}, {t: 9}}, {{t: 10}, {t: 19}}, {{t: 40}, {t: 45}}},
				},
			},
		},
		{
			title: "Populate from two blocks showing that order of series is sorted.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "4"},
						chunks: [][]sample{{{t: 5}, {t: 7}}},
					},
					{
						lset:   map[string]string{"a": "3"},
						chunks: [][]sample{{{t: 5}, {t: 6}}},
					},
					{
						lset:   map[string]string{"a": "same"},
						chunks: [][]sample{{{t: 1}, {t: 4}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "2"},
						chunks: [][]sample{{{t: 1}, {t: 3}}},
					},
					{
						lset:   map[string]string{"a": "1"},
						chunks: [][]sample{{{t: 1}, {t: 2}}},
					},
					{
						lset:   map[string]string{"a": "same"},
						chunks: [][]sample{{{t: 5}, {t: 8}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "1"},
					chunks: [][]sample{{{t: 1}, {t: 2}}},
				},
				{
					lset:   map[string]string{"a": "2"},
					chunks: [][]sample{{{t: 1}, {t: 3}}},
				},
				{
					lset:   map[string]string{"a": "3"},
					chunks: [][]sample{{{t: 5}, {t: 6}}},
				},
				{
					lset:   map[string]string{"a": "4"},
					chunks: [][]sample{{{t: 5}, {t: 7}}},
				},
				{
					lset:   map[string]string{"a": "same"},
					chunks: [][]sample{{{t: 1}, {t: 4}}, {{t: 5}, {t: 8}}},
				},
			},
		},
		{
			// This should never happen because the head block ensures that chunks do not cross block boundaries.
			title: "Populate from single block containing chunk outside of compact meta time range.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 30}}},
					},
				},
			},
			compactMinTime: 0,
			compactMaxTime: 20,
			expErr:         errors.New("found chunk with minTime: 10 maxTime: 30 outside of compacted minTime: 0 maxTime: 20"),
		},
		{
			// Introduced by https://github.com/prometheus/tsdb/issues/347.
			title: "Populate from single block containing extra chunk",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "issue347"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
					},
				},
			},
			compactMinTime: 0,
			compactMaxTime: 10,
			expErr:         errors.New("found chunk with minTime: 10 maxTime: 20 outside of compacted minTime: 0 maxTime: 10"),
		},
		{
			// Deduplication expected.
			// Introduced by pull/370 and pull/539.
			title: "Populate from two blocks containing duplicated chunk.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
					},
				},
				{
					{
						lset:   map[string]string{"a": "b"},
						chunks: [][]sample{{{t: 10}, {t: 20}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"a": "b"},
					chunks: [][]sample{{{t: 1}, {t: 2}}, {{t: 10}, {t: 20}}},
				},
			},
		},
		{
			// Introduced by https://github.com/prometheus/tsdb/pull/539.
			title: "Populate from three blocks, the last two of which overlap.",
			inputSeriesSamples: [][]seriesSamples{
				{
					{
						lset:   map[string]string{"before": "fix"},
						chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 20}}},
					},
					{
						lset:   map[string]string{"after": "fix"},
						chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 20}}},
					},
				},
				{
					{
						lset:   map[string]string{"before": "fix"},
						chunks: [][]sample{{{t: 19}, {t: 30}}},
					},
					{
						lset:   map[string]string{"after": "fix"},
						chunks: [][]sample{{{t: 21}, {t: 30}}},
					},
				},
				{
					{
						lset:   map[string]string{"before": "fix"},
						chunks: [][]sample{{{t: 27}, {t: 35}}},
					},
					{
						lset:   map[string]string{"after": "fix"},
						chunks: [][]sample{{{t: 27}, {t: 35}}},
					},
				},
			},
			expSeriesSamples: []seriesSamples{
				{
					lset:   map[string]string{"after": "fix"},
					chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 20}}, {{t: 21}, {t: 27}, {t: 30}, {t: 35}}},
				},
				{
					lset:   map[string]string{"before": "fix"},
					chunks: [][]sample{{{t: 0}, {t: 10}, {t: 11}, {t: 19}, {t: 20}, {t: 27}, {t: 30}, {t: 35}}},
				},
			},
		},
	}

	for _, tc := range populateBlocksCases {
		if ok := t.Run(tc.title, func(t *testing.T) {
			blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
			for _, b := range tc.inputSeriesSamples {
				ir, cr, mint, maxt := createIdxChkReaders(t, b)
				blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt})
			}

			c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil)
			testutil.Ok(t, err)

			meta := &BlockMeta{
				MinTime: tc.compactMinTime,
				MaxTime: tc.compactMaxTime,
			}
			if meta.MaxTime == 0 {
				meta.MaxTime = math.MaxInt64
			}

			iw := &mockIndexWriter{}
			err = c.populateBlock(blocks, meta, iw, nopChunkWriter{})
			if tc.expErr != nil {
				testutil.NotOk(t, err)
				testutil.Equals(t, tc.expErr.Error(), err.Error())
				return
			}
			testutil.Ok(t, err)

			testutil.Equals(t, tc.expSeriesSamples, iw.series)

			// Check if stats are calculated properly.
			s := BlockStats{
				NumSeries: uint64(len(tc.expSeriesSamples)),
			}
			for _, series := range tc.expSeriesSamples {
				s.NumChunks += uint64(len(series.chunks))
				for _, chk := range series.chunks {
					s.NumSamples += uint64(len(chk))
				}
			}
			testutil.Equals(t, s, meta.Stats)
		}); !ok {
			return
		}
	}
}

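// BenchmarkCompaction measures Compact over sets of disjoint blocks
// ("normal" compaction) and blocks with 40% overlap ("vertical" compaction).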
func BenchmarkCompaction(b *testing.B) {
	cases := []struct {
		ranges         [][2]int64
		compactionType string
	}{
		{
			ranges:         [][2]int64{{0, 100}, {200, 300}, {400, 500}, {600, 700}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 1000}, {2000, 3000}, {4000, 5000}, {6000, 7000}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 2000}, {3000, 5000}, {6000, 8000}, {9000, 11000}},
			compactionType: "normal",
		},
		{
			ranges:         [][2]int64{{0, 5000}, {6000, 11000}, {12000, 17000}, {18000, 23000}},
			compactionType: "normal",
		},
		// 40% overlaps.
		{
			ranges:         [][2]int64{{0, 100}, {60, 160}, {120, 220}, {180, 280}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 1000}, {600, 1600}, {1200, 2200}, {1800, 2800}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 2000}, {1200, 3200}, {2400, 4400}, {3600, 5600}},
			compactionType: "vertical",
		},
		{
			ranges:         [][2]int64{{0, 5000}, {3000, 8000}, {6000, 11000}, {9000, 14000}},
			compactionType: "vertical",
		},
	}

	nSeries := 10000
	for _, c := range cases {
		nBlocks := len(c.ranges)
		b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) {
			dir, err := ioutil.TempDir("", "bench_compaction")
			testutil.Ok(b, err)
			defer func() {
				testutil.Ok(b, os.RemoveAll(dir))
			}()
			blockDirs := make([]string, 0, len(c.ranges))
			var blocks []*Block
			for _, r := range c.ranges {
				block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil)
				testutil.Ok(b, err)
				blocks = append(blocks, block)
				defer func() {
					testutil.Ok(b, block.Close())
				}()
				blockDirs = append(blockDirs, block.Dir())
			}

			c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
			testutil.Ok(b, err)

			b.ResetTimer()
			b.ReportAllocs()
			_, err = c.Compact(dir, blockDirs, blocks)
			testutil.Ok(b, err)
		})
	}
}

// TestDisableAutoCompactions checks that we can
// disable and enable the auto compaction.
// This is needed for unit tests that rely on
// checking state before and after a compaction.
func TestDisableAutoCompactions(t *testing.T) {
	db, delete := openTestDB(t, nil)
	defer func() {
		testutil.Ok(t, db.Close())
		delete()
	}()

	blockRange := DefaultOptions.BlockRanges[0]
	label := labels.FromStrings("foo", "bar")

	// Trigger a compaction to check that it was skipped and
	// no new blocks were created when compaction is disabled.
	db.DisableCompactions()
	app := db.Appender()
	for i := int64(0); i < 3; i++ {
		_, err := app.Add(label, i*blockRange, 0)
		testutil.Ok(t, err)
		_, err = app.Add(label, i*blockRange+1000, 0)
		testutil.Ok(t, err)
	}
	testutil.Ok(t, app.Commit())

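	// Trigger a compaction without blocking in case one is already queued.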
	select {
	case db.compactc <- struct{}{}:
	default:
	}

	for x := 0; x < 10; x++ {
		if prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0 {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}

	testutil.Assert(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0, "No compaction was skipped after the set timeout.")
	testutil.Equals(t, 0, len(db.blocks))

	// Enable the compaction, trigger it and check that the block is persisted.
	db.EnableCompactions()
	select {
	case db.compactc <- struct{}{}:
	default:
	}
	for x := 0; x < 100; x++ {
		if len(db.Blocks()) > 0 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	testutil.Assert(t, len(db.Blocks()) > 0, "No block was persisted after the set timeout.")
}

// TestCancelCompactions ensures that when the db is closed
// any running compaction is cancelled to unblock closing the db.
func TestCancelCompactions(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "testCancelCompaction")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()

	// Create some blocks to fall within the compaction range.
	createBlock(t, tmpdir, genSeries(10, 10000, 0, 1000))
	createBlock(t, tmpdir, genSeries(10, 10000, 1000, 2000))
	createBlock(t, tmpdir, genSeries(1, 1, 2000, 2001)) // The most recent block is ignored, so it can be a small one.

	// Copy the db so we have an exact copy to compare compaction times.
	tmpdirCopy := tmpdir + "Copy"
	err = fileutil.CopyDirs(tmpdir, tmpdirCopy)
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdirCopy))
	}()

	// Measure the compaction time without interrupting it.
	var timeCompactionUninterrupted time.Duration
	{
		db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
		testutil.Ok(t, err)
		testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
		testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
		db.compactc <- struct{}{} // Trigger a compaction.
		var start time.Time
		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) <= 0 {
			time.Sleep(3 * time.Millisecond)
		}
		start = time.Now()

		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran) != 1 {
			time.Sleep(3 * time.Millisecond)
		}
		timeCompactionUninterrupted = time.Since(start)

		testutil.Ok(t, db.Close())
	}
	// Measure the compaction time when closing the db in the middle of compaction.
	{
		db, err := Open(tmpdirCopy, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
		testutil.Ok(t, err)
		testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
		testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
		db.compactc <- struct{}{} // Trigger a compaction.
		dbClosed := make(chan struct{})

		for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) <= 0 {
			time.Sleep(3 * time.Millisecond)
		}
		go func() {
			testutil.Ok(t, db.Close())
			close(dbClosed)
		}()

		start := time.Now()
		<-dbClosed
		actT := time.Since(start)
		expT := time.Duration(timeCompactionUninterrupted / 2) // Closing the db in the middle of compaction should take less than half the uninterrupted time.
		testutil.Assert(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
	}
}

// TestDeleteCompactionBlockAfterFailedReload ensures that a failed reload immediately after a compaction
// deletes the resulting block to avoid creating blocks with the same time range.
func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {

	tests := map[string]func(*DB) int{
		"Test Head Compaction": func(db *DB) int {
			rangeToTriggerCompaction := db.opts.BlockRanges[0]/2*3 - 1
			defaultLabel := labels.FromStrings("foo", "bar")

			// Add some data to the head that is enough to trigger a compaction.
			app := db.Appender()
			_, err := app.Add(defaultLabel, 1, 0)
			testutil.Ok(t, err)
			_, err = app.Add(defaultLabel, 2, 0)
			testutil.Ok(t, err)
			_, err = app.Add(defaultLabel, 3+rangeToTriggerCompaction, 0)
			testutil.Ok(t, err)
			testutil.Ok(t, app.Commit())

			return 0
		},
		"Test Block Compaction": func(db *DB) int {
			blocks := []*BlockMeta{
				{MinTime: 0, MaxTime: 100},
				{MinTime: 100, MaxTime: 150},
				{MinTime: 150, MaxTime: 200},
			}
			for _, m := range blocks {
				createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
			}
			testutil.Ok(t, db.reload())
			testutil.Equals(t, len(blocks), len(db.Blocks()), "unexpected block count after a reload")

			return len(blocks)
		},
	}

	for title, bootStrap := range tests {
		t.Run(title, func(t *testing.T) {
			db, delete := openTestDB(t, &Options{
				BlockRanges: []int64{1, 100},
			})
			defer func() {
				testutil.Ok(t, db.Close())
				delete()
			}()
			db.DisableCompactions()

			expBlocks := bootStrap(db)

			// Create a block that will trigger the reload to fail.
			blockPath := createBlock(t, db.Dir(), genSeries(1, 1, 200, 300))
			lastBlockIndex := path.Join(blockPath, indexFilename)
			actBlocks, err := blockDirs(db.Dir())
			testutil.Ok(t, err)
			testutil.Equals(t, expBlocks, len(actBlocks)-1) // -1 to exclude the corrupted block.
			testutil.Ok(t, os.RemoveAll(lastBlockIndex))    // Corrupt the block by removing the index file.

			testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "initial 'failed db reload' count metrics mismatch")
			testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial `compactions` count metric mismatch")
			testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "initial `compactions failed` count metric mismatch")

			// Do the compaction and check the metrics.
			// Compaction should succeed, but the reload should fail and
			// the new block created from the compaction should be deleted.
			testutil.NotOk(t, db.compact())
			testutil.Equals(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reload' count metrics mismatch")
			testutil.Equals(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "`compaction` count metric mismatch")
			testutil.Equals(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch")

			actBlocks, err = blockDirs(db.Dir())
			testutil.Ok(t, err)
			testutil.Equals(t, expBlocks, len(actBlocks)-1, "block count should be the same as before the compaction") // -1 to exclude the corrupted block.
		})
	}
}
|