2015-01-21 19:07:45 +00:00
|
|
|
// Copyright 2014 The Prometheus Authors
|
2014-09-19 16:18:44 +00:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2014-09-16 13:47:24 +00:00
|
|
|
package local
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
import (
|
2014-09-23 17:21:10 +00:00
|
|
|
"reflect"
|
2015-04-14 08:43:09 +00:00
|
|
|
"sync"
|
2014-06-06 09:55:53 +00:00
|
|
|
"testing"
|
|
|
|
|
|
|
|
clientmodel "github.com/prometheus/client_golang/model"
|
|
|
|
|
2014-09-23 17:21:10 +00:00
|
|
|
"github.com/prometheus/prometheus/storage/local/codable"
|
2014-09-14 13:33:56 +00:00
|
|
|
"github.com/prometheus/prometheus/storage/local/index"
|
2014-06-06 09:55:53 +00:00
|
|
|
"github.com/prometheus/prometheus/storage/metric"
|
2014-08-13 15:13:28 +00:00
|
|
|
"github.com/prometheus/prometheus/utility/test"
|
2014-06-06 09:55:53 +00:00
|
|
|
)
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// Test fixture metrics. All share the label name "label" with distinct
// values so each gets a distinct fingerprint.
var (
	m1 = clientmodel.Metric{"label": "value1"}
	m2 = clientmodel.Metric{"label": "value2"}
	m3 = clientmodel.Metric{"label": "value3"}
	m4 = clientmodel.Metric{"label": "value4"}
	m5 = clientmodel.Metric{"label": "value5"}
)
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// newTestPersistence creates a persistence backed by a fresh temporary
// directory, configured for the given chunk encoding. It returns the
// persistence and a Closer that shuts it down and removes the directory.
// Note: it mutates the package-global *defaultChunkEncoding flag value.
func newTestPersistence(t *testing.T, encoding chunkEncoding) (*persistence, test.Closer) {
	*defaultChunkEncoding = int(encoding)
	dir := test.NewTemporaryDirectory("test_persistence", t)
	// dirty=false, pedanticChecks=false; the callback reports "not dirty".
	p, err := newPersistence(dir.Path(), false, false, func() bool { return false })
	if err != nil {
		dir.Close()
		t.Fatal(err)
	}
	return p, test.NewCallbackCloser(func() {
		p.close()
		dir.Close()
	})
}
|
2014-08-13 15:13:28 +00:00
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// buildTestChunks creates 10 single-sample chunks for each of the three
// fixture metrics m1..m3, keyed by fingerprint. Chunk i for a fingerprint
// holds the sample (timestamp=i, value=fingerprint), which makes chunks
// distinguishable per series and per position.
func buildTestChunks(encoding chunkEncoding) map[clientmodel.Fingerprint][]chunk {
	fps := clientmodel.Fingerprints{
		m1.FastFingerprint(),
		m2.FastFingerprint(),
		m3.FastFingerprint(),
	}
	fpToChunks := map[clientmodel.Fingerprint][]chunk{}

	for _, fp := range fps {
		fpToChunks[fp] = make([]chunk, 0, 10)
		for i := 0; i < 10; i++ {
			// add may return several chunks on overflow; with a single
			// sample we keep the first (and only) resulting chunk.
			fpToChunks[fp] = append(fpToChunks[fp], newChunkForEncoding(encoding).add(&metric.SamplePair{
				Timestamp: clientmodel.Timestamp(i),
				Value:     clientmodel.SampleValue(fp),
			})[0])
		}
	}
	return fpToChunks
}
|
|
|
|
|
|
|
|
// chunksEqual compares two chunks sample by sample via their values()
// streams.
// NOTE(review): iteration is driven by c1's stream only — if c1 has fewer
// samples than c2, the surplus in c2 is never compared; confirm callers
// always pass equal-length chunks.
func chunksEqual(c1, c2 chunk) bool {
	values2 := c2.values()
	for v1 := range c1.values() {
		v2 := <-values2
		if !v1.Equal(v2) {
			return false
		}
	}
	return true
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
func testPersistLoadDropChunks(t *testing.T, encoding chunkEncoding) {
|
|
|
|
p, closer := newTestPersistence(t, encoding)
|
2014-08-13 15:13:28 +00:00
|
|
|
defer closer.Close()
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
fpToChunks := buildTestChunks(encoding)
|
2014-08-13 15:13:28 +00:00
|
|
|
|
|
|
|
for fp, chunks := range fpToChunks {
|
2015-03-09 01:33:10 +00:00
|
|
|
firstTimeNotDropped, offset, numDropped, allDropped, err :=
|
|
|
|
p.dropAndPersistChunks(fp, clientmodel.Earliest, chunks)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if got, want := firstTimeNotDropped, clientmodel.Timestamp(0); got != want {
|
|
|
|
t.Errorf("Want firstTimeNotDropped %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if got, want := offset, 0; got != want {
|
|
|
|
t.Errorf("Want offset %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if got, want := numDropped, 0; got != want {
|
|
|
|
t.Errorf("Want numDropped %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("All dropped.")
|
2014-08-13 15:13:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for fp, expectedChunks := range fpToChunks {
|
|
|
|
indexes := make([]int, 0, len(expectedChunks))
|
2014-08-19 16:14:44 +00:00
|
|
|
for i := range expectedChunks {
|
2014-08-13 15:13:28 +00:00
|
|
|
indexes = append(indexes, i)
|
|
|
|
}
|
2014-10-27 19:40:48 +00:00
|
|
|
actualChunks, err := p.loadChunks(fp, indexes, 0)
|
2014-08-13 15:13:28 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for _, i := range indexes {
|
|
|
|
if !chunksEqual(expectedChunks[i], actualChunks[i]) {
|
2014-10-28 18:01:41 +00:00
|
|
|
t.Errorf("%d. Chunks not equal.", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Load all chunk descs.
|
|
|
|
actualChunkDescs, err := p.loadChunkDescs(fp, 10)
|
|
|
|
if len(actualChunkDescs) != 10 {
|
|
|
|
t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
|
|
|
|
}
|
|
|
|
for i, cd := range actualChunkDescs {
|
|
|
|
if cd.firstTime() != clientmodel.Timestamp(i) || cd.lastTime() != clientmodel.Timestamp(i) {
|
|
|
|
t.Errorf(
|
|
|
|
"Want ts=%v, got firstTime=%v, lastTime=%v.",
|
|
|
|
i, cd.firstTime(), cd.lastTime(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
// Load chunk descs partially.
|
|
|
|
actualChunkDescs, err = p.loadChunkDescs(fp, 5)
|
|
|
|
if len(actualChunkDescs) != 5 {
|
|
|
|
t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 5)
|
|
|
|
}
|
|
|
|
for i, cd := range actualChunkDescs {
|
|
|
|
if cd.firstTime() != clientmodel.Timestamp(i) || cd.lastTime() != clientmodel.Timestamp(i) {
|
|
|
|
t.Errorf(
|
|
|
|
"Want ts=%v, got firstTime=%v, lastTime=%v.",
|
|
|
|
i, cd.firstTime(), cd.lastTime(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Drop half of the chunks.
|
|
|
|
for fp, expectedChunks := range fpToChunks {
|
2015-03-09 01:33:10 +00:00
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 5, nil)
|
2014-10-28 18:01:41 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-03-09 01:33:10 +00:00
|
|
|
if offset != 5 {
|
|
|
|
t.Errorf("want offset 5, got %d", offset)
|
|
|
|
}
|
2014-11-10 17:22:08 +00:00
|
|
|
if firstTime != 5 {
|
|
|
|
t.Errorf("want first time 5, got %d", firstTime)
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
if numDropped != 5 {
|
|
|
|
t.Errorf("want 5 dropped chunks, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("all chunks dropped")
|
|
|
|
}
|
|
|
|
indexes := make([]int, 5)
|
|
|
|
for i := range indexes {
|
|
|
|
indexes[i] = i
|
|
|
|
}
|
|
|
|
actualChunks, err := p.loadChunks(fp, indexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for _, i := range indexes {
|
|
|
|
if !chunksEqual(expectedChunks[i+5], actualChunks[i]) {
|
|
|
|
t.Errorf("%d. Chunks not equal.", i)
|
2014-08-13 15:13:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
// Drop all the chunks.
|
|
|
|
for fp := range fpToChunks {
|
2015-03-09 01:33:10 +00:00
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 100, nil)
|
2014-11-10 17:22:08 +00:00
|
|
|
if firstTime != 0 {
|
|
|
|
t.Errorf("want first time 0, got %d", firstTime)
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-03-09 01:33:10 +00:00
|
|
|
if offset != 0 {
|
|
|
|
t.Errorf("want offset 0, got %d", offset)
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
if numDropped != 5 {
|
|
|
|
t.Errorf("want 5 dropped chunks, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if !allDropped {
|
|
|
|
t.Error("not all chunks dropped")
|
|
|
|
}
|
|
|
|
}
|
2015-03-09 01:33:10 +00:00
|
|
|
// Re-add first two of the chunks.
|
|
|
|
for fp, chunks := range fpToChunks {
|
|
|
|
firstTimeNotDropped, offset, numDropped, allDropped, err :=
|
|
|
|
p.dropAndPersistChunks(fp, clientmodel.Earliest, chunks[:2])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if got, want := firstTimeNotDropped, clientmodel.Timestamp(0); got != want {
|
|
|
|
t.Errorf("Want firstTimeNotDropped %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if got, want := offset, 0; got != want {
|
|
|
|
t.Errorf("Want offset %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if got, want := numDropped, 0; got != want {
|
|
|
|
t.Errorf("Want numDropped %v, got %v.", got, want)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("All dropped.")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Drop the first of the chunks while adding two more.
|
|
|
|
for fp, chunks := range fpToChunks {
|
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 1, chunks[2:4])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if offset != 1 {
|
|
|
|
t.Errorf("want offset 1, got %d", offset)
|
|
|
|
}
|
|
|
|
if firstTime != 1 {
|
|
|
|
t.Errorf("want first time 1, got %d", firstTime)
|
|
|
|
}
|
|
|
|
if numDropped != 1 {
|
|
|
|
t.Errorf("want 1 dropped chunk, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("all chunks dropped")
|
|
|
|
}
|
|
|
|
wantChunks := chunks[1:4]
|
|
|
|
indexes := make([]int, len(wantChunks))
|
|
|
|
for i := range indexes {
|
|
|
|
indexes[i] = i
|
|
|
|
}
|
|
|
|
gotChunks, err := p.loadChunks(fp, indexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for i, wantChunk := range wantChunks {
|
|
|
|
if !chunksEqual(wantChunk, gotChunks[i]) {
|
|
|
|
t.Errorf("%d. Chunks not equal.", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Drop all the chunks while adding two more.
|
|
|
|
for fp, chunks := range fpToChunks {
|
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 4, chunks[4:6])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if offset != 0 {
|
|
|
|
t.Errorf("want offset 0, got %d", offset)
|
|
|
|
}
|
|
|
|
if firstTime != 4 {
|
|
|
|
t.Errorf("want first time 4, got %d", firstTime)
|
|
|
|
}
|
|
|
|
if numDropped != 3 {
|
|
|
|
t.Errorf("want 3 dropped chunks, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("all chunks dropped")
|
|
|
|
}
|
|
|
|
wantChunks := chunks[4:6]
|
|
|
|
indexes := make([]int, len(wantChunks))
|
|
|
|
for i := range indexes {
|
|
|
|
indexes[i] = i
|
|
|
|
}
|
|
|
|
gotChunks, err := p.loadChunks(fp, indexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for i, wantChunk := range wantChunks {
|
|
|
|
if !chunksEqual(wantChunk, gotChunks[i]) {
|
|
|
|
t.Errorf("%d. Chunks not equal.", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// While adding two more, drop all but one of the added ones.
|
|
|
|
for fp, chunks := range fpToChunks {
|
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 7, chunks[6:8])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if offset != 0 {
|
|
|
|
t.Errorf("want offset 0, got %d", offset)
|
|
|
|
}
|
|
|
|
if firstTime != 7 {
|
|
|
|
t.Errorf("want first time 7, got %d", firstTime)
|
|
|
|
}
|
|
|
|
if numDropped != 3 {
|
|
|
|
t.Errorf("want 3 dropped chunks, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if allDropped {
|
|
|
|
t.Error("all chunks dropped")
|
|
|
|
}
|
|
|
|
wantChunks := chunks[7:8]
|
|
|
|
indexes := make([]int, len(wantChunks))
|
|
|
|
for i := range indexes {
|
|
|
|
indexes[i] = i
|
|
|
|
}
|
|
|
|
gotChunks, err := p.loadChunks(fp, indexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
for i, wantChunk := range wantChunks {
|
|
|
|
if !chunksEqual(wantChunk, gotChunks[i]) {
|
|
|
|
t.Errorf("%d. Chunks not equal.", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// While adding two more, drop all chunks including the added ones.
|
|
|
|
for fp, chunks := range fpToChunks {
|
|
|
|
firstTime, offset, numDropped, allDropped, err := p.dropAndPersistChunks(fp, 10, chunks[8:])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if offset != 0 {
|
|
|
|
t.Errorf("want offset 0, got %d", offset)
|
|
|
|
}
|
|
|
|
if firstTime != 0 {
|
|
|
|
t.Errorf("want first time 0, got %d", firstTime)
|
|
|
|
}
|
|
|
|
if numDropped != 3 {
|
|
|
|
t.Errorf("want 3 dropped chunks, got %v", numDropped)
|
|
|
|
}
|
|
|
|
if !allDropped {
|
|
|
|
t.Error("not all chunks dropped")
|
|
|
|
}
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestPersistLoadDropChunksType0 runs the persist/load/drop test with
// chunk encoding 0.
func TestPersistLoadDropChunksType0(t *testing.T) {
	testPersistLoadDropChunks(t, 0)
}
|
|
|
|
|
|
|
|
// TestPersistLoadDropChunksType1 runs the persist/load/drop test with
// chunk encoding 1.
func TestPersistLoadDropChunksType1(t *testing.T) {
	testPersistLoadDropChunks(t, 1)
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// testCheckpointAndLoadSeriesMapAndHeads checkpoints a series map holding
// five series in different states (open/closed head chunk, various persist
// watermarks, small and large series) and verifies that loading the
// checkpoint reproduces the expected per-series state.
func testCheckpointAndLoadSeriesMapAndHeads(t *testing.T, encoding chunkEncoding) {
	p, closer := newTestPersistence(t, encoding)
	defer closer.Close()

	fpLocker := newFingerprintLocker(10)
	sm := newSeriesMap()
	// s1: one sample, open head chunk.
	// s2: no samples at all.
	// s3: one sample, head chunk closed and already persisted (watermark 1).
	// s4: 10000 samples, nothing persisted.
	// s5: 10000 samples, first 3 chunks persisted (watermark 3).
	s1 := newMemorySeries(m1, true, 0)
	s2 := newMemorySeries(m2, false, 0)
	s3 := newMemorySeries(m3, false, 0)
	s4 := newMemorySeries(m4, true, 0)
	s5 := newMemorySeries(m5, true, 0)
	s1.add(&metric.SamplePair{Timestamp: 1, Value: 3.14})
	s3.add(&metric.SamplePair{Timestamp: 2, Value: 2.7})
	s3.headChunkClosed = true
	s3.persistWatermark = 1
	for i := 0; i < 10000; i++ {
		s4.add(&metric.SamplePair{
			Timestamp: clientmodel.Timestamp(i),
			Value:     clientmodel.SampleValue(i) / 2,
		})
		s5.add(&metric.SamplePair{
			Timestamp: clientmodel.Timestamp(i),
			Value:     clientmodel.SampleValue(i * i),
		})
	}
	s5.persistWatermark = 3
	chunkCountS4 := len(s4.chunkDescs)
	chunkCountS5 := len(s5.chunkDescs)
	sm.put(m1.FastFingerprint(), s1)
	sm.put(m2.FastFingerprint(), s2)
	sm.put(m3.FastFingerprint(), s3)
	sm.put(m4.FastFingerprint(), s4)
	sm.put(m5.FastFingerprint(), s5)

	if err := p.checkpointSeriesMapAndHeads(sm, fpLocker); err != nil {
		t.Fatal(err)
	}

	loadedSM, _, err := p.loadSeriesMapAndHeads()
	if err != nil {
		t.Fatal(err)
	}
	// Five series were put, but only four are expected back — presumably
	// the empty series s2 is not checkpointed; TODO confirm against
	// checkpointSeriesMapAndHeads.
	if loadedSM.length() != 4 {
		t.Errorf("want 4 series in map, got %d", loadedSM.length())
	}
	// s1: head chunk must survive the round trip and stay open.
	if loadedS1, ok := loadedSM.get(m1.FastFingerprint()); ok {
		if !reflect.DeepEqual(loadedS1.metric, m1) {
			t.Errorf("want metric %v, got %v", m1, loadedS1.metric)
		}
		if !reflect.DeepEqual(loadedS1.head().chunk, s1.head().chunk) {
			t.Error("head chunks differ")
		}
		if loadedS1.chunkDescsOffset != 0 {
			t.Errorf("want chunkDescsOffset 0, got %d", loadedS1.chunkDescsOffset)
		}
		if loadedS1.headChunkClosed {
			t.Error("headChunkClosed is true")
		}
	} else {
		t.Errorf("couldn't find %v in loaded map", m1)
	}
	// s3: fully persisted head chunk must come back evicted and closed,
	// with chunkDescsOffset -1 (offset unknown until disk is consulted).
	if loadedS3, ok := loadedSM.get(m3.FastFingerprint()); ok {
		if !reflect.DeepEqual(loadedS3.metric, m3) {
			t.Errorf("want metric %v, got %v", m3, loadedS3.metric)
		}
		if loadedS3.head().chunk != nil {
			t.Error("head chunk not evicted")
		}
		if loadedS3.chunkDescsOffset != -1 {
			t.Errorf("want chunkDescsOffset -1, got %d", loadedS3.chunkDescsOffset)
		}
		if !loadedS3.headChunkClosed {
			t.Error("headChunkClosed is false")
		}
	} else {
		t.Errorf("couldn't find %v in loaded map", m3)
	}
	// s4: nothing persisted, so all chunkDescs are kept in memory,
	// non-evicted, watermark 0.
	if loadedS4, ok := loadedSM.get(m4.FastFingerprint()); ok {
		if !reflect.DeepEqual(loadedS4.metric, m4) {
			t.Errorf("want metric %v, got %v", m4, loadedS4.metric)
		}
		if got, want := len(loadedS4.chunkDescs), chunkCountS4; got != want {
			t.Errorf("got %d chunkDescs, want %d", got, want)
		}
		if got, want := loadedS4.persistWatermark, 0; got != want {
			t.Errorf("got persistWatermark %d, want %d", got, want)
		}
		if loadedS4.chunkDescs[2].isEvicted() {
			t.Error("3rd chunk evicted")
		}
		if loadedS4.chunkDescs[3].isEvicted() {
			t.Error("4th chunk evicted")
		}
		if loadedS4.chunkDescsOffset != 0 {
			t.Errorf("want chunkDescsOffset 0, got %d", loadedS4.chunkDescsOffset)
		}
		if loadedS4.headChunkClosed {
			t.Error("headChunkClosed is true")
		}
	} else {
		t.Errorf("couldn't find %v in loaded map", m4)
	}
	// s5: watermark 3, so chunks below the watermark (e.g. index 2) are
	// evicted after loading, those above (index 3) are not.
	if loadedS5, ok := loadedSM.get(m5.FastFingerprint()); ok {
		if !reflect.DeepEqual(loadedS5.metric, m5) {
			t.Errorf("want metric %v, got %v", m5, loadedS5.metric)
		}
		if got, want := len(loadedS5.chunkDescs), chunkCountS5; got != want {
			t.Errorf("got %d chunkDescs, want %d", got, want)
		}
		if got, want := loadedS5.persistWatermark, 3; got != want {
			t.Errorf("got persistWatermark %d, want %d", got, want)
		}
		if !loadedS5.chunkDescs[2].isEvicted() {
			t.Error("3rd chunk not evicted")
		}
		if loadedS5.chunkDescs[3].isEvicted() {
			t.Error("4th chunk evicted")
		}
		if loadedS5.chunkDescsOffset != 0 {
			t.Errorf("want chunkDescsOffset 0, got %d", loadedS5.chunkDescsOffset)
		}
		if loadedS5.headChunkClosed {
			t.Error("headChunkClosed is true")
		}
	} else {
		t.Errorf("couldn't find %v in loaded map", m5)
	}
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestCheckpointAndLoadSeriesMapAndHeadsChunkType0 runs the checkpoint
// round-trip test with chunk encoding 0.
func TestCheckpointAndLoadSeriesMapAndHeadsChunkType0(t *testing.T) {
	testCheckpointAndLoadSeriesMapAndHeads(t, 0)
}
|
|
|
|
|
|
|
|
// TestCheckpointAndLoadSeriesMapAndHeadsChunkType1 runs the checkpoint
// round-trip test with chunk encoding 1.
func TestCheckpointAndLoadSeriesMapAndHeadsChunkType1(t *testing.T) {
	testCheckpointAndLoadSeriesMapAndHeads(t, 1)
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// testGetFingerprintsModifiedBefore archives three metrics with different
// first/last times and checks getFingerprintsModifiedBefore at a range of
// cutoff timestamps, before and after unarchiving one of them.
func testGetFingerprintsModifiedBefore(t *testing.T, encoding chunkEncoding) {
	p, closer := newTestPersistence(t, encoding)
	defer closer.Close()

	// Local metrics shadow the package-level fixtures on purpose.
	m1 := clientmodel.Metric{"n1": "v1"}
	m2 := clientmodel.Metric{"n2": "v2"}
	m3 := clientmodel.Metric{"n1": "v2"}
	// archiveMetric(fp, metric, firstTime, lastTime); errors are ignored
	// here — presumably acceptable in this test setup, TODO confirm.
	p.archiveMetric(1, m1, 2, 4)
	p.archiveMetric(2, m2, 1, 6)
	p.archiveMetric(3, m3, 5, 5)

	// Expected fingerprints whose archival firstTime is strictly before
	// the given cutoff timestamp.
	expectedFPs := map[clientmodel.Timestamp][]clientmodel.Fingerprint{
		0: {},
		1: {},
		2: {2},
		3: {1, 2},
		4: {1, 2},
		5: {1, 2},
		6: {1, 2, 3},
	}

	for ts, want := range expectedFPs {
		got, err := p.getFingerprintsModifiedBefore(ts)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(want, got) {
			t.Errorf("timestamp: %v, want FPs %v, got %v", ts, want, got)
		}
	}

	// Unarchive FP 1; its firstTime (2) is reported back.
	unarchived, firstTime, err := p.unarchiveMetric(1)
	if err != nil {
		t.Fatal(err)
	}
	if !unarchived {
		t.Fatal("expected actual unarchival")
	}
	if firstTime != 2 {
		t.Errorf("expected first time 2, got %v", firstTime)
	}
	// A second unarchival of the same FP must be a no-op.
	unarchived, firstTime, err = p.unarchiveMetric(1)
	if err != nil {
		t.Fatal(err)
	}
	if unarchived {
		t.Fatal("expected no unarchival")
	}

	// FP 1 is no longer archived, so it must vanish from all results.
	expectedFPs = map[clientmodel.Timestamp][]clientmodel.Fingerprint{
		0: {},
		1: {},
		2: {2},
		3: {2},
		4: {2},
		5: {2},
		6: {2, 3},
	}

	for ts, want := range expectedFPs {
		got, err := p.getFingerprintsModifiedBefore(ts)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(want, got) {
			t.Errorf("timestamp: %v, want FPs %v, got %v", ts, want, got)
		}
	}
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestGetFingerprintsModifiedBeforeChunkType0 runs the modified-before
// test with chunk encoding 0.
func TestGetFingerprintsModifiedBeforeChunkType0(t *testing.T) {
	testGetFingerprintsModifiedBefore(t, 0)
}
|
|
|
|
|
|
|
|
// TestGetFingerprintsModifiedBeforeChunkType1 runs the modified-before
// test with chunk encoding 1.
func TestGetFingerprintsModifiedBeforeChunkType1(t *testing.T) {
	testGetFingerprintsModifiedBefore(t, 1)
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
func testDropArchivedMetric(t *testing.T, encoding chunkEncoding) {
|
|
|
|
p, closer := newTestPersistence(t, encoding)
|
2014-10-28 18:01:41 +00:00
|
|
|
defer closer.Close()
|
|
|
|
|
|
|
|
m1 := clientmodel.Metric{"n1": "v1"}
|
|
|
|
m2 := clientmodel.Metric{"n2": "v2"}
|
|
|
|
p.archiveMetric(1, m1, 2, 4)
|
|
|
|
p.archiveMetric(2, m2, 1, 6)
|
|
|
|
p.indexMetric(1, m1)
|
|
|
|
p.indexMetric(2, m2)
|
|
|
|
p.waitForIndexing()
|
|
|
|
|
|
|
|
outFPs, err := p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n1", Value: "v1"})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
want := clientmodel.Fingerprints{1}
|
|
|
|
if !reflect.DeepEqual(outFPs, want) {
|
|
|
|
t.Errorf("want %#v, got %#v", want, outFPs)
|
|
|
|
}
|
|
|
|
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n2", Value: "v2"})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
want = clientmodel.Fingerprints{2}
|
|
|
|
if !reflect.DeepEqual(outFPs, want) {
|
|
|
|
t.Errorf("want %#v, got %#v", want, outFPs)
|
|
|
|
}
|
|
|
|
if archived, _, _, err := p.hasArchivedMetric(1); err != nil || !archived {
|
|
|
|
t.Error("want FP 1 archived")
|
|
|
|
}
|
|
|
|
if archived, _, _, err := p.hasArchivedMetric(2); err != nil || !archived {
|
|
|
|
t.Error("want FP 2 archived")
|
|
|
|
}
|
|
|
|
|
2015-02-26 14:19:44 +00:00
|
|
|
if err != p.purgeArchivedMetric(1) {
|
2014-10-28 18:01:41 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2015-02-26 14:19:44 +00:00
|
|
|
if err != p.purgeArchivedMetric(3) {
|
|
|
|
// Purging something that has not beet archived is not an error.
|
2014-10-28 18:01:41 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
p.waitForIndexing()
|
|
|
|
|
|
|
|
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n1", Value: "v1"})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
want = nil
|
|
|
|
if !reflect.DeepEqual(outFPs, want) {
|
|
|
|
t.Errorf("want %#v, got %#v", want, outFPs)
|
|
|
|
}
|
|
|
|
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n2", Value: "v2"})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
want = clientmodel.Fingerprints{2}
|
|
|
|
if !reflect.DeepEqual(outFPs, want) {
|
|
|
|
t.Errorf("want %#v, got %#v", want, outFPs)
|
|
|
|
}
|
|
|
|
if archived, _, _, err := p.hasArchivedMetric(1); err != nil || archived {
|
|
|
|
t.Error("want FP 1 not archived")
|
|
|
|
}
|
|
|
|
if archived, _, _, err := p.hasArchivedMetric(2); err != nil || !archived {
|
|
|
|
t.Error("want FP 2 archived")
|
|
|
|
}
|
2014-08-13 15:13:28 +00:00
|
|
|
}
|
2014-09-14 13:33:56 +00:00
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestDropArchivedMetricChunkType0 runs the archived-metric purge test
// with chunk encoding 0.
func TestDropArchivedMetricChunkType0(t *testing.T) {
	testDropArchivedMetric(t, 0)
}
|
|
|
|
|
|
|
|
// TestDropArchivedMetricChunkType1 runs the archived-metric purge test
// with chunk encoding 1.
func TestDropArchivedMetricChunkType1(t *testing.T) {
	testDropArchivedMetric(t, 1)
}
|
|
|
|
|
2014-09-14 13:33:56 +00:00
|
|
|
// incrementalBatch is one step of the incremental indexing test: the
// metrics to index in this step and the cumulative index state expected
// after indexing them.
type incrementalBatch struct {
	// fpToMetric maps the fingerprints to index in this batch to their metrics.
	fpToMetric index.FingerprintMetricMapping
	// expectedLnToLvs is the expected label name -> label values index
	// after this batch (cumulative over all previous batches).
	expectedLnToLvs index.LabelNameLabelValuesMapping
	// expectedLpToFps is the expected label pair -> fingerprints index
	// after this batch (cumulative over all previous batches).
	expectedLpToFps index.LabelPairFingerprintsMapping
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// testIndexing indexes and archives two batches of metrics, verifying the
// cumulative index state after each batch, then unindexes/unarchives them
// in reverse order, verifying the state before each removal.
func testIndexing(t *testing.T, encoding chunkEncoding) {
	batches := []incrementalBatch{
		{
			fpToMetric: index.FingerprintMetricMapping{
				0: {
					clientmodel.MetricNameLabel: "metric_0",
					"label_1":                   "value_1",
				},
				1: {
					clientmodel.MetricNameLabel: "metric_0",
					"label_2":                   "value_2",
					"label_3":                   "value_3",
				},
				2: {
					clientmodel.MetricNameLabel: "metric_1",
					"label_1":                   "value_2",
				},
			},
			expectedLnToLvs: index.LabelNameLabelValuesMapping{
				clientmodel.MetricNameLabel: codable.LabelValueSet{
					"metric_0": struct{}{},
					"metric_1": struct{}{},
				},
				"label_1": codable.LabelValueSet{
					"value_1": struct{}{},
					"value_2": struct{}{},
				},
				"label_2": codable.LabelValueSet{
					"value_2": struct{}{},
				},
				"label_3": codable.LabelValueSet{
					"value_3": struct{}{},
				},
			},
			expectedLpToFps: index.LabelPairFingerprintsMapping{
				metric.LabelPair{
					Name:  clientmodel.MetricNameLabel,
					Value: "metric_0",
				}: codable.FingerprintSet{0: struct{}{}, 1: struct{}{}},
				metric.LabelPair{
					Name:  clientmodel.MetricNameLabel,
					Value: "metric_1",
				}: codable.FingerprintSet{2: struct{}{}},
				metric.LabelPair{
					Name:  "label_1",
					Value: "value_1",
				}: codable.FingerprintSet{0: struct{}{}},
				metric.LabelPair{
					Name:  "label_1",
					Value: "value_2",
				}: codable.FingerprintSet{2: struct{}{}},
				metric.LabelPair{
					Name:  "label_2",
					Value: "value_2",
				}: codable.FingerprintSet{1: struct{}{}},
				metric.LabelPair{
					Name:  "label_3",
					Value: "value_3",
				}: codable.FingerprintSet{1: struct{}{}},
			},
		}, {
			// Second batch: expectations below are cumulative, i.e. they
			// include the contributions of the first batch.
			fpToMetric: index.FingerprintMetricMapping{
				3: {
					clientmodel.MetricNameLabel: "metric_0",
					"label_1":                   "value_3",
				},
				4: {
					clientmodel.MetricNameLabel: "metric_2",
					"label_2":                   "value_2",
					"label_3":                   "value_1",
				},
				5: {
					clientmodel.MetricNameLabel: "metric_1",
					"label_1":                   "value_3",
				},
			},
			expectedLnToLvs: index.LabelNameLabelValuesMapping{
				clientmodel.MetricNameLabel: codable.LabelValueSet{
					"metric_0": struct{}{},
					"metric_1": struct{}{},
					"metric_2": struct{}{},
				},
				"label_1": codable.LabelValueSet{
					"value_1": struct{}{},
					"value_2": struct{}{},
					"value_3": struct{}{},
				},
				"label_2": codable.LabelValueSet{
					"value_2": struct{}{},
				},
				"label_3": codable.LabelValueSet{
					"value_1": struct{}{},
					"value_3": struct{}{},
				},
			},
			expectedLpToFps: index.LabelPairFingerprintsMapping{
				metric.LabelPair{
					Name:  clientmodel.MetricNameLabel,
					Value: "metric_0",
				}: codable.FingerprintSet{0: struct{}{}, 1: struct{}{}, 3: struct{}{}},
				metric.LabelPair{
					Name:  clientmodel.MetricNameLabel,
					Value: "metric_1",
				}: codable.FingerprintSet{2: struct{}{}, 5: struct{}{}},
				metric.LabelPair{
					Name:  clientmodel.MetricNameLabel,
					Value: "metric_2",
				}: codable.FingerprintSet{4: struct{}{}},
				metric.LabelPair{
					Name:  "label_1",
					Value: "value_1",
				}: codable.FingerprintSet{0: struct{}{}},
				metric.LabelPair{
					Name:  "label_1",
					Value: "value_2",
				}: codable.FingerprintSet{2: struct{}{}},
				metric.LabelPair{
					Name:  "label_1",
					Value: "value_3",
				}: codable.FingerprintSet{3: struct{}{}, 5: struct{}{}},
				metric.LabelPair{
					Name:  "label_2",
					Value: "value_2",
				}: codable.FingerprintSet{1: struct{}{}, 4: struct{}{}},
				metric.LabelPair{
					Name:  "label_3",
					Value: "value_1",
				}: codable.FingerprintSet{4: struct{}{}},
				metric.LabelPair{
					Name:  "label_3",
					Value: "value_3",
				}: codable.FingerprintSet{1: struct{}{}},
			},
		},
	}

	p, closer := newTestPersistence(t, encoding)
	defer closer.Close()

	// Index and archive batch by batch, verifying cumulative state after each.
	indexedFpsToMetrics := index.FingerprintMetricMapping{}
	for i, b := range batches {
		for fp, m := range b.fpToMetric {
			p.indexMetric(fp, m)
			// All metrics are archived with firstTime=1, lastTime=2.
			if err := p.archiveMetric(fp, m, 1, 2); err != nil {
				t.Fatal(err)
			}
			indexedFpsToMetrics[fp] = m
		}
		verifyIndexedState(i, t, b, indexedFpsToMetrics, p)
	}

	// Tear down in reverse batch order, verifying state before each removal.
	for i := len(batches) - 1; i >= 0; i-- {
		b := batches[i]
		verifyIndexedState(i, t, batches[i], indexedFpsToMetrics, p)
		for fp, m := range b.fpToMetric {
			p.unindexMetric(fp, m)
			unarchived, firstTime, err := p.unarchiveMetric(fp)
			if err != nil {
				t.Fatal(err)
			}
			if !unarchived {
				t.Errorf("%d. metric not unarchived", i)
			}
			if firstTime != 1 {
				t.Errorf("%d. expected firstTime=1, got %v", i, firstTime)
			}
			delete(indexedFpsToMetrics, fp)
		}
	}
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestIndexingChunkType0 runs the incremental indexing test with chunk
// encoding 0.
func TestIndexingChunkType0(t *testing.T) {
	testIndexing(t, 0)
}
|
|
|
|
|
|
|
|
// TestIndexingChunkType1 runs the incremental indexing test with chunk
// encoding 1.
func TestIndexingChunkType1(t *testing.T) {
	testIndexing(t, 1)
}
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
func verifyIndexedState(i int, t *testing.T, b incrementalBatch, indexedFpsToMetrics index.FingerprintMetricMapping, p *persistence) {
|
|
|
|
p.waitForIndexing()
|
2014-09-14 13:33:56 +00:00
|
|
|
for fp, m := range indexedFpsToMetrics {
|
|
|
|
// Compare archived metrics with input metrics.
|
2014-10-07 17:11:24 +00:00
|
|
|
mOut, err := p.getArchivedMetric(fp)
|
2014-09-14 13:33:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if !mOut.Equal(m) {
|
|
|
|
t.Errorf("%d. %v: Got: %s; want %s", i, fp, mOut, m)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that archived metrics are in membership index.
|
2014-10-07 17:11:24 +00:00
|
|
|
has, first, last, err := p.hasArchivedMetric(fp)
|
2014-09-14 13:33:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if !has {
|
|
|
|
t.Errorf("%d. fingerprint %v not found", i, fp)
|
|
|
|
}
|
|
|
|
if first != 1 || last != 2 {
|
|
|
|
t.Errorf(
|
|
|
|
"%d. %v: Got first: %d, last %d; want first: %d, last %d",
|
|
|
|
i, fp, first, last, 1, 2,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compare label name -> label values mappings.
|
|
|
|
for ln, lvs := range b.expectedLnToLvs {
|
2014-10-07 17:11:24 +00:00
|
|
|
outLvs, err := p.getLabelValuesForLabelName(ln)
|
2014-09-14 13:33:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2014-09-23 17:21:10 +00:00
|
|
|
outSet := codable.LabelValueSet{}
|
|
|
|
for _, lv := range outLvs {
|
|
|
|
outSet[lv] = struct{}{}
|
2014-09-14 13:33:56 +00:00
|
|
|
}
|
2014-09-23 17:21:10 +00:00
|
|
|
|
|
|
|
if !reflect.DeepEqual(lvs, outSet) {
|
|
|
|
t.Errorf("%d. label values don't match. Got: %v; want %v", i, outSet, lvs)
|
2014-09-14 13:33:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compare label pair -> fingerprints mappings.
|
|
|
|
for lp, fps := range b.expectedLpToFps {
|
2014-10-28 18:01:41 +00:00
|
|
|
outFPs, err := p.getFingerprintsForLabelPair(lp)
|
2014-09-14 13:33:56 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2014-09-23 17:21:10 +00:00
|
|
|
outSet := codable.FingerprintSet{}
|
2014-10-28 18:01:41 +00:00
|
|
|
for _, fp := range outFPs {
|
2014-09-23 17:21:10 +00:00
|
|
|
outSet[fp] = struct{}{}
|
2014-09-14 13:33:56 +00:00
|
|
|
}
|
2014-09-23 17:21:10 +00:00
|
|
|
|
|
|
|
if !reflect.DeepEqual(fps, outSet) {
|
|
|
|
t.Errorf("%d. %v: fingerprints don't match. Got: %v; want %v", i, lp, outSet, fps)
|
2014-09-14 13:33:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-03-19 18:28:21 +00:00
|
|
|
|
|
|
|
// fpStrings holds string-encoded fingerprints for which chunk data is
// expected to exist under the "fixtures" directory; the benchmarks
// below load chunks and chunk descs for each of them and fail if
// nothing can be read.
var fpStrings = []string{
	"b004b821ca50ba26",
	"b037c21e884e4fc5",
	"b037de1e884e5469",
}
|
|
|
|
|
|
|
|
func BenchmarkLoadChunksSequentially(b *testing.B) {
|
|
|
|
p := persistence{
|
|
|
|
basePath: "fixtures",
|
2015-04-14 08:43:09 +00:00
|
|
|
bufPool: sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
|
2015-03-19 18:28:21 +00:00
|
|
|
}
|
|
|
|
sequentialIndexes := make([]int, 47)
|
|
|
|
for i := range sequentialIndexes {
|
|
|
|
sequentialIndexes[i] = i
|
|
|
|
}
|
|
|
|
|
|
|
|
var fp clientmodel.Fingerprint
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
for _, s := range fpStrings {
|
|
|
|
fp.LoadFromString(s)
|
|
|
|
cds, err := p.loadChunks(fp, sequentialIndexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
b.Error(err)
|
|
|
|
}
|
|
|
|
if len(cds) == 0 {
|
|
|
|
b.Error("could not read any chunks")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkLoadChunksRandomly(b *testing.B) {
|
|
|
|
p := persistence{
|
|
|
|
basePath: "fixtures",
|
2015-04-14 08:43:09 +00:00
|
|
|
bufPool: sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
|
2015-03-19 18:28:21 +00:00
|
|
|
}
|
|
|
|
randomIndexes := []int{1, 5, 6, 8, 11, 14, 18, 23, 29, 33, 42, 46}
|
|
|
|
|
|
|
|
var fp clientmodel.Fingerprint
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
for _, s := range fpStrings {
|
|
|
|
fp.LoadFromString(s)
|
|
|
|
cds, err := p.loadChunks(fp, randomIndexes, 0)
|
|
|
|
if err != nil {
|
|
|
|
b.Error(err)
|
|
|
|
}
|
|
|
|
if len(cds) == 0 {
|
|
|
|
b.Error("could not read any chunks")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkLoadChunkDescs(b *testing.B) {
|
|
|
|
p := persistence{
|
|
|
|
basePath: "fixtures",
|
|
|
|
}
|
|
|
|
|
|
|
|
var fp clientmodel.Fingerprint
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
for _, s := range fpStrings {
|
|
|
|
fp.LoadFromString(s)
|
|
|
|
cds, err := p.loadChunkDescs(fp, clientmodel.Latest)
|
|
|
|
if err != nil {
|
|
|
|
b.Error(err)
|
|
|
|
}
|
|
|
|
if len(cds) == 0 {
|
|
|
|
b.Error("could not read any chunk descs")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|