prometheus/tsdb/chunkenc/chunk_test.go

220 lines
4.3 KiB
Go
Raw Normal View History

2017-04-10 18:59:45 +00:00
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
2016-11-15 09:33:34 +00:00
import (
"fmt"
"io"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
2016-11-15 09:33:34 +00:00
)
// pair is a timestamped sample value, used as the expected/actual
// representation of chunk contents throughout these tests.
type pair struct {
	t int64   // sample timestamp
	v float64 // sample value
}
2016-11-29 21:43:24 +00:00
func TestChunk(t *testing.T) {
2016-12-31 09:10:27 +00:00
for enc, nc := range map[Encoding]func() Chunk{
EncXOR: func() Chunk { return NewXORChunk() },
2016-11-29 21:43:24 +00:00
} {
t.Run(fmt.Sprintf("%v", enc), func(t *testing.T) {
for range make([]struct{}, 1) {
2016-12-31 09:10:27 +00:00
c := nc()
testChunk(t, c)
2016-11-29 21:43:24 +00:00
}
})
}
}
func testChunk(t *testing.T, c Chunk) {
2016-11-29 21:43:24 +00:00
app, err := c.Appender()
assert.NoError(t, err)
2016-11-29 21:43:24 +00:00
var exp []pair
var (
ts = int64(1234123324)
v = 1243535.123
)
2016-12-08 09:04:24 +00:00
for i := 0; i < 300; i++ {
2016-11-29 21:43:24 +00:00
ts += int64(rand.Intn(10000) + 1)
if i%2 == 0 {
v += float64(rand.Intn(1000000))
} else {
v -= float64(rand.Intn(1000000))
}
// Start with a new appender every 10th sample. This emulates starting
// appending to a partially filled chunk.
if i%10 == 0 {
app, err = c.Appender()
assert.NoError(t, err)
}
2016-11-29 21:43:24 +00:00
2016-12-31 09:10:27 +00:00
app.Append(ts, v)
2016-11-29 21:43:24 +00:00
exp = append(exp, pair{t: ts, v: v})
}
// 1. Expand iterator in simple case.
it1 := c.Iterator(nil)
var res1 []pair
for it1.Next() {
ts, v := it1.At()
res1 = append(res1, pair{t: ts, v: v})
2016-11-29 21:43:24 +00:00
}
assert.NoError(t, it1.Err())
assert.Equal(t, exp, res1)
// 2. Expand second iterator while reusing first one.
it2 := c.Iterator(it1)
var res2 []pair
for it2.Next() {
ts, v := it2.At()
res2 = append(res2, pair{t: ts, v: v})
2016-11-29 21:43:24 +00:00
}
assert.NoError(t, it2.Err())
assert.Equal(t, exp, res2)
// 3. Test iterator Seek.
mid := len(exp) / 2
it3 := c.Iterator(nil)
var res3 []pair
assert.Equal(t, true, it3.Seek(exp[mid].t))
// Below ones should not matter.
assert.Equal(t, true, it3.Seek(exp[mid].t))
assert.Equal(t, true, it3.Seek(exp[mid].t))
ts, v = it3.At()
res3 = append(res3, pair{t: ts, v: v})
for it3.Next() {
ts, v := it3.At()
res3 = append(res3, pair{t: ts, v: v})
2016-11-29 21:43:24 +00:00
}
assert.NoError(t, it3.Err())
assert.Equal(t, exp[mid:], res3)
assert.Equal(t, false, it3.Seek(exp[len(exp)-1].t+1))
2016-11-29 21:43:24 +00:00
}
2016-12-31 09:10:27 +00:00
func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
2016-11-15 09:33:34 +00:00
var (
storage: Added Chunks{Queryable/Querier/SeriesSet/Series/Iteratable. Added generic Merge{SeriesSet/Querier} implementation. (#7005) * storage: Added Chunks{Queryable/Querier/SeriesSet/Series/Iteratable. Added generic Merge{SeriesSet/Querier} implementation. ## Rationales: In many places (e.g. chunk Remote read, Thanos Receive fetching chunk from TSDB), we operate on encoded chunks not samples. This means that we unnecessary decode/encode, wasting CPU, time and memory. This PR adds chunk iterator interfaces and makes the merge code to be reused between both seriesSets I will make the use of it in following PR inside tsdb itself. For now fanout implements it and mergers. All merges now also allows passing series mergers. This opens doors for custom deduplications other than TSDB vertical ones (e.g. offline one we have in Thanos). ## Changes * Added Chunk versions of all iterating methods. It all starts in Querier/ChunkQuerier. The plan is that Storage will implement both chunked and samples. * Added Seek to chunks.Iterator interface for iterating over chunks. * NewMergeChunkQuerier was added; Both this and NewMergeQuerier are now using generigMergeQuerier to share the code. Generic code was added. * Improved tests. * Added some TODO for further simplifications in next PRs. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Moved s/Labeled/SeriesLabels as per Krasi suggestion. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Krasi's comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Second iteration of Krasi comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Another round of comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-03-24 20:15:47 +00:00
t = int64(1234123324)
v = 1243535.123
exp []pair
2016-11-15 09:33:34 +00:00
)
for i := 0; i < b.N; i++ {
2017-01-02 21:24:35 +00:00
// t += int64(rand.Intn(10000) + 1)
t += int64(1000)
// v = rand.Float64()
2016-12-07 14:37:37 +00:00
v += float64(100)
exp = append(exp, pair{t: t, v: v})
2016-11-15 09:33:34 +00:00
}
2016-11-15 09:33:34 +00:00
var chunks []Chunk
for i := 0; i < b.N; {
2016-12-31 09:10:27 +00:00
c := newChunk()
a, err := c.Appender()
if err != nil {
b.Fatalf("get appender: %s", err)
}
2016-12-31 09:10:27 +00:00
j := 0
for _, p := range exp {
2016-12-31 09:10:27 +00:00
if j > 250 {
2016-11-15 09:33:34 +00:00
break
}
2016-12-31 09:10:27 +00:00
a.Append(p.t, p.v)
2016-11-15 09:33:34 +00:00
i++
2016-12-31 09:10:27 +00:00
j++
2016-11-15 09:33:34 +00:00
}
chunks = append(chunks, c)
}
b.ReportAllocs()
b.ResetTimer()
storage: Added Chunks{Queryable/Querier/SeriesSet/Series/Iteratable. Added generic Merge{SeriesSet/Querier} implementation. (#7005) * storage: Added Chunks{Queryable/Querier/SeriesSet/Series/Iteratable. Added generic Merge{SeriesSet/Querier} implementation. ## Rationales: In many places (e.g. chunk Remote read, Thanos Receive fetching chunk from TSDB), we operate on encoded chunks not samples. This means that we unnecessary decode/encode, wasting CPU, time and memory. This PR adds chunk iterator interfaces and makes the merge code to be reused between both seriesSets I will make the use of it in following PR inside tsdb itself. For now fanout implements it and mergers. All merges now also allows passing series mergers. This opens doors for custom deduplications other than TSDB vertical ones (e.g. offline one we have in Thanos). ## Changes * Added Chunk versions of all iterating methods. It all starts in Querier/ChunkQuerier. The plan is that Storage will implement both chunked and samples. * Added Seek to chunks.Iterator interface for iterating over chunks. * NewMergeChunkQuerier was added; Both this and NewMergeQuerier are now using generigMergeQuerier to share the code. Generic code was added. * Improved tests. * Added some TODO for further simplifications in next PRs. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Brian's comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Moved s/Labeled/SeriesLabels as per Krasi suggestion. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed Krasi's comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Second iteration of Krasi comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Another round of comments. Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
2020-03-24 20:15:47 +00:00
b.Log("num", b.N, "created chunks", len(chunks))
res := make([]float64, 0, 1024)
var it Iterator
2016-11-15 09:33:34 +00:00
for i := 0; i < len(chunks); i++ {
c := chunks[i]
it := c.Iterator(it)
2016-11-15 09:33:34 +00:00
for it.Next() {
2017-01-02 12:27:52 +00:00
_, v := it.At()
res = append(res, v)
2016-11-15 09:33:34 +00:00
}
if it.Err() != io.EOF {
assert.NoError(b, it.Err())
2016-11-15 09:33:34 +00:00
}
res = res[:0]
}
}
2016-11-20 13:33:00 +00:00
func BenchmarkXORIterator(b *testing.B) {
2016-12-31 09:10:27 +00:00
benchmarkIterator(b, func() Chunk {
return NewXORChunk()
2016-11-20 13:33:00 +00:00
})
}
func BenchmarkXORAppender(b *testing.B) {
2016-12-31 09:10:27 +00:00
benchmarkAppender(b, func() Chunk {
return NewXORChunk()
2016-11-20 13:33:00 +00:00
})
}
2016-12-31 09:10:27 +00:00
func benchmarkAppender(b *testing.B, newChunk func() Chunk) {
2016-11-15 09:33:34 +00:00
var (
t = int64(1234123324)
v = 1243535.123
2016-11-15 09:33:34 +00:00
)
var exp []pair
2016-11-15 09:33:34 +00:00
for i := 0; i < b.N; i++ {
2017-01-02 21:24:35 +00:00
// t += int64(rand.Intn(10000) + 1)
t += int64(1000)
// v = rand.Float64()
2016-12-07 14:37:37 +00:00
v += float64(100)
exp = append(exp, pair{t: t, v: v})
2016-11-15 09:33:34 +00:00
}
b.ReportAllocs()
b.ResetTimer()
var chunks []Chunk
for i := 0; i < b.N; {
2016-12-31 09:10:27 +00:00
c := newChunk()
a, err := c.Appender()
if err != nil {
b.Fatalf("get appender: %s", err)
}
2016-12-31 09:10:27 +00:00
j := 0
for _, p := range exp {
2016-12-31 09:10:27 +00:00
if j > 250 {
2016-11-15 09:33:34 +00:00
break
}
2016-12-31 09:10:27 +00:00
a.Append(p.t, p.v)
2016-11-15 09:33:34 +00:00
i++
2016-12-31 09:10:27 +00:00
j++
2016-11-15 09:33:34 +00:00
}
chunks = append(chunks, c)
}
fmt.Println("num", b.N, "created chunks", len(chunks))
2016-11-15 09:33:34 +00:00
}