2015-01-21 19:07:45 +00:00
|
|
|
// Copyright 2014 The Prometheus Authors
|
2014-09-19 16:18:44 +00:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2014-09-16 13:47:24 +00:00
|
|
|
package local
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-06-22 10:02:03 +00:00
|
|
|
"hash/fnv"
|
2014-08-14 16:23:49 +00:00
|
|
|
"math/rand"
|
2015-07-13 19:12:27 +00:00
|
|
|
"reflect"
|
2014-06-06 09:55:53 +00:00
|
|
|
"testing"
|
2014-08-14 16:23:49 +00:00
|
|
|
"testing/quick"
|
2014-06-06 09:55:53 +00:00
|
|
|
"time"
|
|
|
|
|
2015-05-20 16:10:29 +00:00
|
|
|
"github.com/prometheus/log"
|
Improve persisting chunks to disk.
This is done by bucketing chunks by fingerprint. If the persisting to
disk falls behind, more and more chunks are in the queue. As soon as
there are "double hits", we will now persist both chunks in one go,
doubling the disk throughput (assuming it is limited by disk
seeks). Should even more pile up so that we end wit "triple hits", we
will persist those first, and so on.
Even if we have millions of time series, this will still help,
assuming not all of them are growing with the same speed. Series that
get many samples and/or are not very compressable will accumulate
chunks faster, and they will soon get double- or triple-writes.
To improve the chance of double writes,
-storage.local.persistence-queue-capacity could be set to a higher
value. However, that will slow down shutdown a lot (as the queue has
to be worked through). So we leave it to the user to set it to a
really high value. A more fundamental solution would be to checkpoint
not only head chunks, but also chunks still in the persist queue. That
would be quite complicated for a rather limited use-case (running many
time series with high ingestion rate on slow spinning disks).
2015-02-13 19:08:52 +00:00
|
|
|
|
2014-06-06 09:55:53 +00:00
|
|
|
clientmodel "github.com/prometheus/client_golang/model"
|
Improve persisting chunks to disk.
This is done by bucketing chunks by fingerprint. If the persisting to
disk falls behind, more and more chunks are in the queue. As soon as
there are "double hits", we will now persist both chunks in one go,
doubling the disk throughput (assuming it is limited by disk
seeks). Should even more pile up so that we end wit "triple hits", we
will persist those first, and so on.
Even if we have millions of time series, this will still help,
assuming not all of them are growing with the same speed. Series that
get many samples and/or are not very compressable will accumulate
chunks faster, and they will soon get double- or triple-writes.
To improve the chance of double writes,
-storage.local.persistence-queue-capacity could be set to a higher
value. However, that will slow down shutdown a lot (as the queue has
to be worked through). So we leave it to the user to set it to a
really high value. A more fundamental solution would be to checkpoint
not only head chunks, but also chunks still in the persist queue. That
would be quite complicated for a rather limited use-case (running many
time series with high ingestion rate on slow spinning disks).
2015-02-13 19:08:52 +00:00
|
|
|
|
2014-08-14 16:23:49 +00:00
|
|
|
"github.com/prometheus/prometheus/storage/metric"
|
2015-05-29 11:30:30 +00:00
|
|
|
"github.com/prometheus/prometheus/util/testutil"
|
2014-06-06 09:55:53 +00:00
|
|
|
)
|
|
|
|
|
2015-06-15 16:25:31 +00:00
|
|
|
// TestMatches verifies MetricsForLabelMatchers against a synthetic set of 100
// series. Series i carries label1="test_<i/10>", label2="test_<(i+5)/10>", and
// the constant label all="const", so expected results can be expressed as
// slices of the fingerprints built in insertion order.
func TestMatches(t *testing.T) {
	storage, closer := NewTestStorage(t, 1)
	defer closer.Close()

	samples := make([]*clientmodel.Sample, 100)
	fingerprints := make(clientmodel.Fingerprints, 100)

	for i := range samples {
		metric := clientmodel.Metric{
			clientmodel.MetricNameLabel: clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i)),
			"label1":                    clientmodel.LabelValue(fmt.Sprintf("test_%d", i/10)),
			"label2":                    clientmodel.LabelValue(fmt.Sprintf("test_%d", (i+5)/10)),
			"all":                       "const",
		}
		samples[i] = &clientmodel.Sample{
			Metric:    metric,
			Timestamp: clientmodel.Timestamp(i),
			Value:     clientmodel.SampleValue(i),
		}
		// Remember each series' fingerprint so test cases below can
		// reference expected matches by index range.
		fingerprints[i] = metric.FastFingerprint()
	}
	for _, s := range samples {
		storage.Append(s)
	}
	storage.WaitForIndexing()

	// newMatcher is a convenience constructor that fails the test on an
	// invalid matcher instead of returning an error.
	newMatcher := func(matchType metric.MatchType, name clientmodel.LabelName, value clientmodel.LabelValue) *metric.LabelMatcher {
		lm, err := metric.NewLabelMatcher(matchType, name, value)
		if err != nil {
			t.Fatalf("error creating label matcher: %s", err)
		}
		return lm
	}

	var matcherTests = []struct {
		matchers metric.LabelMatchers
		expected clientmodel.Fingerprints
	}{
		{
			matchers: metric.LabelMatchers{newMatcher(metric.Equal, "label1", "x")},
			expected: clientmodel.Fingerprints{},
		},
		{
			matchers: metric.LabelMatchers{newMatcher(metric.Equal, "label1", "test_0")},
			expected: fingerprints[:10],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "label1", "test_0"),
				newMatcher(metric.Equal, "label2", "test_1"),
			},
			expected: fingerprints[5:10],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "all", "const"),
				newMatcher(metric.NotEqual, "label1", "x"),
			},
			expected: fingerprints,
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "all", "const"),
				newMatcher(metric.NotEqual, "label1", "test_0"),
			},
			expected: fingerprints[10:],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "all", "const"),
				newMatcher(metric.NotEqual, "label1", "test_0"),
				newMatcher(metric.NotEqual, "label1", "test_1"),
				newMatcher(metric.NotEqual, "label1", "test_2"),
			},
			expected: fingerprints[30:],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "label1", ""),
			},
			expected: fingerprints[:0],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.NotEqual, "label1", "test_0"),
				newMatcher(metric.Equal, "label1", ""),
			},
			expected: fingerprints[:0],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.NotEqual, "label1", "test_0"),
				newMatcher(metric.Equal, "label2", ""),
			},
			expected: fingerprints[:0],
		},
		{
			// Equality with "" on a label no series has must not
			// restrict the result set.
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "all", "const"),
				newMatcher(metric.NotEqual, "label1", "test_0"),
				newMatcher(metric.Equal, "not_existant", ""),
			},
			expected: fingerprints[10:],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.RegexMatch, "label1", `test_[3-5]`),
			},
			expected: fingerprints[30:60],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "all", "const"),
				newMatcher(metric.RegexNoMatch, "label1", `test_[3-5]`),
			},
			expected: append(append(clientmodel.Fingerprints{}, fingerprints[:30]...), fingerprints[60:]...),
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.RegexMatch, "label1", `test_[3-5]`),
				newMatcher(metric.RegexMatch, "label2", `test_[4-6]`),
			},
			expected: fingerprints[35:60],
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.RegexMatch, "label1", `test_[3-5]`),
				newMatcher(metric.NotEqual, "label2", `test_4`),
			},
			expected: append(append(clientmodel.Fingerprints{}, fingerprints[30:35]...), fingerprints[45:60]...),
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "label1", `nonexistent`),
				newMatcher(metric.RegexMatch, "label2", `test`),
			},
			expected: clientmodel.Fingerprints{},
		},
		{
			matchers: metric.LabelMatchers{
				newMatcher(metric.Equal, "label1", `test_0`),
				newMatcher(metric.RegexMatch, "label2", `nonexistent`),
			},
			expected: clientmodel.Fingerprints{},
		},
	}

	for _, mt := range matcherTests {
		res := storage.MetricsForLabelMatchers(mt.matchers...)
		if len(mt.expected) != len(res) {
			t.Fatalf("expected %d matches for %q, found %d", len(mt.expected), mt.matchers, len(res))
		}
		// Result is a map keyed by fingerprint, so check set membership
		// rather than order.
		for fp1 := range res {
			found := false
			for _, fp2 := range mt.expected {
				if fp1 == fp2 {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("expected fingerprint %s for %q not in result", fp1, mt.matchers)
			}
		}
	}
}
|
|
|
|
|
2015-06-15 16:25:31 +00:00
|
|
|
// TestFingerprintsForLabels verifies fingerprintsForLabelPairs (exact-equality
// pair lookup) over the same 100-series layout used by TestMatches: series i
// has label1="test_<i/10>" and label2="test_<(i+5)/10>".
func TestFingerprintsForLabels(t *testing.T) {
	storage, closer := NewTestStorage(t, 1)
	defer closer.Close()

	samples := make([]*clientmodel.Sample, 100)
	fingerprints := make(clientmodel.Fingerprints, 100)

	for i := range samples {
		metric := clientmodel.Metric{
			clientmodel.MetricNameLabel: clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i)),
			"label1":                    clientmodel.LabelValue(fmt.Sprintf("test_%d", i/10)),
			"label2":                    clientmodel.LabelValue(fmt.Sprintf("test_%d", (i+5)/10)),
		}
		samples[i] = &clientmodel.Sample{
			Metric:    metric,
			Timestamp: clientmodel.Timestamp(i),
			Value:     clientmodel.SampleValue(i),
		}
		fingerprints[i] = metric.FastFingerprint()
	}
	for _, s := range samples {
		storage.Append(s)
	}
	storage.WaitForIndexing()

	var matcherTests = []struct {
		pairs    []metric.LabelPair
		expected clientmodel.Fingerprints
	}{
		{
			pairs:    []metric.LabelPair{{"label1", "x"}},
			expected: fingerprints[:0],
		},
		{
			pairs:    []metric.LabelPair{{"label1", "test_0"}},
			expected: fingerprints[:10],
		},
		{
			// Two different required values for the same label name
			// can never both hold, so the intersection is empty.
			pairs: []metric.LabelPair{
				{"label1", "test_0"},
				{"label1", "test_1"},
			},
			expected: fingerprints[:0],
		},
		{
			pairs: []metric.LabelPair{
				{"label1", "test_0"},
				{"label2", "test_1"},
			},
			expected: fingerprints[5:10],
		},
		{
			pairs: []metric.LabelPair{
				{"label1", "test_1"},
				{"label2", "test_2"},
			},
			expected: fingerprints[15:20],
		},
	}

	for _, mt := range matcherTests {
		resfps := storage.fingerprintsForLabelPairs(mt.pairs...)
		if len(mt.expected) != len(resfps) {
			t.Fatalf("expected %d matches for %q, found %d", len(mt.expected), mt.pairs, len(resfps))
		}
		// Membership check: the result is an unordered fingerprint set.
		for fp1 := range resfps {
			found := false
			for _, fp2 := range mt.expected {
				if fp1 == fp2 {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("expected fingerprint %s for %q not in result", fp1, mt.pairs)
			}
		}
	}
}
|
|
|
|
|
2015-06-22 10:02:03 +00:00
|
|
|
// benchLabelMatchingRes is a package-level sink so the compiler cannot
// eliminate the benchmarked calls as dead code.
var benchLabelMatchingRes map[clientmodel.Fingerprint]clientmodel.COWMetric

// BenchmarkLabelMatching measures MetricsForLabelMatchers over 32^4 series
// whose four labels each take 32 hashed values, across a mix of equality,
// inequality, and regex matcher combinations.
func BenchmarkLabelMatching(b *testing.B) {
	s, closer := NewTestStorage(b, 1)
	defer closer.Close()

	h := fnv.New64a()
	// lbl deterministically maps an int to an FNV-hashed label value,
	// spreading values so index ranges don't produce trivially ordered labels.
	lbl := func(x int) clientmodel.LabelValue {
		h.Reset()
		h.Write([]byte(fmt.Sprintf("%d", x)))
		return clientmodel.LabelValue(fmt.Sprintf("%d", h.Sum64()))
	}

	M := 32
	met := clientmodel.Metric{}
	// Generate M^4 distinct label combinations; Clone is required because
	// met is mutated on every iteration.
	for i := 0; i < M; i++ {
		met["label_a"] = lbl(i)
		for j := 0; j < M; j++ {
			met["label_b"] = lbl(j)
			for k := 0; k < M; k++ {
				met["label_c"] = lbl(k)
				for l := 0; l < M; l++ {
					met["label_d"] = lbl(l)
					s.Append(&clientmodel.Sample{
						Metric:    met.Clone(),
						Timestamp: 0,
						Value:     1,
					})
				}
			}
		}
	}
	s.WaitForIndexing()

	newMatcher := func(matchType metric.MatchType, name clientmodel.LabelName, value clientmodel.LabelValue) *metric.LabelMatcher {
		lm, err := metric.NewLabelMatcher(matchType, name, value)
		if err != nil {
			b.Fatalf("error creating label matcher: %s", err)
		}
		return lm
	}

	var matcherTests = []metric.LabelMatchers{
		{
			newMatcher(metric.Equal, "label_a", lbl(1)),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(3)),
			newMatcher(metric.Equal, "label_c", lbl(3)),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(3)),
			newMatcher(metric.Equal, "label_c", lbl(3)),
			newMatcher(metric.NotEqual, "label_d", lbl(3)),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(3)),
			newMatcher(metric.Equal, "label_b", lbl(3)),
			newMatcher(metric.Equal, "label_c", lbl(3)),
			newMatcher(metric.NotEqual, "label_d", lbl(3)),
		},
		{
			newMatcher(metric.RegexMatch, "label_a", ".+"),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(3)),
			newMatcher(metric.RegexMatch, "label_a", ".+"),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(1)),
			newMatcher(metric.RegexMatch, "label_c", "("+lbl(3)+"|"+lbl(10)+")"),
		},
		{
			newMatcher(metric.Equal, "label_a", lbl(3)),
			newMatcher(metric.Equal, "label_a", lbl(4)),
			newMatcher(metric.RegexMatch, "label_c", "("+lbl(3)+"|"+lbl(10)+")"),
		},
	}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		benchLabelMatchingRes = map[clientmodel.Fingerprint]clientmodel.COWMetric{}
		for _, mt := range matcherTests {
			benchLabelMatchingRes = s.MetricsForLabelMatchers(mt...)
		}
	}
	// Stop timer to not count the storage closing.
	b.StopTimer()
}
|
|
|
|
|
2015-05-27 09:24:56 +00:00
|
|
|
// TestRetentionCutoff checks that samples older than dropAfter (1h) are not
// returned by iterators, even though the maintenance loop is stopped and the
// data has not actually been purged. 120 one-minute samples starting 2h ago
// leave roughly the most recent hour visible.
func TestRetentionCutoff(t *testing.T) {
	now := clientmodel.Now()
	insertStart := now.Add(-2 * time.Hour)

	s, closer := NewTestStorage(t, 1)
	defer closer.Close()

	// Stop maintenance loop to prevent actual purging.
	s.loopStopping <- struct{}{}

	s.dropAfter = 1 * time.Hour

	for i := 0; i < 120; i++ {
		smpl := &clientmodel.Sample{
			Metric:    clientmodel.Metric{"job": "test"},
			Timestamp: insertStart.Add(time.Duration(i) * time.Minute), // 1 minute intervals.
			Value:     1,
		}
		s.Append(smpl)
	}
	s.WaitForIndexing()

	// Only one series was appended, so grab its fingerprint from the index.
	var fp clientmodel.Fingerprint
	for f := range s.fingerprintsForLabelPairs(metric.LabelPair{Name: "job", Value: "test"}) {
		fp = f
		break
	}

	pl := s.NewPreloader()
	defer pl.Close()

	// Preload everything.
	err := pl.PreloadRange(fp, insertStart, now, 5*time.Minute)
	if err != nil {
		t.Fatalf("Error preloading outdated chunks: %s", err)
	}

	it := s.NewIterator(fp)

	vals := it.ValueAtTime(now.Add(-61 * time.Minute))
	if len(vals) != 0 {
		t.Errorf("unexpected result for timestamp before retention period")
	}

	vals = it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now})
	// We get 59 values here because the clientmodel.Now() is slightly later
	// than our now.
	if len(vals) != 59 {
		t.Errorf("expected 59 values but got %d", len(vals))
	}
	if expt := now.Add(-1 * time.Hour).Add(time.Minute); vals[0].Timestamp != expt {
		t.Errorf("unexpected timestamp for first sample: %v, expected %v", vals[0].Timestamp.Time(), expt.Time())
	}

	vals = it.BoundaryValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now})
	if len(vals) != 2 {
		t.Errorf("expected 2 values but got %d", len(vals))
	}
	// The first boundary value must also respect the retention cutoff.
	if expt := now.Add(-1 * time.Hour).Add(time.Minute); vals[0].Timestamp != expt {
		t.Errorf("unexpected timestamp for first sample: %v, expected %v", vals[0].Timestamp.Time(), expt.Time())
	}
}
|
|
|
|
|
2015-05-27 15:41:57 +00:00
|
|
|
// TestDropMetrics verifies DropMetricsForFingerprints: first dropping one of
// two series and checking the other survives intact, then dropping both and
// checking no samples remain.
func TestDropMetrics(t *testing.T) {
	now := clientmodel.Now()
	insertStart := now.Add(-2 * time.Hour)

	s, closer := NewTestStorage(t, 1)
	defer closer.Close()

	m1 := clientmodel.Metric{clientmodel.MetricNameLabel: "test", "n1": "v1"}
	m2 := clientmodel.Metric{clientmodel.MetricNameLabel: "test", "n1": "v2"}

	N := 120000

	for j, m := range []clientmodel.Metric{m1, m2} {
		for i := 0; i < N; i++ {
			smpl := &clientmodel.Sample{
				Metric:    m,
				Timestamp: insertStart.Add(time.Duration(i) * time.Millisecond), // 1 millisecond intervals.
				Value:     clientmodel.SampleValue(j),
			}
			s.Append(smpl)
		}
	}
	s.WaitForIndexing()

	fps := s.fingerprintsForLabelPairs(metric.LabelPair{Name: clientmodel.MetricNameLabel, Value: "test"})
	if len(fps) != 2 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps))
	}

	// Sanity-check that each series holds all N samples before any drop,
	// and collect the fingerprints into a list for indexed access.
	var fpList clientmodel.Fingerprints
	for fp := range fps {
		it := s.NewIterator(fp)
		if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != N {
			t.Fatalf("unexpected number of samples: %d", len(vals))
		}
		fpList = append(fpList, fp)
	}

	// Drop only the first series.
	s.DropMetricsForFingerprints(fpList[0])
	s.WaitForIndexing()

	fps2 := s.fingerprintsForLabelPairs(metric.LabelPair{
		Name: clientmodel.MetricNameLabel, Value: "test",
	})
	if len(fps2) != 1 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps2))
	}

	// Dropped series must be empty; the other must still have all samples.
	it := s.NewIterator(fpList[0])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
	it = s.NewIterator(fpList[1])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != N {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}

	// Drop everything (re-dropping fpList[0] must be harmless).
	s.DropMetricsForFingerprints(fpList...)
	s.WaitForIndexing()

	fps3 := s.fingerprintsForLabelPairs(metric.LabelPair{
		Name: clientmodel.MetricNameLabel, Value: "test",
	})
	if len(fps3) != 0 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps3))
	}

	it = s.NewIterator(fpList[0])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
	it = s.NewIterator(fpList[1])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// TestLoop is just a smoke test for the loop method, if we can switch it on and
// off without disaster. It also checks that series maintenance eventually
// reduces the number of in-memory chunk descriptors.
func TestLoop(t *testing.T) {
	// The time.Sleep below makes this test slow; allow skipping it.
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}
	samples := make(clientmodel.Samples, 1000)
	for i := range samples {
		samples[i] = &clientmodel.Sample{
			Timestamp: clientmodel.Timestamp(2 * i),
			Value:     clientmodel.SampleValue(float64(i) * 0.2),
		}
	}
	directory := testutil.NewTemporaryDirectory("test_storage", t)
	defer directory.Close()
	o := &MemorySeriesStorageOptions{
		MemoryChunks:               50,
		MaxChunksToPersist:         1000000,
		PersistenceRetentionPeriod: 24 * 7 * time.Hour,
		PersistenceStoragePath:     directory.Path(),
		CheckpointInterval:         250 * time.Millisecond,
		SyncStrategy:               Adaptive,
	}
	storage := NewMemorySeriesStorage(o)
	if err := storage.Start(); err != nil {
		t.Fatalf("Error starting storage: %s", err)
	}
	for _, s := range samples {
		storage.Append(s)
	}
	storage.WaitForIndexing()
	// All samples share the empty metric, so they form a single series.
	series, _ := storage.(*memorySeriesStorage).fpToSeries.get(clientmodel.Metric{}.FastFingerprint())
	cdsBefore := len(series.chunkDescs)
	time.Sleep(fpMaxWaitDuration + time.Second) // TODO(beorn7): Ugh, need to wait for maintenance to kick in.
	cdsAfter := len(series.chunkDescs)
	storage.Stop()
	if cdsBefore <= cdsAfter {
		t.Errorf(
			"Number of chunk descriptors should have gone down by now. Got before %d, after %d.",
			cdsBefore, cdsAfter,
		)
	}
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// testChunk appends 500k samples to a test storage using the given chunk
// encoding, then reads every non-evicted chunk back through its iterator and
// checks each stored (timestamp, value) pair against the original samples.
func testChunk(t *testing.T, encoding chunkEncoding) {
	samples := make(clientmodel.Samples, 500000)
	for i := range samples {
		samples[i] = &clientmodel.Sample{
			Timestamp: clientmodel.Timestamp(i),
			Value:     clientmodel.SampleValue(float64(i) * 0.2),
		}
	}
	s, closer := NewTestStorage(t, encoding)
	defer closer.Close()

	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	for m := range s.fpToSeries.iter() {
		// Hold the per-fingerprint lock while walking chunkDescs so the
		// series is not mutated underneath us.
		s.fpLocker.Lock(m.fp)

		var values metric.Values
		for _, cd := range m.series.chunkDescs {
			// Evicted chunks have no in-memory data to compare.
			if cd.isEvicted() {
				continue
			}
			for sample := range cd.c.newIterator().values() {
				values = append(values, *sample)
			}
		}

		for i, v := range values {
			if samples[i].Timestamp != v.Timestamp {
				t.Errorf("%d. Got %v; want %v", i, v.Timestamp, samples[i].Timestamp)
			}
			if samples[i].Value != v.Value {
				t.Errorf("%d. Got %v; want %v", i, v.Value, samples[i].Value)
			}
		}
		s.fpLocker.Unlock(m.fp)
	}
	log.Info("test done, closing")
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestChunkType0 runs the chunk round-trip test with chunk encoding 0.
func TestChunkType0(t *testing.T) {
	testChunk(t, 0)
}
|
|
|
|
|
|
|
|
// TestChunkType1 runs the chunk round-trip test with chunk encoding 1.
func TestChunkType1(t *testing.T) {
	testChunk(t, 1)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
func testValueAtTime(t *testing.T, encoding chunkEncoding) {
|
2015-05-19 17:12:01 +00:00
|
|
|
samples := make(clientmodel.Samples, 10000)
|
2014-06-06 09:55:53 +00:00
|
|
|
for i := range samples {
|
|
|
|
samples[i] = &clientmodel.Sample{
|
2014-08-14 16:23:49 +00:00
|
|
|
Timestamp: clientmodel.Timestamp(2 * i),
|
2014-06-06 09:55:53 +00:00
|
|
|
Value: clientmodel.SampleValue(float64(i) * 0.2),
|
|
|
|
}
|
|
|
|
}
|
2015-03-13 14:49:07 +00:00
|
|
|
s, closer := NewTestStorage(t, encoding)
|
2014-06-06 09:55:53 +00:00
|
|
|
defer closer.Close()
|
|
|
|
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2015-02-12 16:23:42 +00:00
|
|
|
s.WaitForIndexing()
|
2014-06-06 09:55:53 +00:00
|
|
|
|
2015-05-05 16:17:51 +00:00
|
|
|
fp := clientmodel.Metric{}.FastFingerprint()
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
it := s.NewIterator(fp)
|
|
|
|
|
2014-08-14 16:23:49 +00:00
|
|
|
// #1 Exactly on a sample.
|
2014-06-06 09:55:53 +00:00
|
|
|
for i, expected := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.ValueAtTime(expected.Timestamp)
|
2014-06-06 09:55:53 +00:00
|
|
|
|
2014-08-14 16:23:49 +00:00
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("1.%d. Expected exactly one result, got %d.", i, len(actual))
|
|
|
|
}
|
2014-06-06 09:55:53 +00:00
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
2014-08-14 16:23:49 +00:00
|
|
|
t.Errorf("1.%d. Got %v; want %v", i, actual[0].Timestamp, expected.Timestamp)
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
2014-08-14 16:23:49 +00:00
|
|
|
t.Errorf("1.%d. Got %v; want %v", i, actual[0].Value, expected.Value)
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
2014-08-14 16:23:49 +00:00
|
|
|
|
|
|
|
// #2 Between samples.
|
|
|
|
for i, expected1 := range samples {
|
|
|
|
if i == len(samples)-1 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
expected2 := samples[i+1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.ValueAtTime(expected1.Timestamp + 1)
|
2014-08-14 16:23:49 +00:00
|
|
|
|
|
|
|
if len(actual) != 2 {
|
|
|
|
t.Fatalf("2.%d. Expected exactly 2 results, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
if expected1.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("2.%d. Got %v; want %v", i, actual[0].Timestamp, expected1.Timestamp)
|
|
|
|
}
|
|
|
|
if expected1.Value != actual[0].Value {
|
|
|
|
t.Errorf("2.%d. Got %v; want %v", i, actual[0].Value, expected1.Value)
|
|
|
|
}
|
|
|
|
if expected2.Timestamp != actual[1].Timestamp {
|
|
|
|
t.Errorf("2.%d. Got %v; want %v", i, actual[1].Timestamp, expected1.Timestamp)
|
|
|
|
}
|
|
|
|
if expected2.Value != actual[1].Value {
|
|
|
|
t.Errorf("2.%d. Got %v; want %v", i, actual[1].Value, expected1.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// #3 Corner cases: Just before the first sample, just after the last.
|
|
|
|
expected := samples[0]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.ValueAtTime(expected.Timestamp - 1)
|
2014-08-14 16:23:49 +00:00
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("3.1. Expected exactly one result, got %d.", len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("3.1. Got %v; want %v", actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
t.Errorf("3.1. Got %v; want %v", actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
expected = samples[len(samples)-1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual = it.ValueAtTime(expected.Timestamp + 1)
|
2014-08-14 16:23:49 +00:00
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("3.2. Expected exactly one result, got %d.", len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("3.2. Got %v; want %v", actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
t.Errorf("3.2. Got %v; want %v", actual[0].Value, expected.Value)
|
|
|
|
}
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// TestValueAtTimeChunkType0 runs the ValueAtTime test with chunk encoding 0.
func TestValueAtTimeChunkType0(t *testing.T) {
	testValueAtTime(t, 0)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// TestValueAtTimeChunkType1 runs the ValueAtTime test with chunk encoding 1.
func TestValueAtTimeChunkType1(t *testing.T) {
	testValueAtTime(t, 1)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
func benchmarkValueAtTime(b *testing.B, encoding chunkEncoding) {
|
2015-05-19 17:12:01 +00:00
|
|
|
samples := make(clientmodel.Samples, 10000)
|
|
|
|
for i := range samples {
|
|
|
|
samples[i] = &clientmodel.Sample{
|
|
|
|
Timestamp: clientmodel.Timestamp(2 * i),
|
|
|
|
Value: clientmodel.SampleValue(float64(i) * 0.2),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s, closer := NewTestStorage(b, encoding)
|
|
|
|
defer closer.Close()
|
|
|
|
|
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
|
|
|
s.WaitForIndexing()
|
|
|
|
|
|
|
|
fp := clientmodel.Metric{}.FastFingerprint()
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
it := s.NewIterator(fp)
|
|
|
|
|
|
|
|
// #1 Exactly on a sample.
|
|
|
|
for i, expected := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.ValueAtTime(expected.Timestamp)
|
2015-05-19 17:12:01 +00:00
|
|
|
|
|
|
|
if len(actual) != 1 {
|
|
|
|
b.Fatalf("1.%d. Expected exactly one result, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
b.Errorf("1.%d. Got %v; want %v", i, actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
b.Errorf("1.%d. Got %v; want %v", i, actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// #2 Between samples.
|
|
|
|
for i, expected1 := range samples {
|
|
|
|
if i == len(samples)-1 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
expected2 := samples[i+1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.ValueAtTime(expected1.Timestamp + 1)
|
2015-05-19 17:12:01 +00:00
|
|
|
|
|
|
|
if len(actual) != 2 {
|
|
|
|
b.Fatalf("2.%d. Expected exactly 2 results, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
if expected1.Timestamp != actual[0].Timestamp {
|
|
|
|
b.Errorf("2.%d. Got %v; want %v", i, actual[0].Timestamp, expected1.Timestamp)
|
|
|
|
}
|
|
|
|
if expected1.Value != actual[0].Value {
|
|
|
|
b.Errorf("2.%d. Got %v; want %v", i, actual[0].Value, expected1.Value)
|
|
|
|
}
|
|
|
|
if expected2.Timestamp != actual[1].Timestamp {
|
|
|
|
b.Errorf("2.%d. Got %v; want %v", i, actual[1].Timestamp, expected1.Timestamp)
|
|
|
|
}
|
|
|
|
if expected2.Value != actual[1].Value {
|
|
|
|
b.Errorf("2.%d. Got %v; want %v", i, actual[1].Value, expected1.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// BenchmarkValueAtTimeChunkType0 runs benchmarkValueAtTime with chunk encoding 0.
func BenchmarkValueAtTimeChunkType0(b *testing.B) {
	benchmarkValueAtTime(b, 0)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// BenchmarkValueAtTimeChunkType1 runs benchmarkValueAtTime with chunk encoding 1.
func BenchmarkValueAtTimeChunkType1(b *testing.B) {
	benchmarkValueAtTime(b, 1)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
func testRangeValues(t *testing.T, encoding chunkEncoding) {
|
2015-05-19 17:12:01 +00:00
|
|
|
samples := make(clientmodel.Samples, 10000)
|
2014-06-06 09:55:53 +00:00
|
|
|
for i := range samples {
|
|
|
|
samples[i] = &clientmodel.Sample{
|
2014-08-14 16:23:49 +00:00
|
|
|
Timestamp: clientmodel.Timestamp(2 * i),
|
2014-06-06 09:55:53 +00:00
|
|
|
Value: clientmodel.SampleValue(float64(i) * 0.2),
|
|
|
|
}
|
|
|
|
}
|
2015-03-13 14:49:07 +00:00
|
|
|
s, closer := NewTestStorage(t, encoding)
|
2014-06-06 09:55:53 +00:00
|
|
|
defer closer.Close()
|
|
|
|
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2015-02-12 16:23:42 +00:00
|
|
|
s.WaitForIndexing()
|
2014-06-06 09:55:53 +00:00
|
|
|
|
2015-05-05 16:17:51 +00:00
|
|
|
fp := clientmodel.Metric{}.FastFingerprint()
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
it := s.NewIterator(fp)
|
|
|
|
|
2014-08-14 16:23:49 +00:00
|
|
|
// #1 Zero length interval at sample.
|
2014-06-06 09:55:53 +00:00
|
|
|
for i, expected := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected.Timestamp,
|
|
|
|
NewestInclusive: expected.Timestamp,
|
|
|
|
})
|
2014-06-06 09:55:53 +00:00
|
|
|
|
2014-08-14 16:23:49 +00:00
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("1.%d. Expected exactly one result, got %d.", i, len(actual))
|
|
|
|
}
|
2014-06-06 09:55:53 +00:00
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
2014-08-14 16:23:49 +00:00
|
|
|
t.Errorf("1.%d. Got %v; want %v.", i, actual[0].Timestamp, expected.Timestamp)
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
2014-08-14 16:23:49 +00:00
|
|
|
t.Errorf("1.%d. Got %v; want %v.", i, actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// #2 Zero length interval off sample.
|
|
|
|
for i, expected := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected.Timestamp + 1,
|
|
|
|
NewestInclusive: expected.Timestamp + 1,
|
|
|
|
})
|
|
|
|
|
|
|
|
if len(actual) != 0 {
|
|
|
|
t.Fatalf("2.%d. Expected no result, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// #3 2sec interval around sample.
|
|
|
|
for i, expected := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected.Timestamp - 1,
|
|
|
|
NewestInclusive: expected.Timestamp + 1,
|
|
|
|
})
|
|
|
|
|
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("3.%d. Expected exactly one result, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("3.%d. Got %v; want %v.", i, actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
t.Errorf("3.%d. Got %v; want %v.", i, actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// #4 2sec interval sample to sample.
|
|
|
|
for i, expected1 := range samples {
|
|
|
|
if i == len(samples)-1 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
expected2 := samples[i+1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected1.Timestamp,
|
|
|
|
NewestInclusive: expected1.Timestamp + 2,
|
|
|
|
})
|
|
|
|
|
|
|
|
if len(actual) != 2 {
|
|
|
|
t.Fatalf("4.%d. Expected exactly 2 results, got %d.", i, len(actual))
|
|
|
|
}
|
|
|
|
if expected1.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("4.%d. Got %v for 1st result; want %v.", i, actual[0].Timestamp, expected1.Timestamp)
|
|
|
|
}
|
|
|
|
if expected1.Value != actual[0].Value {
|
|
|
|
t.Errorf("4.%d. Got %v for 1st result; want %v.", i, actual[0].Value, expected1.Value)
|
|
|
|
}
|
|
|
|
if expected2.Timestamp != actual[1].Timestamp {
|
|
|
|
t.Errorf("4.%d. Got %v for 2nd result; want %v.", i, actual[1].Timestamp, expected2.Timestamp)
|
|
|
|
}
|
|
|
|
if expected2.Value != actual[1].Value {
|
|
|
|
t.Errorf("4.%d. Got %v for 2nd result; want %v.", i, actual[1].Value, expected2.Value)
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
2014-08-14 16:23:49 +00:00
|
|
|
|
|
|
|
// #5 corner cases: Interval ends at first sample, interval starts
|
|
|
|
// at last sample, interval entirely before/after samples.
|
|
|
|
expected := samples[0]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected.Timestamp - 2,
|
|
|
|
NewestInclusive: expected.Timestamp,
|
|
|
|
})
|
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("5.1. Expected exactly one result, got %d.", len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("5.1. Got %v; want %v.", actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
t.Errorf("5.1. Got %v; want %v.", actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
expected = samples[len(samples)-1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual = it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: expected.Timestamp,
|
|
|
|
NewestInclusive: expected.Timestamp + 2,
|
|
|
|
})
|
|
|
|
if len(actual) != 1 {
|
|
|
|
t.Fatalf("5.2. Expected exactly one result, got %d.", len(actual))
|
|
|
|
}
|
|
|
|
if expected.Timestamp != actual[0].Timestamp {
|
|
|
|
t.Errorf("5.2. Got %v; want %v.", actual[0].Timestamp, expected.Timestamp)
|
|
|
|
}
|
|
|
|
if expected.Value != actual[0].Value {
|
|
|
|
t.Errorf("5.2. Got %v; want %v.", actual[0].Value, expected.Value)
|
|
|
|
}
|
|
|
|
firstSample := samples[0]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual = it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: firstSample.Timestamp - 4,
|
|
|
|
NewestInclusive: firstSample.Timestamp - 2,
|
|
|
|
})
|
|
|
|
if len(actual) != 0 {
|
|
|
|
t.Fatalf("5.3. Expected no results, got %d.", len(actual))
|
|
|
|
}
|
|
|
|
lastSample := samples[len(samples)-1]
|
2015-05-20 17:13:06 +00:00
|
|
|
actual = it.RangeValues(metric.Interval{
|
2014-08-14 16:23:49 +00:00
|
|
|
OldestInclusive: lastSample.Timestamp + 2,
|
|
|
|
NewestInclusive: lastSample.Timestamp + 4,
|
|
|
|
})
|
|
|
|
if len(actual) != 0 {
|
|
|
|
t.Fatalf("5.3. Expected no results, got %d.", len(actual))
|
|
|
|
}
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// TestRangeValuesChunkType0 runs testRangeValues with chunk encoding 0.
func TestRangeValuesChunkType0(t *testing.T) {
	testRangeValues(t, 0)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// TestRangeValuesChunkType1 runs testRangeValues with chunk encoding 1.
func TestRangeValuesChunkType1(t *testing.T) {
	testRangeValues(t, 1)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
func benchmarkRangeValues(b *testing.B, encoding chunkEncoding) {
|
2015-05-19 17:12:01 +00:00
|
|
|
samples := make(clientmodel.Samples, 10000)
|
|
|
|
for i := range samples {
|
|
|
|
samples[i] = &clientmodel.Sample{
|
|
|
|
Timestamp: clientmodel.Timestamp(2 * i),
|
|
|
|
Value: clientmodel.SampleValue(float64(i) * 0.2),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s, closer := NewTestStorage(b, encoding)
|
|
|
|
defer closer.Close()
|
|
|
|
|
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
|
|
|
s.WaitForIndexing()
|
|
|
|
|
|
|
|
fp := clientmodel.Metric{}.FastFingerprint()
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
|
|
|
|
it := s.NewIterator(fp)
|
|
|
|
|
|
|
|
for _, sample := range samples {
|
2015-05-20 17:13:06 +00:00
|
|
|
actual := it.RangeValues(metric.Interval{
|
2015-05-19 17:12:01 +00:00
|
|
|
OldestInclusive: sample.Timestamp - 20,
|
|
|
|
NewestInclusive: sample.Timestamp + 20,
|
|
|
|
})
|
|
|
|
|
|
|
|
if len(actual) < 10 {
|
|
|
|
b.Fatalf("not enough samples found")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// BenchmarkRangeValuesChunkType0 runs benchmarkRangeValues with chunk encoding 0.
func BenchmarkRangeValuesChunkType0(b *testing.B) {
	benchmarkRangeValues(b, 0)
}
|
|
|
|
|
2015-05-20 17:13:06 +00:00
|
|
|
// BenchmarkRangeValuesChunkType1 runs benchmarkRangeValues with chunk encoding 1.
func BenchmarkRangeValuesChunkType1(b *testing.B) {
	benchmarkRangeValues(b, 1)
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
// testEvictAndPurgeSeries exercises chunk dropping and archiving for a
// single series with the given chunk encoding: partial and complete chunk
// drops on a memory series, archiving after closing the head chunk,
// partial and complete drops on the archived series, unarchiving via
// getOrCreateSeries, and re-archiving an empty memory series without
// purging it completely. The statement order matters: each maintenance
// call depends on the state left by the previous step.
func testEvictAndPurgeSeries(t *testing.T, encoding chunkEncoding) {
	samples := make(clientmodel.Samples, 10000)
	for i := range samples {
		samples[i] = &clientmodel.Sample{
			Timestamp: clientmodel.Timestamp(2 * i),
			Value:     clientmodel.SampleValue(float64(i * i)),
		}
	}
	s, closer := NewTestStorage(t, encoding)
	defer closer.Close()

	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	// All samples carry the empty metric, so this fingerprint selects the
	// single series populated above.
	fp := clientmodel.Metric{}.FastFingerprint()

	// Drop ~half of the chunks.
	s.maintainMemorySeries(fp, 10000)
	it := s.NewIterator(fp)
	actual := it.BoundaryValues(metric.Interval{
		OldestInclusive: 0,
		NewestInclusive: 100000,
	})
	if len(actual) != 2 {
		t.Fatal("expected two results after purging half of series")
	}
	// After dropping chunks before t=10000, the oldest remaining boundary
	// value must lie somewhere in (6000, 10000] depending on chunk sizes.
	if actual[0].Timestamp < 6000 || actual[0].Timestamp > 10000 {
		t.Errorf("1st timestamp out of expected range: %v", actual[0].Timestamp)
	}
	want := clientmodel.Timestamp(19998)
	if actual[1].Timestamp != want {
		t.Errorf("2nd timestamp: want %v, got %v", want, actual[1].Timestamp)
	}

	// Drop everything.
	s.maintainMemorySeries(fp, 100000)
	it = s.NewIterator(fp)
	actual = it.BoundaryValues(metric.Interval{
		OldestInclusive: 0,
		NewestInclusive: 100000,
	})
	if len(actual) != 0 {
		t.Fatal("expected zero results after purging the whole series")
	}

	// Recreate series.
	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}

	// Persist head chunk so we can safely archive.
	series.headChunkClosed = true
	s.maintainMemorySeries(fp, clientmodel.Earliest)

	// Archive metrics.
	s.fpToSeries.del(fp)
	if err := s.persistence.archiveMetric(
		fp, series.metric, series.firstTime(), series.head().lastTime(),
	); err != nil {
		t.Fatal(err)
	}

	archived, _, _, err := s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if !archived {
		t.Fatal("not archived")
	}

	// Drop ~half of the chunks of an archived series.
	s.maintainArchivedSeries(fp, 10000)
	archived, _, _, err = s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if !archived {
		t.Fatal("archived series purged although only half of the chunks dropped")
	}

	// Drop everything.
	s.maintainArchivedSeries(fp, 100000)
	archived, _, _, err = s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if archived {
		t.Fatal("archived series not dropped")
	}

	// Recreate series.
	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	series, ok = s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}

	// Persist head chunk so we can safely archive.
	series.headChunkClosed = true
	s.maintainMemorySeries(fp, clientmodel.Earliest)

	// Archive metrics.
	s.fpToSeries.del(fp)
	if err := s.persistence.archiveMetric(
		fp, series.metric, series.firstTime(), series.head().lastTime(),
	); err != nil {
		t.Fatal(err)
	}

	archived, _, _, err = s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if !archived {
		t.Fatal("not archived")
	}

	// Unarchive metrics.
	s.getOrCreateSeries(fp, clientmodel.Metric{})

	series, ok = s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}
	archived, _, _, err = s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if archived {
		t.Fatal("archived")
	}

	// This will archive again, but must not drop it completely, despite the
	// memorySeries being empty.
	s.maintainMemorySeries(fp, 10000)
	archived, _, _, err = s.persistence.hasArchivedMetric(fp)
	if err != nil {
		t.Fatal(err)
	}
	if !archived {
		t.Fatal("series purged completely")
	}
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestEvictAndPurgeSeriesChunkType0 runs testEvictAndPurgeSeries with chunk encoding 0.
func TestEvictAndPurgeSeriesChunkType0(t *testing.T) {
	testEvictAndPurgeSeries(t, 0)
}
|
|
|
|
|
|
|
|
// TestEvictAndPurgeSeriesChunkType1 runs testEvictAndPurgeSeries with chunk encoding 1.
func TestEvictAndPurgeSeriesChunkType1(t *testing.T) {
	testEvictAndPurgeSeries(t, 1)
}
|
|
|
|
|
2015-07-15 17:53:15 +00:00
|
|
|
// testEvictAndLoadChunkDescs forces chunkDesc eviction by lowering
// maxMemoryChunks to 1, verifies that maintenance shrinks the series'
// chunkDescs, that preloading the full range loads them back, and that a
// subsequent maintenance with chunk drops leaves exactly one chunkDesc.
// NOTE(review): relies on a 10ms sleep for the evict goroutine to run, so
// it is inherently timing-sensitive.
func testEvictAndLoadChunkDescs(t *testing.T, encoding chunkEncoding) {
	samples := make(clientmodel.Samples, 10000)
	for i := range samples {
		samples[i] = &clientmodel.Sample{
			Timestamp: clientmodel.Timestamp(2 * i),
			Value:     clientmodel.SampleValue(float64(i * i)),
		}
	}
	// Give last sample a timestamp of now so that the head chunk will not
	// be closed (which would then archive the time series later as
	// everything will get evicted).
	samples[len(samples)-1] = &clientmodel.Sample{
		Timestamp: clientmodel.Now(),
		Value:     clientmodel.SampleValue(3.14),
	}

	s, closer := NewTestStorage(t, encoding)
	defer closer.Close()

	// Adjust memory chunks to lower value to see evictions.
	s.maxMemoryChunks = 1

	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	// All samples carry the empty metric, so this fingerprint selects the
	// single series populated above.
	fp := clientmodel.Metric{}.FastFingerprint()

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}

	oldLen := len(series.chunkDescs)
	// Maintain series without any dropped chunks.
	s.maintainMemorySeries(fp, 0)
	// Give the evict goroutine an opportunity to run.
	time.Sleep(10 * time.Millisecond)
	// Maintain series again to trigger chunkDesc eviction
	s.maintainMemorySeries(fp, 0)

	if oldLen <= len(series.chunkDescs) {
		t.Errorf("Expected number of chunkDescs to decrease, old number %d, current number %d.", oldLen, len(series.chunkDescs))
	}

	// Load everything back.
	p := s.NewPreloader()
	p.PreloadRange(fp, 0, 100000, time.Hour)

	if oldLen != len(series.chunkDescs) {
		t.Errorf("Expected number of chunkDescs to have reached old value again, old number %d, current number %d.", oldLen, len(series.chunkDescs))
	}

	p.Close()

	// Now maintain series with drops to make sure nothing crazy happens.
	s.maintainMemorySeries(fp, 100000)

	if len(series.chunkDescs) != 1 {
		t.Errorf("Expected exactly one chunkDesc left, got %d.", len(series.chunkDescs))
	}
}
|
|
|
|
|
|
|
|
// TestEvictAndLoadChunkDescsType0 runs testEvictAndLoadChunkDescs with chunk encoding 0.
func TestEvictAndLoadChunkDescsType0(t *testing.T) {
	testEvictAndLoadChunkDescs(t, 0)
}
|
|
|
|
|
|
|
|
// TestEvictAndLoadChunkDescsType1 runs testEvictAndLoadChunkDescs with chunk encoding 1.
func TestEvictAndLoadChunkDescsType1(t *testing.T) {
	testEvictAndLoadChunkDescs(t, 1)
}
|
|
|
|
|
2015-03-13 14:49:07 +00:00
|
|
|
func benchmarkAppend(b *testing.B, encoding chunkEncoding) {
|
2014-06-06 09:55:53 +00:00
|
|
|
samples := make(clientmodel.Samples, b.N)
|
|
|
|
for i := range samples {
|
|
|
|
samples[i] = &clientmodel.Sample{
|
|
|
|
Metric: clientmodel.Metric{
|
|
|
|
clientmodel.MetricNameLabel: clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
|
|
|
|
"label1": clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
|
|
|
|
"label2": clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
|
|
|
|
},
|
|
|
|
Timestamp: clientmodel.Timestamp(i),
|
|
|
|
Value: clientmodel.SampleValue(i),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
b.ResetTimer()
|
2015-03-13 14:49:07 +00:00
|
|
|
s, closer := NewTestStorage(b, encoding)
|
2014-06-06 09:55:53 +00:00
|
|
|
defer closer.Close()
|
|
|
|
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
2014-08-14 16:23:49 +00:00
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// BenchmarkAppendType0 runs benchmarkAppend with chunk encoding 0.
func BenchmarkAppendType0(b *testing.B) {
	benchmarkAppend(b, 0)
}
|
|
|
|
|
|
|
|
// BenchmarkAppendType1 runs benchmarkAppend with chunk encoding 1.
func BenchmarkAppendType1(b *testing.B) {
	benchmarkAppend(b, 1)
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// Append a large number of random samples and then check if we can get them out
|
|
|
|
// of the storage alright.
|
2015-03-13 14:49:07 +00:00
|
|
|
func testFuzz(t *testing.T, encoding chunkEncoding) {
|
2014-10-28 18:01:41 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("Skipping test in short mode.")
|
|
|
|
}
|
2014-08-14 16:23:49 +00:00
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
check := func(seed int64) bool {
|
|
|
|
rand.Seed(seed)
|
2015-03-13 14:49:07 +00:00
|
|
|
s, c := NewTestStorage(t, encoding)
|
2014-08-14 16:23:49 +00:00
|
|
|
defer c.Close()
|
|
|
|
|
2015-05-19 17:12:01 +00:00
|
|
|
samples := createRandomSamples("test_fuzz", 10000)
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
return verifyStorage(t, s, samples, 24*7*time.Hour)
|
2014-08-14 16:23:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := quick.Check(check, nil); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// TestFuzzChunkType0 runs testFuzz with chunk encoding 0.
func TestFuzzChunkType0(t *testing.T) {
	testFuzz(t, 0)
}
|
|
|
|
|
|
|
|
// TestFuzzChunkType1 runs testFuzz with chunk encoding 1.
func TestFuzzChunkType1(t *testing.T) {
	testFuzz(t, 1)
}
|
|
|
|
|
|
|
|
// benchmarkFuzz is the benchmark version of testFuzz. The storage options are
|
|
|
|
// set such that evictions, checkpoints, and purging will happen concurrently,
|
|
|
|
// too. This benchmark will have a very long runtime (up to minutes). You can
|
|
|
|
// use it as an actual benchmark. Run it like this:
|
2014-10-28 18:01:41 +00:00
|
|
|
//
|
2015-03-09 01:33:10 +00:00
|
|
|
// go test -cpu 1,2,4,8 -run=NONE -bench BenchmarkFuzzChunkType -benchmem
|
2014-10-28 18:01:41 +00:00
|
|
|
//
|
|
|
|
// You can also use it as a test for races. In that case, run it like this (will
|
|
|
|
// make things even slower):
|
|
|
|
//
|
2015-03-09 01:33:10 +00:00
|
|
|
// go test -race -cpu 8 -short -bench BenchmarkFuzzChunkType
|
2015-03-13 14:49:07 +00:00
|
|
|
func benchmarkFuzz(b *testing.B, encoding chunkEncoding) {
|
2015-06-15 10:49:28 +00:00
|
|
|
DefaultChunkEncoding = encoding
|
2015-03-06 15:03:03 +00:00
|
|
|
const samplesPerRun = 100000
|
2014-10-28 18:01:41 +00:00
|
|
|
rand.Seed(42)
|
2015-05-28 18:58:38 +00:00
|
|
|
directory := testutil.NewTemporaryDirectory("test_storage", b)
|
2014-10-28 18:01:41 +00:00
|
|
|
defer directory.Close()
|
|
|
|
o := &MemorySeriesStorageOptions{
|
2014-11-13 19:50:25 +00:00
|
|
|
MemoryChunks: 100,
|
2015-03-18 18:36:41 +00:00
|
|
|
MaxChunksToPersist: 1000000,
|
2014-10-28 18:01:41 +00:00
|
|
|
PersistenceRetentionPeriod: time.Hour,
|
|
|
|
PersistenceStoragePath: directory.Path(),
|
2015-03-04 12:40:18 +00:00
|
|
|
CheckpointInterval: time.Second,
|
2015-03-19 14:41:50 +00:00
|
|
|
SyncStrategy: Adaptive,
|
2014-10-28 18:01:41 +00:00
|
|
|
}
|
2015-05-18 17:26:28 +00:00
|
|
|
s := NewMemorySeriesStorage(o)
|
|
|
|
if err := s.Start(); err != nil {
|
|
|
|
b.Fatalf("Error starting storage: %s", err)
|
2014-10-28 18:01:41 +00:00
|
|
|
}
|
|
|
|
s.Start()
|
|
|
|
defer s.Stop()
|
2015-03-04 12:40:18 +00:00
|
|
|
|
|
|
|
samples := createRandomSamples("benchmark_fuzz", samplesPerRun*b.N)
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
start := samplesPerRun * i
|
|
|
|
end := samplesPerRun * (i + 1)
|
|
|
|
middle := (start + end) / 2
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples[start:middle] {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2015-05-06 14:53:12 +00:00
|
|
|
verifyStorage(b, s.(*memorySeriesStorage), samples[:middle], o.PersistenceRetentionPeriod)
|
2015-03-15 02:36:15 +00:00
|
|
|
for _, sample := range samples[middle:end] {
|
|
|
|
s.Append(sample)
|
|
|
|
}
|
2015-05-06 14:53:12 +00:00
|
|
|
verifyStorage(b, s.(*memorySeriesStorage), samples[:end], o.PersistenceRetentionPeriod)
|
2015-03-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BenchmarkFuzzChunkType0 runs benchmarkFuzz with chunk encoding 0.
func BenchmarkFuzzChunkType0(b *testing.B) {
	benchmarkFuzz(b, 0)
}
|
|
|
|
|
|
|
|
// BenchmarkFuzzChunkType1 runs benchmarkFuzz with chunk encoding 1.
func BenchmarkFuzzChunkType1(b *testing.B) {
	benchmarkFuzz(b, 1)
}
|
|
|
|
|
2015-03-04 12:40:18 +00:00
|
|
|
// createRandomSamples returns at least minLen random samples (package
// math/rand; seed the generator beforehand for determinism). It first adds
// two fixed samples whose metrics are chosen to collide in fingerprint
// space (to exercise fingerprint mapping), then generates streaks of
// samples for up to maxMetrics random variants of metricName, drawing
// values from one of three generators (boolean, integer-with-deltas,
// float-with-deltas) and advancing the timestamp by randomized increments.
func createRandomSamples(metricName string, minLen int) clientmodel.Samples {
	type valueCreator func() clientmodel.SampleValue
	type deltaApplier func(clientmodel.SampleValue) clientmodel.SampleValue

	var (
		maxMetrics         = 5
		maxStreakLength    = 500
		maxTimeDelta       = 10000
		maxTimeDeltaFactor = 10
		timestamp          = clientmodel.Now() - clientmodel.Timestamp(maxTimeDelta*maxTimeDeltaFactor*minLen/4) // So that some timestamps are in the future.
		generators         = []struct {
			createValue valueCreator
			applyDelta  []deltaApplier
		}{
			{ // "Boolean".
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.Intn(2))
				},
				applyDelta: []deltaApplier{
					func(_ clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(2))
					},
				},
			},
			{ // Integer with int deltas of various byte length.
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.Int63() - 1<<62)
				},
				applyDelta: []deltaApplier{
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(1<<8) - 1<<7 + int(v))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(1<<16) - 1<<15 + int(v))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Int63n(1<<32) - 1<<31 + int64(v))
					},
				},
			},
			{ // Float with float32 and float64 deltas.
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.NormFloat64())
				},
				applyDelta: []deltaApplier{
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return v + clientmodel.SampleValue(float32(rand.NormFloat64()))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return v + clientmodel.SampleValue(rand.NormFloat64())
					},
				},
			},
		}
	)

	// Prefill result with two samples with colliding metrics (to test fingerprint mapping).
	result := clientmodel.Samples{
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				"instance": "ip-10-33-84-73.l05.ams5.s-cloud.net:24483",
				"status":   "503",
			},
			Value:     42,
			Timestamp: timestamp,
		},
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				"instance": "ip-10-33-84-73.l05.ams5.s-cloud.net:24480",
				"status":   "500",
			},
			Value:     2010,
			Timestamp: timestamp + 1,
		},
	}

	metrics := []clientmodel.Metric{}
	for n := rand.Intn(maxMetrics); n >= 0; n-- {
		metrics = append(metrics, clientmodel.Metric{
			clientmodel.MetricNameLabel: clientmodel.LabelValue(metricName),
			clientmodel.LabelName(fmt.Sprintf("labelname_%d", n+1)): clientmodel.LabelValue(fmt.Sprintf("labelvalue_%d", rand.Int())),
		})
	}

	for len(result) < minLen {
		// Pick a metric for this cycle.
		metric := metrics[rand.Intn(len(metrics))]
		timeDelta := rand.Intn(maxTimeDelta) + 1
		generator := generators[rand.Intn(len(generators))]
		createValue := generator.createValue
		applyDelta := generator.applyDelta[rand.Intn(len(generator.applyDelta))]
		incTimestamp := func() { timestamp += clientmodel.Timestamp(timeDelta * (rand.Intn(maxTimeDeltaFactor) + 1)) }
		switch rand.Intn(4) {
		case 0: // A single sample.
			result = append(result, &clientmodel.Sample{
				Metric:    metric,
				Value:     createValue(),
				Timestamp: timestamp,
			})
			incTimestamp()
		case 1: // A streak of random sample values.
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     createValue(),
					Timestamp: timestamp,
				})
				incTimestamp()
			}
		case 2: // A streak of sample values with incremental changes.
			value := createValue()
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     value,
					Timestamp: timestamp,
				})
				incTimestamp()
				value = applyDelta(value)
			}
		case 3: // A streak of constant sample values.
			value := createValue()
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     value,
					Timestamp: timestamp,
				})
				incTimestamp()
			}
		}
	}

	return result
}
|
|
|
|
|
2015-05-06 14:53:12 +00:00
|
|
|
func verifyStorage(t testing.TB, s *memorySeriesStorage, samples clientmodel.Samples, maxAge time.Duration) bool {
|
2015-03-04 12:40:18 +00:00
|
|
|
s.WaitForIndexing()
|
2014-08-14 16:23:49 +00:00
|
|
|
result := true
|
2014-10-28 18:01:41 +00:00
|
|
|
for _, i := range rand.Perm(len(samples)) {
|
2014-08-14 16:23:49 +00:00
|
|
|
sample := samples[i]
|
2014-10-28 18:01:41 +00:00
|
|
|
if sample.Timestamp.Before(clientmodel.TimestampFromTime(time.Now().Add(-maxAge))) {
|
|
|
|
continue
|
|
|
|
// TODO: Once we have a guaranteed cutoff at the
|
|
|
|
// retention period, we can verify here that no results
|
|
|
|
// are returned.
|
2014-08-14 16:23:49 +00:00
|
|
|
}
|
2015-05-06 14:53:12 +00:00
|
|
|
fp, err := s.mapper.mapFP(sample.Metric.FastFingerprint(), sample.Metric)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
p := s.NewPreloader()
|
|
|
|
p.PreloadRange(fp, sample.Timestamp, sample.Timestamp, time.Hour)
|
2015-05-20 17:13:06 +00:00
|
|
|
found := s.NewIterator(fp).ValueAtTime(sample.Timestamp)
|
2014-08-14 16:23:49 +00:00
|
|
|
if len(found) != 1 {
|
2014-10-28 18:01:41 +00:00
|
|
|
t.Errorf("Sample %#v: Expected exactly one value, found %d.", sample, len(found))
|
|
|
|
result = false
|
|
|
|
p.Close()
|
|
|
|
continue
|
2014-08-14 16:23:49 +00:00
|
|
|
}
|
2015-03-04 12:40:18 +00:00
|
|
|
want := sample.Value
|
|
|
|
got := found[0].Value
|
2015-03-06 15:03:03 +00:00
|
|
|
if want != got || sample.Timestamp != found[0].Timestamp {
|
2014-10-28 18:01:41 +00:00
|
|
|
t.Errorf(
|
|
|
|
"Value (or timestamp) mismatch, want %f (at time %v), got %f (at time %v).",
|
|
|
|
want, sample.Timestamp, got, found[0].Timestamp,
|
|
|
|
)
|
2014-08-14 16:23:49 +00:00
|
|
|
result = false
|
|
|
|
}
|
2014-10-28 18:01:41 +00:00
|
|
|
p.Close()
|
2014-08-14 16:23:49 +00:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
2015-07-13 19:12:27 +00:00
|
|
|
|
|
|
|
func TestAppendOutOfOrder(t *testing.T) {
|
|
|
|
s, closer := NewTestStorage(t, 1)
|
|
|
|
defer closer.Close()
|
|
|
|
|
|
|
|
m := clientmodel.Metric{
|
|
|
|
clientmodel.MetricNameLabel: "out_of_order",
|
|
|
|
}
|
|
|
|
|
2015-07-16 10:48:33 +00:00
|
|
|
for i, t := range []int{0, 2, 2, 1} {
|
2015-07-13 19:12:27 +00:00
|
|
|
s.Append(&clientmodel.Sample{
|
|
|
|
Metric: m,
|
|
|
|
Timestamp: clientmodel.Timestamp(t),
|
2015-07-16 10:48:33 +00:00
|
|
|
Value: clientmodel.SampleValue(i),
|
2015-07-13 19:12:27 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
fp, err := s.mapper.mapFP(m.FastFingerprint(), m)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pl := s.NewPreloader()
|
|
|
|
defer pl.Close()
|
|
|
|
|
|
|
|
err = pl.PreloadRange(fp, 0, 2, 5*time.Minute)
|
|
|
|
if err != nil {
|
2015-07-15 17:53:15 +00:00
|
|
|
t.Fatalf("Error preloading chunks: %s", err)
|
2015-07-13 19:12:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
it := s.NewIterator(fp)
|
|
|
|
|
|
|
|
want := metric.Values{
|
|
|
|
{
|
|
|
|
Timestamp: 0,
|
|
|
|
Value: 0,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Timestamp: 2,
|
2015-07-16 10:48:33 +00:00
|
|
|
Value: 1,
|
2015-07-13 19:12:27 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
got := it.RangeValues(metric.Interval{OldestInclusive: 0, NewestInclusive: 2})
|
|
|
|
if !reflect.DeepEqual(want, got) {
|
|
|
|
t.Fatalf("want %v, got %v", want, got)
|
|
|
|
}
|
|
|
|
}
|