Mirror of https://github.com/prometheus/prometheus, synced 2025-03-25 04:36:53 +00:00
tsdb tests: restrict some 'defer' operations
'defer' only runs at the end of the enclosing function, so introduce some more functions, or move where they start, so that 'defer' can run at the end of the logical block.

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
parent f4fbe47254
commit 2f615a200d
284 tsdb/db_test.go
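The pattern the commit applies, shown in isolation. This is a minimal sketch, not code from the commit: openResource and closeResource are hypothetical stand-ins for the real helpers in db_test.go (openTestDB, db.Close, and so on). With the loop body inline, every 'defer' queued inside the loop waits until the whole test function returns; wrapping the body in t.Run gives each case its own function scope, so the deferred cleanup runs when that subtest ends. Because the body then runs inside a closure, the old 'continue Outer' statements become 'break' (to leave the inner comparison loop) or 'return' (to end the subtest), as the hunks below show.

package example

import "testing"

// Sketch only: openResource/closeResource are hypothetical helpers.
func openResource(t *testing.T) int     { return 1 }
func closeResource(t *testing.T, r int) {}

// Before: every deferred close runs only when TestBefore returns,
// so resources from all cases stay open until the end of the test.
func TestBefore(t *testing.T) {
	for _, c := range []string{"a", "b"} {
		r := openResource(t)
		defer closeResource(t, r) // runs at function exit, not per iteration
		_ = c
		// ... assertions for case c ...
	}
}

// After: each case runs inside t.Run, i.e. inside its own function,
// so the deferred close runs as soon as that subtest finishes.
func TestAfter(t *testing.T) {
	for _, c := range []string{"a", "b"} {
		t.Run("", func(t *testing.T) {
			r := openResource(t)
			defer closeResource(t, r) // runs at the end of this subtest
			_ = c
			// ... assertions for case c ...
		})
	}
}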
@@ -426,64 +426,65 @@ func TestDeleteSimple(t *testing.T) {
 		},
 	}

-Outer:
 	for _, c := range cases {
-		db := openTestDB(t, nil, nil)
-		defer func() {
-			require.NoError(t, db.Close())
-		}()
-
-		ctx := context.Background()
-		app := db.Appender(ctx)
-
-		smpls := make([]float64, numSamples)
-		for i := int64(0); i < numSamples; i++ {
-			smpls[i] = rand.Float64()
-			app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
-		}
-
-		require.NoError(t, app.Commit())
-
-		// TODO(gouthamve): Reset the tombstones somehow.
-		// Delete the ranges.
-		for _, r := range c.Intervals {
-			require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
-		}
-
-		// Compare the result.
-		q, err := db.Querier(0, numSamples)
-		require.NoError(t, err)
-
-		res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-
-		expSamples := make([]chunks.Sample, 0, len(c.remaint))
-		for _, ts := range c.remaint {
-			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
-		}
-
-		expss := newMockSeriesSet([]storage.Series{
-			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
-		})
-
-		for {
-			eok, rok := expss.Next(), res.Next()
-			require.Equal(t, eok, rok)
-
-			if !eok {
-				require.Empty(t, res.Warnings())
-				continue Outer
-			}
-			sexp := expss.At()
-			sres := res.At()
-
-			require.Equal(t, sexp.Labels(), sres.Labels())
-
-			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
-			smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
-
-			require.Equal(t, errExp, errRes)
-			require.Equal(t, smplExp, smplRes)
-		}
+		t.Run("", func(t *testing.T) {
+			db := openTestDB(t, nil, nil)
+			defer func() {
+				require.NoError(t, db.Close())
+			}()
+
+			ctx := context.Background()
+			app := db.Appender(ctx)
+
+			smpls := make([]float64, numSamples)
+			for i := int64(0); i < numSamples; i++ {
+				smpls[i] = rand.Float64()
+				app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
+			}
+
+			require.NoError(t, app.Commit())
+
+			// TODO(gouthamve): Reset the tombstones somehow.
+			// Delete the ranges.
+			for _, r := range c.Intervals {
+				require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+			}
+
+			// Compare the result.
+			q, err := db.Querier(0, numSamples)
+			require.NoError(t, err)
+
+			res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+			expSamples := make([]chunks.Sample, 0, len(c.remaint))
+			for _, ts := range c.remaint {
+				expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+			}
+
+			expss := newMockSeriesSet([]storage.Series{
+				storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
+			})
+
+			for {
+				eok, rok := expss.Next(), res.Next()
+				require.Equal(t, eok, rok)
+
+				if !eok {
+					require.Empty(t, res.Warnings())
+					break
+				}
+				sexp := expss.At()
+				sres := res.At()
+
+				require.Equal(t, sexp.Labels(), sres.Labels())
+
+				smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+				smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+
+				require.Equal(t, errExp, errRes)
+				require.Equal(t, smplExp, smplRes)
+			}
+		})
 	}
 }

@@ -759,64 +760,65 @@ func TestDB_SnapshotWithDelete(t *testing.T) {
 		},
 	}

-Outer:
 	for _, c := range cases {
-		// TODO(gouthamve): Reset the tombstones somehow.
-		// Delete the ranges.
-		for _, r := range c.intervals {
-			require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
-		}
-
-		// create snapshot
-		snap := t.TempDir()
-
-		require.NoError(t, db.Snapshot(snap, true))
-
-		// reopen DB from snapshot
-		newDB, err := Open(snap, nil, nil, nil, nil)
-		require.NoError(t, err)
-		defer func() { require.NoError(t, newDB.Close()) }()
-
-		// Compare the result.
-		q, err := newDB.Querier(0, numSamples)
-		require.NoError(t, err)
-		defer func() { require.NoError(t, q.Close()) }()
-
-		res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-
-		expSamples := make([]chunks.Sample, 0, len(c.remaint))
-		for _, ts := range c.remaint {
-			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
-		}
-
-		expss := newMockSeriesSet([]storage.Series{
-			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
-		})
-
-		if len(expSamples) == 0 {
-			require.False(t, res.Next())
-			continue
-		}
-
-		for {
-			eok, rok := expss.Next(), res.Next()
-			require.Equal(t, eok, rok)
-
-			if !eok {
-				require.Empty(t, res.Warnings())
-				continue Outer
-			}
-			sexp := expss.At()
-			sres := res.At()
-
-			require.Equal(t, sexp.Labels(), sres.Labels())
-
-			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
-			smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
-
-			require.Equal(t, errExp, errRes)
-			require.Equal(t, smplExp, smplRes)
-		}
+		t.Run("", func(t *testing.T) {
+			// TODO(gouthamve): Reset the tombstones somehow.
+			// Delete the ranges.
+			for _, r := range c.intervals {
+				require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+			}
+
+			// create snapshot
+			snap := t.TempDir()
+
+			require.NoError(t, db.Snapshot(snap, true))
+
+			// reopen DB from snapshot
+			newDB, err := Open(snap, nil, nil, nil, nil)
+			require.NoError(t, err)
+			defer func() { require.NoError(t, newDB.Close()) }()
+
+			// Compare the result.
+			q, err := newDB.Querier(0, numSamples)
+			require.NoError(t, err)
+			defer func() { require.NoError(t, q.Close()) }()
+
+			res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+			expSamples := make([]chunks.Sample, 0, len(c.remaint))
+			for _, ts := range c.remaint {
+				expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+			}
+
+			expss := newMockSeriesSet([]storage.Series{
+				storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
+			})
+
+			if len(expSamples) == 0 {
+				require.False(t, res.Next())
+				return
+			}
+
+			for {
+				eok, rok := expss.Next(), res.Next()
+				require.Equal(t, eok, rok)
+
+				if !eok {
+					require.Empty(t, res.Warnings())
+					break
+				}
+				sexp := expss.At()
+				sres := res.At()
+
+				require.Equal(t, sexp.Labels(), sres.Labels())
+
+				smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+				smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+
+				require.Equal(t, errExp, errRes)
+				require.Equal(t, smplExp, smplRes)
+			}
+		})
 	}
 }

@@ -2250,49 +2252,51 @@ func TestDB_LabelNames(t *testing.T) {
 		require.NoError(t, err)
 	}
 	for _, tst := range tests {
-		ctx := context.Background()
-		db := openTestDB(t, nil, nil)
-		defer func() {
-			require.NoError(t, db.Close())
-		}()
-
-		appendSamples(db, 0, 4, tst.sampleLabels1)
-
-		// Testing head.
-		headIndexr, err := db.head.Index()
-		require.NoError(t, err)
-		labelNames, err := headIndexr.LabelNames(ctx)
-		require.NoError(t, err)
-		require.Equal(t, tst.exp1, labelNames)
-		require.NoError(t, headIndexr.Close())
-
-		// Testing disk.
-		err = db.Compact(ctx)
-		require.NoError(t, err)
-		// All blocks have same label names, hence check them individually.
-		// No need to aggregate and check.
-		for _, b := range db.Blocks() {
-			blockIndexr, err := b.Index()
-			require.NoError(t, err)
-			labelNames, err = blockIndexr.LabelNames(ctx)
-			require.NoError(t, err)
-			require.Equal(t, tst.exp1, labelNames)
-			require.NoError(t, blockIndexr.Close())
-		}
-
-		// Adding more samples to head with new label names
-		// so that we can test (head+disk).LabelNames(ctx) (the union).
-		appendSamples(db, 5, 9, tst.sampleLabels2)
-
-		// Testing DB (union).
-		q, err := db.Querier(math.MinInt64, math.MaxInt64)
-		require.NoError(t, err)
-		var ws annotations.Annotations
-		labelNames, ws, err = q.LabelNames(ctx, nil)
-		require.NoError(t, err)
-		require.Empty(t, ws)
-		require.NoError(t, q.Close())
-		require.Equal(t, tst.exp2, labelNames)
+		t.Run("", func(t *testing.T) {
+			ctx := context.Background()
+			db := openTestDB(t, nil, nil)
+			defer func() {
+				require.NoError(t, db.Close())
+			}()
+
+			appendSamples(db, 0, 4, tst.sampleLabels1)
+
+			// Testing head.
+			headIndexr, err := db.head.Index()
+			require.NoError(t, err)
+			labelNames, err := headIndexr.LabelNames(ctx)
+			require.NoError(t, err)
+			require.Equal(t, tst.exp1, labelNames)
+			require.NoError(t, headIndexr.Close())
+
+			// Testing disk.
+			err = db.Compact(ctx)
+			require.NoError(t, err)
+			// All blocks have same label names, hence check them individually.
+			// No need to aggregate and check.
+			for _, b := range db.Blocks() {
+				blockIndexr, err := b.Index()
+				require.NoError(t, err)
+				labelNames, err = blockIndexr.LabelNames(ctx)
+				require.NoError(t, err)
+				require.Equal(t, tst.exp1, labelNames)
+				require.NoError(t, blockIndexr.Close())
+			}
+
+			// Adding more samples to head with new label names
+			// so that we can test (head+disk).LabelNames(ctx) (the union).
+			appendSamples(db, 5, 9, tst.sampleLabels2)
+
+			// Testing DB (union).
+			q, err := db.Querier(math.MinInt64, math.MaxInt64)
+			require.NoError(t, err)
+			var ws annotations.Annotations
+			labelNames, ws, err = q.LabelNames(ctx, nil)
+			require.NoError(t, err)
+			require.Empty(t, ws)
+			require.NoError(t, q.Close())
+			require.Equal(t, tst.exp2, labelNames)
+		})
 	}
 }

@@ -2633,42 +2633,42 @@ func BenchmarkSetMatcher(b *testing.B) {
 	}

 	for _, c := range cases {
-		dir := b.TempDir()
-
-		var (
-			blocks          []*Block
-			prefilledLabels []map[string]string
-			generatedSeries []storage.Series
-		)
-		for i := int64(0); i < int64(c.numBlocks); i++ {
-			mint := i * int64(c.numSamplesPerSeriesPerBlock)
-			maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
-			if len(prefilledLabels) == 0 {
-				generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
-				for _, s := range generatedSeries {
-					prefilledLabels = append(prefilledLabels, s.Labels().Map())
-				}
-			} else {
-				generatedSeries = populateSeries(prefilledLabels, mint, maxt)
-			}
-			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
-			require.NoError(b, err)
-			blocks = append(blocks, block)
-			defer block.Close()
-		}
-
-		qblocks := make([]storage.Querier, 0, len(blocks))
-		for _, blk := range blocks {
-			q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
-			require.NoError(b, err)
-			qblocks = append(qblocks, q)
-		}
-
-		sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge)
-		defer sq.Close()
-
 		benchMsg := fmt.Sprintf("nSeries=%d,nBlocks=%d,cardinality=%d,pattern=\"%s\"", c.numSeries, c.numBlocks, c.cardinality, c.pattern)
 		b.Run(benchMsg, func(b *testing.B) {
+			dir := b.TempDir()
+
+			var (
+				blocks          []*Block
+				prefilledLabels []map[string]string
+				generatedSeries []storage.Series
+			)
+			for i := int64(0); i < int64(c.numBlocks); i++ {
+				mint := i * int64(c.numSamplesPerSeriesPerBlock)
+				maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1
+				if len(prefilledLabels) == 0 {
+					generatedSeries = genSeries(c.numSeries, 10, mint, maxt)
+					for _, s := range generatedSeries {
+						prefilledLabels = append(prefilledLabels, s.Labels().Map())
+					}
+				} else {
+					generatedSeries = populateSeries(prefilledLabels, mint, maxt)
+				}
+				block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
+				require.NoError(b, err)
+				blocks = append(blocks, block)
+				defer block.Close()
+			}
+
+			qblocks := make([]storage.Querier, 0, len(blocks))
+			for _, blk := range blocks {
+				q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
+				require.NoError(b, err)
+				qblocks = append(qblocks, q)
+			}
+
+			sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge)
+			defer sq.Close()
+
 			b.ResetTimer()
 			b.ReportAllocs()
 			for n := 0; n < b.N; n++ {