Test updates

Fabian Reinartz 2017-03-08 16:54:13 +01:00
parent cb4dde7659
commit ca1bc920b7
2 changed files with 115 additions and 87 deletions


@@ -8,6 +8,7 @@ import (
 	"unsafe"
 	"github.com/fabxc/tsdb/labels"
+	"github.com/pkg/errors"
 	promlabels "github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/textparse"
@@ -91,5 +92,11 @@ func readPrometheusLabels(fn string, n int) ([]labels.Labels, error) {
 		hashes[h] = struct{}{}
 		i++
 	}
-	return mets, p.Err()
+	if err := p.Err(); err != nil {
+		return nil, err
+	}
+	if i != n {
+		return mets, errors.Errorf("requested %d metrics but found %d", n, i)
+	}
+	return mets, nil
 }
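The change above tightens the contract of readPrometheusLabels: parser errors are still returned, and a short read now fails instead of silently yielding fewer label sets than requested. A minimal usage sketch in the same package (the test name and assertions are illustrative, not part of this commit):

func TestReadPrometheusLabelsCount(t *testing.T) {
	// Hypothetical test: with this change, an error is returned both on parse
	// failures and when the fixture holds fewer than the requested series.
	lbls, err := readPrometheusLabels("testdata/20k.series", 20000)
	require.NoError(t, err)
	require.Len(t, lbls, 20000)
}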


@@ -2,12 +2,10 @@ package tsdb
 import (
 	"io/ioutil"
-	"math/rand"
 	"os"
-	"sort"
+	"path/filepath"
 	"testing"
 	"github.com/fabxc/tsdb/chunks"
 	"github.com/fabxc/tsdb/labels"
 	"github.com/stretchr/testify/require"
 )
@@ -40,104 +38,127 @@ func (ir *mockIndexReader) Close() error {
 	return ir.close()
 }
-func TestPersistence_index_e2e(t *testing.T) {
-	dir, err := ioutil.TempDir("", "test_persistence_e2e")
+func TestIndexRW_Create_Open(t *testing.T) {
+	dir, err := ioutil.TempDir("", "test_index_create")
 	require.NoError(t, err)
 	defer os.RemoveAll(dir)
-	lbls, err := readPrometheusLabels("testdata/20k.series", 20000)
-	require.NoError(t, err)
-	var input indexWriterSeriesSlice
-	// Generate ChunkMetas for every label set.
-	for i, lset := range lbls {
-		var metas []ChunkMeta
-		for j := 0; j <= (i % 20); j++ {
-			metas = append(metas, ChunkMeta{
-				MinTime: int64(j * 10000),
-				MaxTime: int64((j + 1) * 10000),
-				Ref: rand.Uint64(),
-			})
-		}
-		input = append(input, &indexWriterSeries{
-			labels: lset,
-			chunks: metas,
-		})
-	}
+	// An empty index must still result in a readable file.
 	iw, err := newIndexWriter(dir)
-	require.NoError(t, err)
-	// Population procedure as done by compaction.
-	var (
-		postings = &memPostings{m: make(map[term][]uint32, 512)}
-		values = map[string]stringset{}
-	)
-	for i, s := range input {
-		iw.AddSeries(uint32(i), s.labels, s.chunks...)
-		for _, l := range s.labels {
-			valset, ok := values[l.Name]
-			if !ok {
-				valset = stringset{}
-				values[l.Name] = valset
-			}
-			valset.set(l.Value)
-			postings.add(uint32(i), term{name: l.Name, value: l.Value})
-		}
-		i++
-	}
-	all := make([]uint32, len(lbls))
-	for i := range all {
-		all[i] = uint32(i)
-	}
-	err = iw.WritePostings("", "", newListPostings(all))
-	require.NoError(t, err)
-	err = iw.Close()
-	require.NoError(t, err)
+	require.NoError(t, err, "create index writer")
+	require.NoError(t, iw.Close(), "close index writer")
 	ir, err := newIndexReader(dir)
+	require.NoError(t, err, "open index reader")
+	require.NoError(t, ir.Close(), "close index reader")
+	// Modify magic header must cause open to fail.
+	f, err := os.OpenFile(filepath.Join(dir, "index"), os.O_WRONLY, 0666)
 	require.NoError(t, err)
+	_, err = f.WriteAt([]byte{0, 0}, 0)
+	require.NoError(t, err)
-	allp, err := ir.Postings("", "")
-	require.NoError(t, err)
+	_, err = newIndexReader(dir)
+	require.Error(t, err)
+}
-	var result indexWriterSeriesSlice
+// func TestPersistence_index_e2e(t *testing.T) {
+// dir, err := ioutil.TempDir("", "test_persistence_e2e")
+// require.NoError(t, err)
+// defer os.RemoveAll(dir)
-	for allp.Next() {
-		ref := allp.At()
+// lbls, err := readPrometheusLabels("testdata/20k.series", 20000)
+// require.NoError(t, err)
-		lset, chks, err := ir.Series(ref)
-		require.NoError(t, err)
+// var input indexWriterSeriesSlice
-		result = append(result, &indexWriterSeries{
-			offset: ref,
-			labels: lset,
-			chunks: chks,
-		})
-	}
-	require.NoError(t, allp.Err())
+// // Generate ChunkMetas for every label set.
+// for i, lset := range lbls {
+// var metas []ChunkMeta
-	// Persisted data must be sorted.
-	sort.IsSorted(result)
+// for j := 0; j <= (i % 20); j++ {
+// metas = append(metas, ChunkMeta{
+// MinTime: int64(j * 10000),
+// MaxTime: int64((j + 1) * 10000),
+// Ref: rand.Uint64(),
+// })
+// }
+// input = append(input, &indexWriterSeries{
+// labels: lset,
+// chunks: metas,
+// })
+// }
-	// Validate result contents.
-	sort.Sort(input)
-	require.Equal(t, len(input), len(result))
+// iw, err := newIndexWriter(dir)
+// require.NoError(t, err)
-	for i, re := range result {
-		exp := input[i]
+// // Population procedure as done by compaction.
+// var (
+// postings = &memPostings{m: make(map[term][]uint32, 512)}
+// values = map[string]stringset{}
+// )
-		require.Equal(t, exp.labels, re.labels)
-		require.Equal(t, exp.chunks, re.chunks)
-	}
+// for i, s := range input {
+// iw.AddSeries(uint32(i), s.labels, s.chunks...)
-	require.NoError(t, ir.Close())
+// for _, l := range s.labels {
+// valset, ok := values[l.Name]
+// if !ok {
+// valset = stringset{}
+// values[l.Name] = valset
+// }
+// valset.set(l.Value)
-}
+// postings.add(uint32(i), term{name: l.Name, value: l.Value})
+// }
+// i++
+// }
+// all := make([]uint32, len(lbls))
+// for i := range all {
+// all[i] = uint32(i)
+// }
+// err = iw.WritePostings("", "", newListPostings(all))
+// require.NoError(t, err)
+// err = iw.Close()
+// require.NoError(t, err)
+// ir, err := newIndexReader(dir)
+// require.NoError(t, err)
+// allp, err := ir.Postings("", "")
+// require.NoError(t, err)
+// var result indexWriterSeriesSlice
+// for allp.Next() {
+// ref := allp.At()
+// lset, chks, err := ir.Series(ref)
+// require.NoError(t, err)
+// result = append(result, &indexWriterSeries{
+// offset: ref,
+// labels: lset,
+// chunks: chks,
+// })
+// }
+// require.NoError(t, allp.Err())
+// // Persisted data must be sorted.
+// sort.IsSorted(result)
+// // Validate result contents.
+// sort.Sort(input)
+// require.Equal(t, len(input), len(result))
+// for i, re := range result {
+// exp := input[i]
+// require.Equal(t, exp.labels, re.labels)
+// require.Equal(t, exp.chunks, re.chunks)
+// }
+// require.NoError(t, ir.Close())
+// }
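The new corruption case relies on the index file beginning with a fixed magic number that newIndexReader validates on open, which is why zeroing the first bytes must make the open fail. A minimal sketch of that kind of check; the constant value and helper name below are assumed placeholders, not taken from this diff:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// magicIndex stands in for the index file's magic number; the real value
// lives in the tsdb package and may differ.
const magicIndex = 0xBAAAD700

// checkMagic fails when the first four bytes do not spell the magic number,
// so overwriting them with zeros (as the test does via WriteAt) breaks open.
func checkMagic(b []byte) error {
	if len(b) < 4 || binary.BigEndian.Uint32(b[:4]) != magicIndex {
		return errors.New("invalid magic number")
	}
	return nil
}

func main() {
	fmt.Println(checkMagic([]byte{0xBA, 0xAA, 0xD7, 0x00})) // <nil>
	fmt.Println(checkMagic([]byte{0x00, 0x00, 0xD7, 0x00})) // invalid magic number
}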