Use ChunkMeta references for clarity

Passing ChunkMetas by value has been a common source of hard-to-debug
issues. It's a premature and unbenchmarked optimization, and semantically
we want ChunkMetas to be references in all changed cases.
Fabian Reinartz 2017-03-14 15:40:16 +01:00
parent d6fb6aaaa8
commit a8e8903350
6 changed files with 34 additions and 40 deletions
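
For context, here is a minimal sketch, not taken from this repository, of the kind of bug the commit message alludes to. ChunkMeta below is a stripped-down stand-in with only a Ref field, and setRefsByValue / setRefsByPointer are hypothetical helpers that mirror the old and new signatures: ranging over a []ChunkMeta by value silently drops writes to the loop variable, while a []*ChunkMeta behaves the way call sites expect.

package main

import "fmt"

// ChunkMeta is a stripped-down stand-in for the real type; only Ref matters here.
type ChunkMeta struct {
	Ref uint64
}

// setRefsByValue mirrors the old signatures (chunks passed as values):
// chk is a copy of each element, so the assignment never reaches the slice.
func setRefsByValue(chks []ChunkMeta) {
	for _, chk := range chks {
		chk.Ref = 42 // lost: writes go to a copy
	}
}

// setRefsByPointer mirrors the new signatures (chunks passed as pointers):
// chk aliases the shared struct, so the caller observes the update.
func setRefsByPointer(chks []*ChunkMeta) {
	for _, chk := range chks {
		chk.Ref = 42 // sticks: writes go through the pointer
	}
}

func main() {
	byVal := []ChunkMeta{{}, {}}
	setRefsByValue(byVal)
	fmt.Println(byVal[0].Ref) // prints 0: the update vanished

	byPtr := []*ChunkMeta{{}, {}}
	setRefsByPointer(byPtr)
	fmt.Println(byPtr[0].Ref) // prints 42: visible to the caller
}

The old code worked around this with the "for i := range chks { chk := &chks[i] }" idiom seen on the removed lines below; the commit drops the need for it by making the slice elements pointers in the first place.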

View File

@@ -36,7 +36,7 @@ type ChunkWriter interface {
// must be populated.
// After returning successfully, the Ref fields in the ChunkMetas
// is set and can be used to retrieve the chunks from the written data.
- WriteChunks(chunks ...ChunkMeta) error
+ WriteChunks(chunks ...*ChunkMeta) error
// Close writes any required finalization and closes the resources
// associated with the underlying writer.
@@ -156,7 +156,7 @@ func (w *chunkWriter) write(wr io.Writer, b []byte) error {
return err
}
- func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
+ func (w *chunkWriter) WriteChunks(chks ...*ChunkMeta) error {
// Calculate maximum space we need and cut a new segment in case
// we don't fit into the current one.
maxLen := int64(binary.MaxVarintLen32)
@@ -184,9 +184,7 @@ func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
}
seq := uint64(w.seq()) << 32
- for i := range chks {
- chk := &chks[i]
+ for _, chk := range chks {
chk.Ref = seq | uint64(w.n)
n = binary.PutUvarint(b, uint64(len(chk.Chunk.Bytes())))

View File

@@ -334,7 +334,7 @@ func (c *compactor) populate(blocks []Block, indexw IndexWriter, chunkw ChunkWri
type compactionSet interface {
Next() bool
- At() (labels.Labels, []ChunkMeta)
+ At() (labels.Labels, []*ChunkMeta)
Err() error
}
@@ -344,7 +344,7 @@ type compactionSeriesSet struct {
chunks ChunkReader
l labels.Labels
- c []ChunkMeta
+ c []*ChunkMeta
err error
}
@@ -365,9 +365,7 @@ func (c *compactionSeriesSet) Next() bool {
if c.err != nil {
return false
}
- for i := range c.c {
- chk := &c.c[i]
+ for _, chk := range c.c {
chk.Chunk, c.err = c.chunks.Chunk(chk.Ref)
if c.err != nil {
return false
@@ -384,7 +382,7 @@ func (c *compactionSeriesSet) Err() error {
return c.p.Err()
}
- func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta) {
+ func (c *compactionSeriesSet) At() (labels.Labels, []*ChunkMeta) {
return c.l, c.c
}
@@ -393,12 +391,12 @@ type compactionMerger struct {
aok, bok bool
l labels.Labels
- c []ChunkMeta
+ c []*ChunkMeta
}
type compactionSeries struct {
labels labels.Labels
- chunks []ChunkMeta
+ chunks []*ChunkMeta
}
func newCompactionMerger(a, b compactionSet) (*compactionMerger, error) {
@@ -459,7 +457,7 @@ func (c *compactionMerger) Err() error {
return c.b.Err()
}
- func (c *compactionMerger) At() (labels.Labels, []ChunkMeta) {
+ func (c *compactionMerger) At() (labels.Labels, []*ChunkMeta) {
return c.l, c.c
}

View File

@@ -480,7 +480,7 @@ func (h *headIndexReader) Postings(name, value string) (Postings, error) {
}
// Series returns the series for the given reference.
- func (h *headIndexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
+ func (h *headIndexReader) Series(ref uint32) (labels.Labels, []*ChunkMeta, error) {
h.mtx.RLock()
defer h.mtx.RUnlock()
@@ -488,13 +488,13 @@ func (h *headIndexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error)
return nil, nil, ErrNotFound
}
s := h.series[ref]
- metas := make([]ChunkMeta, 0, len(s.chunks))
+ metas := make([]*ChunkMeta, 0, len(s.chunks))
s.mtx.RLock()
defer s.mtx.RUnlock()
for i, c := range s.chunks {
- metas = append(metas, ChunkMeta{
+ metas = append(metas, &ChunkMeta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: (uint64(ref) << 32) | uint64(i),

View File

@@ -33,7 +33,7 @@ type IndexWriter interface {
// of chunks that the index can reference.
// The reference number is used to resolve a series against the postings
// list iterator. It only has to be available during the write processing.
- AddSeries(ref uint32, l labels.Labels, chunks ...ChunkMeta) error
+ AddSeries(ref uint32, l labels.Labels, chunks ...*ChunkMeta) error
// WriteLabelIndex serializes an index from label names to values.
// The passed in values chained tuples of strings of the length of names.
@@ -49,8 +49,8 @@ type IndexWriter interface {
type indexWriterSeries struct {
labels labels.Labels
- chunks []ChunkMeta // series file offset of chunks
- offset uint32 // index file offset of series reference
+ chunks []*ChunkMeta // series file offset of chunks
+ offset uint32 // index file offset of series reference
}
// indexWriter implements the IndexWriter interface for the standard
@@ -142,7 +142,7 @@ func (w *indexWriter) writeMeta() error {
return w.write(w.bufw, b[:])
}
- func (w *indexWriter) AddSeries(ref uint32, lset labels.Labels, chunks ...ChunkMeta) error {
+ func (w *indexWriter) AddSeries(ref uint32, lset labels.Labels, chunks ...*ChunkMeta) error {
if _, ok := w.series[ref]; ok {
return errors.Errorf("series with reference %d already added", ref)
}
@@ -419,7 +419,7 @@ type IndexReader interface {
Postings(name, value string) (Postings, error)
// Series returns the series for the given reference.
- Series(ref uint32) (labels.Labels, []ChunkMeta, error)
+ Series(ref uint32) (labels.Labels, []*ChunkMeta, error)
// LabelIndices returns the label pairs for which indices exist.
LabelIndices() ([][]string, error)
@@ -599,7 +599,7 @@ func (r *indexReader) LabelIndices() ([][]string, error) {
return res, nil
}
- func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
+ func (r *indexReader) Series(ref uint32) (labels.Labels, []*ChunkMeta, error) {
k, n := binary.Uvarint(r.b[ref:])
if n < 1 {
return nil, nil, errors.Wrap(errInvalidSize, "number of labels")
@@ -642,7 +642,7 @@ func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
}
b = b[n:]
- chunks := make([]ChunkMeta, 0, k)
+ chunks := make([]*ChunkMeta, 0, k)
for i := 0; i < int(k); i++ {
firstTime, n := binary.Varint(b)
@@ -663,7 +663,7 @@ func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
}
b = b[n:]
- chunks = append(chunks, ChunkMeta{
+ chunks = append(chunks, &ChunkMeta{
Ref: o,
MinTime: firstTime,
MaxTime: lastTime,

View File

@@ -121,10 +121,10 @@ func TestPersistence_index_e2e(t *testing.T) {
// Generate ChunkMetas for every label set.
for i, lset := range lbls {
- var metas []ChunkMeta
+ var metas []*ChunkMeta
for j := 0; j <= (i % 20); j++ {
- metas = append(metas, ChunkMeta{
+ metas = append(metas, &ChunkMeta{
MinTime: int64(j * 10000),
MaxTime: int64((j + 1) * 10000),
Ref: rand.Uint64(),

View File

@@ -345,7 +345,7 @@ func (s *mergedSeriesSet) Next() bool {
type chunkSeriesSet interface {
Next() bool
- At() (labels.Labels, []ChunkMeta)
+ At() (labels.Labels, []*ChunkMeta)
Err() error
}
@@ -357,12 +357,12 @@ type baseChunkSeries struct {
absent []string // labels that must be unset in results.
lset labels.Labels
- chks []ChunkMeta
+ chks []*ChunkMeta
err error
}
- func (s *baseChunkSeries) At() (labels.Labels, []ChunkMeta) { return s.lset, s.chks }
- func (s *baseChunkSeries) Err() error { return s.err }
+ func (s *baseChunkSeries) At() (labels.Labels, []*ChunkMeta) { return s.lset, s.chks }
+ func (s *baseChunkSeries) Err() error { return s.err }
func (s *baseChunkSeries) Next() bool {
Outer:
@@ -400,20 +400,18 @@ type populatedChunkSeries struct {
mint, maxt int64
err error
- chks []ChunkMeta
+ chks []*ChunkMeta
lset labels.Labels
}
- func (s *populatedChunkSeries) At() (labels.Labels, []ChunkMeta) { return s.lset, s.chks }
- func (s *populatedChunkSeries) Err() error { return s.err }
+ func (s *populatedChunkSeries) At() (labels.Labels, []*ChunkMeta) { return s.lset, s.chks }
+ func (s *populatedChunkSeries) Err() error { return s.err }
func (s *populatedChunkSeries) Next() bool {
for s.set.Next() {
lset, chks := s.set.At()
- for i := range chks {
- c := &chks[i]
+ for i, c := range chks {
if c.MaxTime < s.mint {
chks = chks[1:]
continue
@@ -468,7 +466,7 @@ func (s *blockSeriesSet) Err() error { return s.err }
// time series data.
type chunkSeries struct {
labels labels.Labels
- chunks []ChunkMeta // in-order chunk refs
+ chunks []*ChunkMeta // in-order chunk refs
}
func (s *chunkSeries) Labels() labels.Labels {
@@ -562,13 +560,13 @@ func (it *chainedSeriesIterator) Err() error {
// chunkSeriesIterator implements a series iterator on top
// of a list of time-sorted, non-overlapping chunks.
type chunkSeriesIterator struct {
- chunks []ChunkMeta
+ chunks []*ChunkMeta
i int
cur chunks.Iterator
}
- func newChunkSeriesIterator(cs []ChunkMeta) *chunkSeriesIterator {
+ func newChunkSeriesIterator(cs []*ChunkMeta) *chunkSeriesIterator {
return &chunkSeriesIterator{
chunks: cs,
i: 0,