Precalculate memSeries.head

The head chunk is read far more often than it changes.
Caching it cuts ~14% off walltime and ~27% off CPU for WAL reading.

Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
Brian Brazil 2018-10-31 13:28:56 +00:00
parent d8c8e4e6e4
commit a64b0d51c4
1 changed file with 8 additions and 4 deletions

head.go

@@ -1338,6 +1338,7 @@ type memSeries struct {
 	ref          uint64
 	lset         labels.Labels
 	chunks       []*memChunk
+	headChunk    *memChunk
 	chunkRange   int64
 	firstChunkID int
@@ -1371,6 +1372,7 @@ func (s *memSeries) cut(mint int64) *memChunk {
 		maxTime: math.MinInt64,
 	}
 	s.chunks = append(s.chunks, c)
+	s.headChunk = c
 
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.
@@ -1439,6 +1441,11 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
 	}
 	s.chunks = append(s.chunks[:0], s.chunks[k:]...)
 	s.firstChunkID += k
+	if len(s.chunks) == 0 {
+		s.headChunk = nil
+	} else {
+		s.headChunk = s.chunks[len(s.chunks)-1]
+	}
 
 	return k
 }
@@ -1521,10 +1528,7 @@ func (s *memSeries) iterator(id int) chunkenc.Iterator {
 }
 
 func (s *memSeries) head() *memChunk {
-	if len(s.chunks) == 0 {
-		return nil
-	}
-	return s.chunks[len(s.chunks)-1]
+	return s.headChunk
 }
 
 type memChunk struct {
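
Taken together, the change maintains the invariant that headChunk points at chunks[len(chunks)-1] (or is nil when the slice is empty) at the two places the slice is mutated, cut and truncateChunksBefore, so the hot head() call becomes a single field load. Below is a minimal, self-contained Go sketch of that pattern; the memChunk fields, the truncation criterion, and the main function are simplified stand-ins for illustration, not the real tsdb types or logic.

// Minimal sketch of the cached-head-chunk pattern (simplified stand-in types):
// keep a pointer to the last element of chunks and update it wherever the
// slice changes, so the read path needs no length check or index arithmetic.
package main

import "fmt"

// memChunk is a simplified stand-in; the real type wraps an encoded chunk.
type memChunk struct {
	minTime, maxTime int64
}

type memSeries struct {
	chunks    []*memChunk
	headChunk *memChunk // invariant: chunks[len(chunks)-1], or nil if empty
}

// cut appends a fresh chunk and refreshes the cached head pointer.
func (s *memSeries) cut(mint int64) *memChunk {
	c := &memChunk{minTime: mint, maxTime: mint}
	s.chunks = append(s.chunks, c)
	s.headChunk = c
	return c
}

// truncateChunksBefore drops chunks that end before mint and re-establishes
// the head invariant afterwards (the truncation criterion here is simplified).
func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
	k := 0
	for _, c := range s.chunks {
		if c.maxTime >= mint {
			break
		}
		k++
	}
	s.chunks = append(s.chunks[:0], s.chunks[k:]...)
	if len(s.chunks) == 0 {
		s.headChunk = nil
	} else {
		s.headChunk = s.chunks[len(s.chunks)-1]
	}
	return k
}

// head is the hot path: a single pointer read per call.
func (s *memSeries) head() *memChunk {
	return s.headChunk
}

func main() {
	s := &memSeries{}
	s.cut(0)
	s.cut(100)
	fmt.Println(s.head().minTime) // 100

	s.truncateChunksBefore(50)                   // drops the first chunk
	fmt.Println(s.head().minTime, len(s.chunks)) // 100 1
}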