Avoid unnecessary cloning of the head chunk.

Change-Id: I5da774515d5493166a197b5814d0a720628cfaff
This commit is contained in:
Bjoern Rabenstein 2014-10-27 15:55:44 +01:00
parent f1de5b0c4e
commit a617269b12
2 changed files with 34 additions and 5 deletions

View File

@ -68,6 +68,13 @@ func (cd *chunkDesc) unpin() {
} }
} }
// getRefCount returns the current reference count of the chunkDesc,
// read under the chunkDesc's lock.
func (cd *chunkDesc) getRefCount() int {
	cd.Lock()
	count := cd.refCount
	cd.Unlock()
	return count
}
func (cd *chunkDesc) firstTime() clientmodel.Timestamp { func (cd *chunkDesc) firstTime() clientmodel.Timestamp {
cd.Lock() cd.Lock()
defer cd.Unlock() defer cd.Unlock()

View File

@ -144,6 +144,10 @@ type memorySeries struct {
// persisted. If true, the current head chunk must not be modified // persisted. If true, the current head chunk must not be modified
// anymore. // anymore.
headChunkPersisted bool headChunkPersisted bool
// Whether the current head chunk is used by an iterator. In that case,
// a non-persisted head chunk has to be cloned before more samples are
// appended.
headChunkUsedByIterator bool
} }
// newMemorySeries returns a pointer to a newly allocated memorySeries for the // newMemorySeries returns a pointer to a newly allocated memorySeries for the
@ -165,6 +169,21 @@ func (s *memorySeries) add(fp clientmodel.Fingerprint, v *metric.SamplePair, per
newHead := newChunkDesc(newDeltaEncodedChunk(d1, d0, true)) newHead := newChunkDesc(newDeltaEncodedChunk(d1, d0, true))
s.chunkDescs = append(s.chunkDescs, newHead) s.chunkDescs = append(s.chunkDescs, newHead)
s.headChunkPersisted = false s.headChunkPersisted = false
} else if s.headChunkUsedByIterator && s.head().getRefCount() > 1 {
// We only need to clone the head chunk if the current head
// chunk was used in an iterator at all and if the refCount is
// still greater than the 1 we always have because the head
// chunk is not yet persisted. The latter is just an
// approximation. We will still clone unnecessarily if an older
// iterator using a previous version of the head chunk is still
// around and keeps the head chunk pinned. We would need to track
// pins per version of the head chunk, which is probably not
// worth the effort.
chunkOps.WithLabelValues(clone).Inc()
// No locking needed here because a non-persisted head chunk can
// not get evicted concurrently.
s.head().chunk = s.head().chunk.clone()
s.headChunkUsedByIterator = false
} }
chunks := s.head().add(v) chunks := s.head().add(v)
@ -258,6 +277,9 @@ func (s *memorySeries) evictOlderThan(
// This is a non-persisted head chunk that is old enough // This is a non-persisted head chunk that is old enough
// for eviction. Queue it to be persisted: // for eviction. Queue it to be persisted:
s.headChunkPersisted = true s.headChunkPersisted = true
// Since we cannot modify the head chunk from now on, we
// don't need to bother with cloning anymore.
s.headChunkUsedByIterator = false
persistQueue <- &persistRequest{ persistQueue <- &persistRequest{
fingerprint: fp, fingerprint: fp,
chunkDesc: cd, chunkDesc: cd,
@ -401,16 +423,16 @@ func (s *memorySeries) preloadChunksForRange(
return s.preloadChunks(pinIndexes, p) return s.preloadChunks(pinIndexes, p)
} }
// newIterator returns a new SeriesIterator. The caller must have locked the
// fingerprint of the memorySeries.
func (s *memorySeries) newIterator(lockFunc, unlockFunc func()) SeriesIterator { func (s *memorySeries) newIterator(lockFunc, unlockFunc func()) SeriesIterator {
chunks := make([]chunk, 0, len(s.chunkDescs)) chunks := make([]chunk, 0, len(s.chunkDescs))
for i, cd := range s.chunkDescs { for i, cd := range s.chunkDescs {
if !cd.isEvicted() { if !cd.isEvicted() {
if i == len(s.chunkDescs)-1 { if i == len(s.chunkDescs)-1 && !s.headChunkPersisted {
chunkOps.WithLabelValues(clone).Inc() s.headChunkUsedByIterator = true
chunks = append(chunks, cd.chunk.clone())
} else {
chunks = append(chunks, cd.chunk)
} }
chunks = append(chunks, cd.chunk)
} }
} }