2014-09-19 16:18:44 +00:00
|
|
|
// Copyright 2014 Prometheus Team
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2014-09-16 13:47:24 +00:00
|
|
|
package local
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
import (
|
2014-10-22 17:21:23 +00:00
|
|
|
"math"
|
2014-06-06 09:55:53 +00:00
|
|
|
"sort"
|
|
|
|
"sync"
|
2014-10-23 13:18:32 +00:00
|
|
|
"sync/atomic"
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
clientmodel "github.com/prometheus/client_golang/model"
|
|
|
|
|
|
|
|
"github.com/prometheus/prometheus/storage/metric"
|
|
|
|
)
|
|
|
|
|
2014-10-21 15:53:53 +00:00
|
|
|
// chunkDescEvictionFactor is a factor used for chunkDesc eviction (as opposed
|
|
|
|
// to evictions of chunks, see method evictOlderThan. A chunk takes about 20x
|
|
|
|
// more memory than a chunkDesc. With a chunkDescEvictionFactor of 10, not more
|
|
|
|
// than a third of the total memory taken by a series will be used for
|
|
|
|
// chunkDescs.
|
|
|
|
const chunkDescEvictionFactor = 10
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
// fingerprintSeriesPair pairs a fingerprint with a memorySeries pointer.
type fingerprintSeriesPair struct {
	fp     clientmodel.Fingerprint // Fingerprint identifying the series.
	series *memorySeries           // The series the fingerprint maps to.
}
|
|
|
|
|
|
|
|
// seriesMap maps fingerprints to memory series. All its methods are
// goroutine-safe. A SeriesMap is effectively is a goroutine-safe version of
// map[clientmodel.Fingerprint]*memorySeries.
type seriesMap struct {
	mtx sync.RWMutex // Protects m.
	m   map[clientmodel.Fingerprint]*memorySeries
}
|
|
|
|
|
|
|
|
// newSeriesMap returns a newly allocated empty seriesMap. To create a seriesMap
|
|
|
|
// based on a prefilled map, use an explicit initializer.
|
2014-10-08 11:49:42 +00:00
|
|
|
func newSeriesMap() *seriesMap {
|
|
|
|
return &seriesMap{m: make(map[clientmodel.Fingerprint]*memorySeries)}
|
2014-10-07 17:11:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// length returns the number of mappings in the seriesMap.
|
2014-10-08 11:49:42 +00:00
|
|
|
func (sm *seriesMap) length() int {
|
2014-10-07 17:11:24 +00:00
|
|
|
sm.mtx.RLock()
|
|
|
|
defer sm.mtx.RUnlock()
|
|
|
|
|
|
|
|
return len(sm.m)
|
|
|
|
}
|
|
|
|
|
|
|
|
// get returns a memorySeries for a fingerprint. Return values have the same
|
|
|
|
// semantics as the native Go map.
|
2014-10-08 11:49:42 +00:00
|
|
|
func (sm *seriesMap) get(fp clientmodel.Fingerprint) (s *memorySeries, ok bool) {
|
2014-10-07 17:11:24 +00:00
|
|
|
sm.mtx.RLock()
|
|
|
|
defer sm.mtx.RUnlock()
|
|
|
|
|
|
|
|
s, ok = sm.m[fp]
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-10 17:16:07 +00:00
|
|
|
// put adds a mapping to the seriesMap. It panics if s == nil.
|
2014-10-08 11:49:42 +00:00
|
|
|
func (sm *seriesMap) put(fp clientmodel.Fingerprint, s *memorySeries) {
|
2014-10-07 17:11:24 +00:00
|
|
|
sm.mtx.Lock()
|
|
|
|
defer sm.mtx.Unlock()
|
|
|
|
|
2014-10-10 17:16:07 +00:00
|
|
|
if s == nil {
|
|
|
|
panic("tried to add nil pointer to seriesMap")
|
|
|
|
}
|
2014-10-07 17:11:24 +00:00
|
|
|
sm.m[fp] = s
|
|
|
|
}
|
|
|
|
|
|
|
|
// del removes a mapping from the series Map.
|
2014-10-08 11:49:42 +00:00
|
|
|
func (sm *seriesMap) del(fp clientmodel.Fingerprint) {
|
2014-10-07 17:11:24 +00:00
|
|
|
sm.mtx.Lock()
|
|
|
|
defer sm.mtx.Unlock()
|
|
|
|
|
|
|
|
delete(sm.m, fp)
|
|
|
|
}
|
|
|
|
|
|
|
|
// iter returns a channel that produces all mappings in the seriesMap. The
// channel will be closed once all fingerprints have been received. Not
// consuming all fingerprints from the channel will leak a goroutine. The
// semantics of concurrent modification of seriesMap is the similar as the one
// for iterating over a map with a 'range' clause. However, if the next element
// in iteration order is removed after the current element has been received
// from the channel, it will still be produced by the channel.
func (sm *seriesMap) iter() <-chan fingerprintSeriesPair {
	ch := make(chan fingerprintSeriesPair)
	go func() {
		sm.mtx.RLock()
		for fp, s := range sm.m {
			// Drop the read lock while sending so that a slow (or
			// absent) consumer does not block other access to the
			// map while this goroutine is parked on the send.
			sm.mtx.RUnlock()
			ch <- fingerprintSeriesPair{fp, s}
			sm.mtx.RLock()
		}
		sm.mtx.RUnlock()
		close(ch)
	}()
	return ch
}
|
|
|
|
|
|
|
|
// fpIter returns a channel that produces all fingerprints in the seriesMap. The
// channel will be closed once all fingerprints have been received. Not
// consuming all fingerprints from the channel will leak a goroutine. The
// semantics of concurrent modification of seriesMap is the similar as the one
// for iterating over a map with a 'range' clause. However, if the next element
// in iteration order is removed after the current element has been received
// from the channel, it will still be produced by the channel.
func (sm *seriesMap) fpIter() <-chan clientmodel.Fingerprint {
	ch := make(chan clientmodel.Fingerprint)
	go func() {
		sm.mtx.RLock()
		for fp := range sm.m {
			// Drop the read lock while sending so that a slow (or
			// absent) consumer does not block other access to the
			// map while this goroutine is parked on the send.
			sm.mtx.RUnlock()
			ch <- fp
			sm.mtx.RLock()
		}
		sm.mtx.RUnlock()
		close(ch)
	}()
	return ch
}
|
|
|
|
|
2014-06-06 09:55:53 +00:00
|
|
|
// memorySeries holds the in-memory representation of a single time series:
// its metric and the descriptors of its chunks.
type memorySeries struct {
	// The metric identifying this series.
	metric clientmodel.Metric
	// Sorted by start time, overlapping chunk ranges are forbidden.
	chunkDescs []*chunkDesc
	// The chunkDescs in memory might not have all the chunkDescs for the
	// chunks that are persisted to disk. The missing chunkDescs are all
	// contiguous and at the tail end. chunkDescsOffset is the index of the
	// chunk on disk that corresponds to the first chunkDesc in memory. If
	// it is 0, the chunkDescs are all loaded. A value of -1 denotes a
	// special case: There are chunks on disk, but the offset to the
	// chunkDescs in memory is unknown. Also, there is no overlap between
	// chunks on disk and chunks in memory (implying that upon first
	// persiting of a chunk in memory, the offset has to be set).
	chunkDescsOffset int
	// Whether the current head chunk has already been scheduled to be
	// persisted. If true, the current head chunk must not be modified
	// anymore.
	headChunkPersisted bool
	// Whether the current head chunk is used by an iterator. In that case,
	// a non-persisted head chunk has to be cloned before more samples are
	// appended.
	headChunkUsedByIterator bool
}
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
// newMemorySeries returns a pointer to a newly allocated memorySeries for the
|
|
|
|
// given metric. reallyNew defines if the memorySeries is a genuinely new series
|
|
|
|
// or (if false) a series for a metric being unarchived, i.e. a series that
|
|
|
|
// existed before but has been evicted from memory.
|
|
|
|
func newMemorySeries(m clientmodel.Metric, reallyNew bool) *memorySeries {
|
2014-10-27 19:40:48 +00:00
|
|
|
s := memorySeries{
|
2014-10-07 17:11:24 +00:00
|
|
|
metric: m,
|
|
|
|
headChunkPersisted: !reallyNew,
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
2014-10-27 19:40:48 +00:00
|
|
|
if !reallyNew {
|
|
|
|
s.chunkDescsOffset = -1
|
|
|
|
}
|
|
|
|
return &s
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
// add adds a sample pair to the series.
// It returns chunkDescs that must be queued to be persisted.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) add(fp clientmodel.Fingerprint, v *metric.SamplePair) []*chunkDesc {
	if len(s.chunkDescs) == 0 || s.headChunkPersisted {
		// Start a fresh head chunk: either this is a brand-new series,
		// or the previous head chunk has been frozen for persistence
		// and must not be modified anymore.
		newHead := newChunkDesc(newDeltaEncodedChunk(d1, d0, true))
		s.chunkDescs = append(s.chunkDescs, newHead)
		s.headChunkPersisted = false
	} else if s.headChunkUsedByIterator && s.head().getRefCount() > 1 {
		// We only need to clone the head chunk if the current head
		// chunk was used in an iterator at all and if the refCount is
		// still greater than the 1 we always have because the head
		// chunk is not yet persisted. The latter is just an
		// approximation. We will still clone unnecessarily if an older
		// iterator using a previous version of the head chunk is still
		// around and keep the head chunk pinned. We needed to track
		// pins by version of the head chunk, which is probably not
		// worth the effort.
		chunkOps.WithLabelValues(clone).Inc()
		// No locking needed here because a non-persisted head chunk can
		// not get evicted concurrently.
		s.head().chunk = s.head().chunk.clone()
		s.headChunkUsedByIterator = false
	}

	// Append to the head chunk. If it overflows, add() returns the (now
	// full) head chunk plus one or more successor chunks.
	chunks := s.head().add(v)
	s.head().chunk = chunks[0]

	var chunkDescsToPersist []*chunkDesc
	if len(chunks) > 1 {
		// The head chunk overflowed: queue the completed head chunk for
		// persistence and register descriptors for the new chunks.
		chunkDescsToPersist = append(chunkDescsToPersist, s.head())
		for i, c := range chunks[1:] {
			cd := newChunkDesc(c)
			s.chunkDescs = append(s.chunkDescs, cd)
			// The last chunk is still growing.
			if i < len(chunks[1:])-1 {
				chunkDescsToPersist = append(chunkDescsToPersist, cd)
			}
		}
	}
	return chunkDescsToPersist
}
|
|
|
|
|
2014-10-14 15:44:01 +00:00
|
|
|
// evictOlderThan marks for eviction all chunks whose latest sample is older
// than the given timestamp. It returns allEvicted as true if all chunks in the
// series were immediately evicted (i.e. all chunks are older than the
// timestamp, and none of the chunks was pinned).
//
// The method also evicts chunkDescs if there are chunkDescEvictionFactor times
// more chunkDescs in the series than unevicted chunks. (The number of unevicted
// chunks is considered the number of chunks between (and including) the oldest
// chunk that could not be evicted immediately and the newest chunk in the
// series, even if chunks in between were evicted.)
//
// Special considerations for the head chunk: If it has not been scheduled to be
// persisted yet but is old enough for eviction, this method returns a pointer
// to the descriptor of the head chunk to be persisted. (Otherwise, the method
// returns nil.) The caller is then responsible for persisting the head
// chunk. The internal state of this memorySeries is already set accordingly by
// this method. Calling evictOlderThan for a series with a non-persisted head
// chunk that is old enough for eviction will never evict all chunks
// immediately, even if no chunk is pinned for other reasons, because the head
// chunk is not persisted yet. A series old enough for archiving will require at
// least two eviction runs to become ready for archiving: In the first run, its
// head chunk is requested to be persisted. The next call of evictOlderThan will
// then return true, provided that the series hasn't received new samples in the
// meantime, the head chunk has now been persisted, and no chunk is pinned for
// other reasons.
//
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) evictOlderThan(t clientmodel.Timestamp) (allEvicted bool, headChunkToPersist *chunkDesc) {
	allEvicted = true
	iOldestNotEvicted := -1

	defer func() {
		// Evict chunkDescs if there are chunkDescEvictionFactor times
		// more than non-evicted chunks and we are not going to archive
		// the whole series anyway.
		if iOldestNotEvicted != -1 {
			lenToKeep := chunkDescEvictionFactor * (len(s.chunkDescs) - iOldestNotEvicted)
			if lenToKeep < len(s.chunkDescs) {
				lenEvicted := len(s.chunkDescs) - lenToKeep
				// Keep chunkDescsOffset consistent with the
				// number of leading descriptors dropped.
				s.chunkDescsOffset += lenEvicted
				chunkDescOps.WithLabelValues(evict).Add(float64(lenEvicted))
				atomic.AddInt64(&numMemChunkDescs, -int64(lenEvicted))
				s.chunkDescs = append(
					make([]*chunkDesc, 0, lenToKeep),
					s.chunkDescs[lenEvicted:]...,
				)
			}
		}
	}()

	// For now, always drop the entire range from oldest to t.
	for i, cd := range s.chunkDescs {
		if !cd.lastTime().Before(t) {
			// First chunk too young to evict; chunkDescs are sorted
			// by start time, so everything from here on is kept.
			if iOldestNotEvicted == -1 {
				iOldestNotEvicted = i
			}
			return false, nil
		}
		if cd.isEvicted() {
			continue
		}
		if !s.headChunkPersisted && i == len(s.chunkDescs)-1 {
			// This is a non-persisted head chunk that is old enough
			// for eviction. Request it to be persisted:
			headChunkToPersist = cd
			s.headChunkPersisted = true
			// Since we cannot modify the head chunk from now on, we
			// don't need to bother with cloning anymore.
			s.headChunkUsedByIterator = false
		}
		if !cd.evictOnUnpin() {
			// Chunk is currently pinned; it will be evicted once it
			// is unpinned, but not immediately.
			if iOldestNotEvicted == -1 {
				iOldestNotEvicted = i
			}
			allEvicted = false
		}
	}
	return allEvicted, headChunkToPersist
}
|
|
|
|
|
2014-10-27 19:40:48 +00:00
|
|
|
// purgeOlderThan removes chunkDescs older than t. It also evicts the chunks of
|
|
|
|
// those chunkDescs (although that's probably not even necessary). It returns
|
|
|
|
// the number of purged chunkDescs and true if all chunkDescs have been purged.
|
2014-10-14 15:44:01 +00:00
|
|
|
//
|
2014-10-07 17:11:24 +00:00
|
|
|
// The caller must have locked the fingerprint of the series.
|
2014-10-27 19:40:48 +00:00
|
|
|
func (s *memorySeries) purgeOlderThan(t clientmodel.Timestamp) (int, bool) {
|
2014-06-06 09:55:53 +00:00
|
|
|
keepIdx := len(s.chunkDescs)
|
|
|
|
for i, cd := range s.chunkDescs {
|
|
|
|
if !cd.lastTime().Before(t) {
|
|
|
|
keepIdx = i
|
|
|
|
break
|
|
|
|
}
|
2014-10-14 15:44:01 +00:00
|
|
|
s.chunkDescs[i].evictOnUnpin()
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
2014-10-27 19:40:48 +00:00
|
|
|
if keepIdx > 0 {
|
|
|
|
s.chunkDescs = append(make([]*chunkDesc, 0, len(s.chunkDescs)-keepIdx), s.chunkDescs[keepIdx:]...)
|
|
|
|
atomic.AddInt64(&numMemChunkDescs, -int64(keepIdx))
|
|
|
|
}
|
|
|
|
return keepIdx, len(s.chunkDescs) == 0
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
// preloadChunks is an internal helper method. It pins the chunkDescs at the
// given indexes and loads the chunks of any evicted ones from persistence p.
// On error, all previously pinned chunkDescs are unpinned again and a nil
// slice is returned.
func (s *memorySeries) preloadChunks(indexes []int, p *persistence) ([]*chunkDesc, error) {
	loadIndexes := []int{}
	pinnedChunkDescs := make([]*chunkDesc, 0, len(indexes))
	for _, idx := range indexes {
		cd := s.chunkDescs[idx]
		pinnedChunkDescs = append(pinnedChunkDescs, cd)
		cd.pin() // Have to pin everything first to prevent concurrent evictOnUnpin later.
		if cd.isEvicted() {
			loadIndexes = append(loadIndexes, idx)
		}
	}
	chunkOps.WithLabelValues(pin).Add(float64(len(pinnedChunkDescs)))

	if len(loadIndexes) > 0 {
		if s.chunkDescsOffset == -1 {
			panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
		}
		fp := s.metric.Fingerprint()
		chunks, err := p.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
		if err != nil {
			// Unpin the chunks since we won't return them as pinned chunks now.
			for _, cd := range pinnedChunkDescs {
				cd.unpin()
			}
			chunkOps.WithLabelValues(unpin).Add(float64(len(pinnedChunkDescs)))
			return nil, err
		}
		// Attach each loaded chunk to its descriptor.
		for i, c := range chunks {
			s.chunkDescs[loadIndexes[i]].setChunk(c)
		}
		chunkOps.WithLabelValues(load).Add(float64(len(chunks)))
		atomic.AddInt64(&numMemChunks, int64(len(chunks)))
	}

	return pinnedChunkDescs, nil
}
|
|
|
|
|
|
|
|
/*
|
2014-10-07 17:11:24 +00:00
|
|
|
func (s *memorySeries) preloadChunksAtTime(t clientmodel.Timestamp, p *persistence) (chunkDescs, error) {
|
2014-06-06 09:55:53 +00:00
|
|
|
s.mtx.Lock()
|
|
|
|
defer s.mtx.Unlock()
|
|
|
|
|
|
|
|
if len(s.chunkDescs) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var pinIndexes []int
|
|
|
|
// Find first chunk where lastTime() is after or equal to t.
|
|
|
|
i := sort.Search(len(s.chunkDescs), func(i int) bool {
|
|
|
|
return !s.chunkDescs[i].lastTime().Before(t)
|
|
|
|
})
|
|
|
|
switch i {
|
|
|
|
case 0:
|
|
|
|
pinIndexes = []int{0}
|
|
|
|
case len(s.chunkDescs):
|
|
|
|
pinIndexes = []int{i - 1}
|
|
|
|
default:
|
|
|
|
if s.chunkDescs[i].contains(t) {
|
|
|
|
pinIndexes = []int{i}
|
|
|
|
} else {
|
|
|
|
pinIndexes = []int{i - 1, i}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return s.preloadChunks(pinIndexes, p)
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
2014-10-07 17:11:24 +00:00
|
|
|
// preloadChunksForRange loads chunks for the given range from the persistence.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) preloadChunksForRange(
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	fp clientmodel.Fingerprint, p *persistence,
) ([]*chunkDesc, error) {
	firstChunkDescTime := clientmodel.Timestamp(math.MaxInt64)
	if len(s.chunkDescs) > 0 {
		firstChunkDescTime = s.chunkDescs[0].firstTime()
	}
	if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
		// The requested range starts before the first in-memory
		// chunkDesc, and not all chunkDescs are loaded. Fetch the
		// missing leading chunkDescs from persistence first.
		cds, err := p.loadChunkDescs(fp, firstChunkDescTime)
		if err != nil {
			return nil, err
		}
		s.chunkDescs = append(cds, s.chunkDescs...)
		s.chunkDescsOffset = 0
	}

	if len(s.chunkDescs) == 0 {
		return nil, nil
	}

	// Find first chunk with start time after "from".
	fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].firstTime().After(from)
	})
	// Find first chunk with start time after "through".
	throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].firstTime().After(through)
	})
	if fromIdx > 0 {
		// Include the preceding chunk as well, since it may contain
		// samples at or after "from".
		fromIdx--
	}
	if throughIdx == len(s.chunkDescs) {
		// "through" is beyond the last chunk's start time; clamp to the
		// last chunk.
		throughIdx--
	}

	pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
	for i := fromIdx; i <= throughIdx; i++ {
		pinIndexes = append(pinIndexes, i)
	}
	return s.preloadChunks(pinIndexes, p)
}
|
|
|
|
|
2014-10-27 14:55:44 +00:00
|
|
|
// newIterator returns a new SeriesIterator. The caller must have locked the
|
|
|
|
// fingerprint of the memorySeries.
|
2014-10-07 17:11:24 +00:00
|
|
|
func (s *memorySeries) newIterator(lockFunc, unlockFunc func()) SeriesIterator {
|
2014-10-15 13:53:05 +00:00
|
|
|
chunks := make([]chunk, 0, len(s.chunkDescs))
|
2014-06-06 09:55:53 +00:00
|
|
|
for i, cd := range s.chunkDescs {
|
2014-10-14 15:44:01 +00:00
|
|
|
if !cd.isEvicted() {
|
2014-10-27 14:55:44 +00:00
|
|
|
if i == len(s.chunkDescs)-1 && !s.headChunkPersisted {
|
|
|
|
s.headChunkUsedByIterator = true
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
2014-10-27 14:55:44 +00:00
|
|
|
chunks = append(chunks, cd.chunk)
|
2014-06-06 09:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &memorySeriesIterator{
|
2014-10-07 17:11:24 +00:00
|
|
|
lock: lockFunc,
|
|
|
|
unlock: unlockFunc,
|
2014-06-06 09:55:53 +00:00
|
|
|
chunks: chunks,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// head returns a pointer to the head chunk descriptor. The caller must have
|
|
|
|
// locked the fingerprint of the memorySeries.
|
2014-06-06 09:55:53 +00:00
|
|
|
func (s *memorySeries) head() *chunkDesc {
|
|
|
|
return s.chunkDescs[len(s.chunkDescs)-1]
|
|
|
|
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// values returns all values in the series. The caller must have locked the
|
|
|
|
// fingerprint of the memorySeries.
|
2014-06-06 09:55:53 +00:00
|
|
|
func (s *memorySeries) values() metric.Values {
|
|
|
|
var values metric.Values
|
|
|
|
for _, cd := range s.chunkDescs {
|
|
|
|
for sample := range cd.chunk.values() {
|
|
|
|
values = append(values, *sample)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return values
|
|
|
|
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// firstTime returns the timestamp of the first sample in the series. The caller
|
|
|
|
// must have locked the fingerprint of the memorySeries.
|
2014-09-15 17:24:26 +00:00
|
|
|
func (s *memorySeries) firstTime() clientmodel.Timestamp {
|
|
|
|
return s.chunkDescs[0].firstTime()
|
|
|
|
}
|
|
|
|
|
2014-10-28 18:01:41 +00:00
|
|
|
// lastTime returns the timestamp of the last sample in the series. The caller
|
|
|
|
// must have locked the fingerprint of the memorySeries.
|
2014-09-15 17:24:26 +00:00
|
|
|
func (s *memorySeries) lastTime() clientmodel.Timestamp {
|
|
|
|
return s.head().lastTime()
|
|
|
|
}
|
|
|
|
|
2014-10-14 11:52:39 +00:00
|
|
|
// memorySeriesIterator implements SeriesIterator.
type memorySeriesIterator struct {
	// lock and unlock are called at the start and end of each Get* method.
	lock, unlock func()
	// chunkIt caches the iterator of the chunk last used, so consecutive
	// lookups within the same chunk can reuse it.
	chunkIt chunkIterator
	// chunks holds the (unevicted) chunks of the series at iterator
	// creation time.
	chunks []chunk
}
|
|
|
|
|
2014-09-16 13:47:24 +00:00
|
|
|
// GetValueAtTime implements SeriesIterator. It returns the sample value(s) at
// or around timestamp t (one value for an exact or clamped match, two values
// if t falls between two samples in different chunks).
func (it *memorySeriesIterator) GetValueAtTime(t clientmodel.Timestamp) metric.Values {
	it.lock()
	defer it.unlock()

	// The most common case. We are iterating through a chunk.
	if it.chunkIt != nil && it.chunkIt.contains(t) {
		return it.chunkIt.getValueAtTime(t)
	}

	// t is outside the cached chunk iterator; invalidate it.
	it.chunkIt = nil

	if len(it.chunks) == 0 {
		return nil
	}

	// Before or exactly on the first sample of the series.
	if !t.After(it.chunks[0].firstTime()) {
		// return first value of first chunk
		return it.chunks[0].newIterator().getValueAtTime(t)
	}
	// After or exactly on the last sample of the series.
	if !t.Before(it.chunks[len(it.chunks)-1].lastTime()) {
		// return last value of last chunk
		return it.chunks[len(it.chunks)-1].newIterator().getValueAtTime(t)
	}

	// Find first chunk where lastTime() is after or equal to t.
	i := sort.Search(len(it.chunks), func(i int) bool {
		return !it.chunks[i].lastTime().Before(t)
	})
	if i == len(it.chunks) {
		// Cannot happen: the boundary checks above guarantee t lies
		// within the chunks' overall time range.
		panic("out of bounds")
	}

	if t.Before(it.chunks[i].firstTime()) {
		// We ended up between two chunks.
		return metric.Values{
			it.chunks[i-1].newIterator().getValueAtTime(t)[0],
			it.chunks[i].newIterator().getValueAtTime(t)[0],
		}
	}
	// We ended up in the middle of a chunk. We might stay there for a while,
	// so save it as the current chunk iterator.
	it.chunkIt = it.chunks[i].newIterator()
	return it.chunkIt.getValueAtTime(t)
}
|
|
|
|
|
2014-09-24 14:32:07 +00:00
|
|
|
// GetBoundaryValues implements SeriesIterator. It returns up to two values:
// the sample at (or just inside) the oldest boundary of the interval and the
// sample at (or just inside) the newest boundary. If both resolve to the same
// sample, only one value is returned.
func (it *memorySeriesIterator) GetBoundaryValues(in metric.Interval) metric.Values {
	it.lock()
	defer it.unlock()

	// Find the first relevant chunk.
	i := sort.Search(len(it.chunks), func(i int) bool {
		return !it.chunks[i].lastTime().Before(in.OldestInclusive)
	})
	values := make(metric.Values, 0, 2)
	for i, c := range it.chunks[i:] {
		var chunkIt chunkIterator
		if c.firstTime().After(in.NewestInclusive) {
			if len(values) == 1 {
				// We found the first value before, but are now
				// already past the last value. The value we
				// want must be the last value of the previous
				// chunk. So backtrack...
				chunkIt = it.chunks[i-1].newIterator()
				values = append(values, chunkIt.getValueAtTime(in.NewestInclusive)[0])
			}
			break
		}
		if len(values) == 0 {
			// Still looking for the value at the oldest boundary.
			chunkIt = c.newIterator()
			firstValues := chunkIt.getValueAtTime(in.OldestInclusive)
			switch len(firstValues) {
			case 2:
				// Boundary falls between two samples; take the
				// later one (inside the interval).
				values = append(values, firstValues[1])
			case 1:
				values = firstValues
			default:
				panic("unexpected return from getValueAtTime")
			}
		}
		if c.lastTime().After(in.NewestInclusive) {
			// This chunk covers the newest boundary as well.
			if chunkIt == nil {
				chunkIt = c.newIterator()
			}
			values = append(values, chunkIt.getValueAtTime(in.NewestInclusive)[0])
			break
		}
	}
	if len(values) == 1 {
		// We found exactly one value. In that case, add the most recent we know.
		values = append(
			values,
			it.chunks[len(it.chunks)-1].newIterator().getValueAtTime(in.NewestInclusive)[0],
		)
	}
	if len(values) == 2 && values[0].Equal(&values[1]) {
		// Both boundaries resolved to the same sample; report it once.
		return values[:1]
	}
	return values
}
|
|
|
|
|
2014-09-24 14:32:07 +00:00
|
|
|
// GetRangeValues implements SeriesIterator.
|
2014-06-06 09:55:53 +00:00
|
|
|
func (it *memorySeriesIterator) GetRangeValues(in metric.Interval) metric.Values {
|
2014-10-07 17:11:24 +00:00
|
|
|
it.lock()
|
|
|
|
defer it.unlock()
|
2014-06-06 09:55:53 +00:00
|
|
|
|
|
|
|
// Find the first relevant chunk.
|
|
|
|
i := sort.Search(len(it.chunks), func(i int) bool {
|
|
|
|
return !it.chunks[i].lastTime().Before(in.OldestInclusive)
|
|
|
|
})
|
|
|
|
values := metric.Values{}
|
|
|
|
for _, c := range it.chunks[i:] {
|
|
|
|
if c.firstTime().After(in.NewestInclusive) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// TODO: actually reuse an iterator between calls if we get multiple ranges
|
|
|
|
// from the same chunk.
|
|
|
|
values = append(values, c.newIterator().getRangeValues(in)...)
|
|
|
|
}
|
|
|
|
return values
|
|
|
|
}
|
2014-10-14 11:52:39 +00:00
|
|
|
|
|
|
|
// nopSeriesIterator implements Series Iterator. It never returns any values.
|
|
|
|
type nopSeriesIterator struct{}
|
|
|
|
|
|
|
|
// GetValueAtTime implements SeriesIterator.
|
|
|
|
func (_ nopSeriesIterator) GetValueAtTime(t clientmodel.Timestamp) metric.Values {
|
|
|
|
return metric.Values{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBoundaryValues implements SeriesIterator.
|
|
|
|
func (_ nopSeriesIterator) GetBoundaryValues(in metric.Interval) metric.Values {
|
|
|
|
return metric.Values{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetRangeValues implements SeriesIterator.
|
|
|
|
func (_ nopSeriesIterator) GetRangeValues(in metric.Interval) metric.Values {
|
|
|
|
return metric.Values{}
|
|
|
|
}
|