2017-04-10 18:59:45 +00:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-12-10 17:08:50 +00:00
|
|
|
package tsdb
|
|
|
|
|
2016-12-13 14:26:58 +00:00
|
|
|
import (
|
|
|
|
"fmt"
|
2017-08-05 11:31:48 +00:00
|
|
|
"sort"
|
2016-12-14 14:39:23 +00:00
|
|
|
"strings"
|
2019-05-27 11:24:46 +00:00
|
|
|
"unicode/utf8"
|
2016-12-13 14:26:58 +00:00
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
"github.com/pkg/errors"
|
2019-08-13 08:34:14 +00:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
|
|
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/labels"
|
2016-12-13 14:26:58 +00:00
|
|
|
)
|
2016-12-12 18:12:55 +00:00
|
|
|
|
2016-12-10 17:08:50 +00:00
|
|
|
// Querier provides querying access over time series data of a fixed
// time range.
type Querier interface {
	// Select returns a set of series that matches the given label matchers.
	Select(...labels.Matcher) (SeriesSet, error)

	// LabelValues returns all potential values for a label name.
	LabelValues(string) ([]string, error)

	// LabelValuesFor returns all potential values for a label name
	// under the constraint of another label.
	LabelValuesFor(string, labels.Label) ([]string, error)

	// LabelNames returns all the unique label names present in the block in sorted order.
	LabelNames() ([]string, error)

	// Close releases the resources of the Querier.
	Close() error
}
|
|
|
|
|
2017-06-25 17:02:02 +00:00
|
|
|
// Series exposes a single time series.
type Series interface {
	// Labels returns the complete set of labels identifying the series.
	Labels() labels.Labels

	// Iterator returns a new iterator of the data of the series.
	Iterator() SeriesIterator
}
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
// querier aggregates querying results from time blocks within
// a single partition.
type querier struct {
	// blocks holds one sub-querier per block; results are merged across them.
	blocks []Querier
}
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
func (q *querier) LabelValues(n string) ([]string, error) {
|
2017-06-13 06:25:13 +00:00
|
|
|
return q.lvals(q.blocks, n)
|
|
|
|
}
|
|
|
|
|
2018-11-16 18:02:24 +00:00
|
|
|
// LabelNames returns all the unique label names present querier blocks.
|
|
|
|
func (q *querier) LabelNames() ([]string, error) {
|
|
|
|
labelNamesMap := make(map[string]struct{})
|
|
|
|
for _, b := range q.blocks {
|
|
|
|
names, err := b.LabelNames()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "LabelNames() from Querier")
|
|
|
|
}
|
|
|
|
for _, name := range names {
|
|
|
|
labelNamesMap[name] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
labelNames := make([]string, 0, len(labelNamesMap))
|
|
|
|
for name := range labelNamesMap {
|
|
|
|
labelNames = append(labelNames, name)
|
|
|
|
}
|
|
|
|
sort.Strings(labelNames)
|
|
|
|
|
|
|
|
return labelNames, nil
|
|
|
|
}
|
|
|
|
|
2017-06-13 06:25:13 +00:00
|
|
|
func (q *querier) lvals(qs []Querier, n string) ([]string, error) {
|
|
|
|
if len(qs) == 0 {
|
2017-03-07 10:29:20 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
2017-06-13 06:25:13 +00:00
|
|
|
if len(qs) == 1 {
|
|
|
|
return qs[0].LabelValues(n)
|
|
|
|
}
|
|
|
|
l := len(qs) / 2
|
|
|
|
s1, err := q.lvals(qs[:l], n)
|
2016-12-19 11:26:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-06-13 06:25:13 +00:00
|
|
|
s2, err := q.lvals(qs[l:], n)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-12-19 11:26:25 +00:00
|
|
|
}
|
2017-06-13 06:25:13 +00:00
|
|
|
return mergeStrings(s1, s2), nil
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
// LabelValuesFor returns all potential values for a label name
// under the constraint of another label.
// Not implemented yet; always returns an error.
func (q *querier) LabelValuesFor(string, labels.Label) ([]string, error) {
	return nil, fmt.Errorf("not implemented")
}
|
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
// Select returns a series set over all blocks matching the given matchers,
// merged via q.sel.
func (q *querier) Select(ms ...labels.Matcher) (SeriesSet, error) {
	return q.sel(q.blocks, ms)
}
|
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
func (q *querier) sel(qs []Querier, ms []labels.Matcher) (SeriesSet, error) {
|
2017-06-13 06:25:13 +00:00
|
|
|
if len(qs) == 0 {
|
2017-11-13 11:16:58 +00:00
|
|
|
return EmptySeriesSet(), nil
|
2016-12-20 12:10:37 +00:00
|
|
|
}
|
2017-06-13 06:25:13 +00:00
|
|
|
if len(qs) == 1 {
|
|
|
|
return qs[0].Select(ms...)
|
2016-12-20 12:10:37 +00:00
|
|
|
}
|
2017-06-13 06:25:13 +00:00
|
|
|
l := len(qs) / 2
|
2017-11-13 11:16:58 +00:00
|
|
|
|
|
|
|
a, err := q.sel(qs[:l], ms)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
b, err := q.sel(qs[l:], ms)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return newMergedSeriesSet(a, b), nil
|
2016-12-20 12:10:37 +00:00
|
|
|
}
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
func (q *querier) Close() error {
|
2019-03-19 13:31:57 +00:00
|
|
|
var merr tsdb_errors.MultiError
|
2016-12-28 10:41:44 +00:00
|
|
|
|
|
|
|
for _, bq := range q.blocks {
|
|
|
|
merr.Add(bq.Close())
|
|
|
|
}
|
|
|
|
return merr.Err()
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
2019-02-14 13:29:41 +00:00
|
|
|
// verticalQuerier aggregates querying results from time blocks within
// a single partition. The block time ranges can be overlapping.
type verticalQuerier struct {
	querier
}
|
|
|
|
|
|
|
|
// Select returns a series set over all blocks matching the given matchers,
// merging series from overlapping blocks vertically.
func (q *verticalQuerier) Select(ms ...labels.Matcher) (SeriesSet, error) {
	return q.sel(q.blocks, ms)
}
|
|
|
|
|
|
|
|
func (q *verticalQuerier) sel(qs []Querier, ms []labels.Matcher) (SeriesSet, error) {
|
|
|
|
if len(qs) == 0 {
|
|
|
|
return EmptySeriesSet(), nil
|
|
|
|
}
|
|
|
|
if len(qs) == 1 {
|
|
|
|
return qs[0].Select(ms...)
|
|
|
|
}
|
|
|
|
l := len(qs) / 2
|
|
|
|
|
|
|
|
a, err := q.sel(qs[:l], ms)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
b, err := q.sel(qs[l:], ms)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return newMergedVerticalSeriesSet(a, b), nil
|
|
|
|
}
|
|
|
|
|
2018-01-09 15:40:31 +00:00
|
|
|
// NewBlockQuerier returns a querier against the reader.
|
2017-10-09 13:21:46 +00:00
|
|
|
func NewBlockQuerier(b BlockReader, mint, maxt int64) (Querier, error) {
|
|
|
|
indexr, err := b.Index()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "open index reader")
|
2017-08-25 08:32:54 +00:00
|
|
|
}
|
2017-10-09 13:21:46 +00:00
|
|
|
chunkr, err := b.Chunks()
|
|
|
|
if err != nil {
|
2017-10-23 18:30:03 +00:00
|
|
|
indexr.Close()
|
2017-10-09 13:21:46 +00:00
|
|
|
return nil, errors.Wrapf(err, "open chunk reader")
|
|
|
|
}
|
|
|
|
tombsr, err := b.Tombstones()
|
|
|
|
if err != nil {
|
2017-10-23 18:30:03 +00:00
|
|
|
indexr.Close()
|
|
|
|
chunkr.Close()
|
2017-10-09 13:21:46 +00:00
|
|
|
return nil, errors.Wrapf(err, "open tombstone reader")
|
|
|
|
}
|
|
|
|
return &blockQuerier{
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
index: indexr,
|
|
|
|
chunks: chunkr,
|
|
|
|
tombstones: tombsr,
|
|
|
|
}, nil
|
2017-08-25 08:32:54 +00:00
|
|
|
}
|
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
// blockQuerier provides querying access to a single block database.
type blockQuerier struct {
	index      IndexReader
	chunks     ChunkReader
	tombstones TombstoneReader

	// closed guards against closing the readers twice.
	closed bool

	// mint and maxt bound the time range served by this querier.
	mint, maxt int64
}
|
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
func (q *blockQuerier) Select(ms ...labels.Matcher) (SeriesSet, error) {
|
2017-11-13 12:57:10 +00:00
|
|
|
base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
|
2017-11-13 11:16:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-01-05 14:13:01 +00:00
|
|
|
return &blockSeriesSet{
|
2017-03-07 10:29:20 +00:00
|
|
|
set: &populatedChunkSeries{
|
2017-11-13 12:57:10 +00:00
|
|
|
set: base,
|
2017-03-07 10:29:20 +00:00
|
|
|
chunks: q.chunks,
|
|
|
|
mint: q.mint,
|
|
|
|
maxt: q.maxt,
|
|
|
|
},
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
mint: q.mint,
|
|
|
|
maxt: q.maxt,
|
2017-11-13 11:16:58 +00:00
|
|
|
}, nil
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
2017-05-13 15:43:25 +00:00
|
|
|
func (q *blockQuerier) LabelValues(name string) ([]string, error) {
|
|
|
|
tpls, err := q.index.LabelValues(name)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res := make([]string, 0, tpls.Len())
|
|
|
|
|
|
|
|
for i := 0; i < tpls.Len(); i++ {
|
|
|
|
vals, err := tpls.At(i)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res = append(res, vals[0])
|
|
|
|
}
|
|
|
|
return res, nil
|
|
|
|
}
|
|
|
|
|
2018-11-16 18:02:24 +00:00
|
|
|
// LabelNames returns the unique label names of the block, delegating to
// the underlying index reader.
func (q *blockQuerier) LabelNames() ([]string, error) {
	return q.index.LabelNames()
}
|
|
|
|
|
2017-05-13 15:43:25 +00:00
|
|
|
// LabelValuesFor returns all potential values for a label name under the
// constraint of another label.
// Not implemented yet; always returns an error.
func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
	return nil, fmt.Errorf("not implemented")
}
|
|
|
|
|
|
|
|
func (q *blockQuerier) Close() error {
|
2019-04-30 07:17:07 +00:00
|
|
|
if q.closed {
|
|
|
|
return errors.New("block querier already closed")
|
|
|
|
}
|
2017-10-09 13:21:46 +00:00
|
|
|
|
2019-04-30 07:17:07 +00:00
|
|
|
var merr tsdb_errors.MultiError
|
2017-10-09 13:21:46 +00:00
|
|
|
merr.Add(q.index.Close())
|
|
|
|
merr.Add(q.chunks.Close())
|
|
|
|
merr.Add(q.tombstones.Close())
|
2019-04-30 07:17:07 +00:00
|
|
|
q.closed = true
|
2017-10-09 13:21:46 +00:00
|
|
|
return merr.Err()
|
2017-05-13 15:43:25 +00:00
|
|
|
}
|
|
|
|
|
2019-05-27 11:24:46 +00:00
|
|
|
// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped.
var regexMetaCharacterBytes [16]byte

// isRegexMetaCharacter reports whether byte b needs to be escaped.
func isRegexMetaCharacter(b byte) bool {
	if b >= utf8.RuneSelf {
		return false
	}
	return regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}

func init() {
	// Populate the bitmap with every regexp meta character.
	const metaChars = `.+*?()|[]{}^$`
	for i := 0; i < len(metaChars); i++ {
		b := metaChars[i]
		regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
	}
}

// findSetMatches extracts the alternatives of a plain alternation regexp
// such as ^(?:a|b|c)$, yielding ["a", "b", "c"]. It returns nil whenever
// the pattern is not such a simple set expression.
func findSetMatches(pattern string) []string {
	// Return empty matches if the wrapper from Prometheus is missing.
	if len(pattern) < 6 || !strings.HasPrefix(pattern, "^(?:") || !strings.HasSuffix(pattern, ")$") {
		return nil
	}
	inEscape := false
	builders := []*strings.Builder{{}}
	for i := 4; i < len(pattern)-2; i++ {
		c := pattern[i]
		if inEscape {
			switch {
			case isRegexMetaCharacter(c):
				// An escaped meta character is a literal.
				builders[len(builders)-1].WriteByte(c)
			case c == '\\':
				builders[len(builders)-1].WriteByte('\\')
			default:
				// Escapes like \d carry regexp semantics; not a plain set.
				return nil
			}
			inEscape = false
			continue
		}
		switch {
		case c == '|':
			// Alternation separator starts the next match.
			builders = append(builders, &strings.Builder{})
		case isRegexMetaCharacter(c):
			// Any other unescaped meta character makes this a real regexp.
			return nil
		case c == '\\':
			inEscape = true
		default:
			builders[len(builders)-1].WriteByte(c)
		}
	}
	matches := make([]string, 0, len(builders))
	for _, b := range builders {
		if b.Len() > 0 {
			matches = append(matches, b.String())
		}
	}
	return matches
}
|
|
|
|
|
2017-11-13 12:57:10 +00:00
|
|
|
// PostingsForMatchers assembles a single postings iterator against the index reader
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
// based on the given matchers.
|
2017-11-30 14:34:49 +00:00
|
|
|
func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
var its, notIts []index.Postings
|
|
|
|
// See which label must be non-empty.
|
|
|
|
labelMustBeSet := make(map[string]bool, len(ms))
|
|
|
|
for _, m := range ms {
|
|
|
|
if !m.Matches("") {
|
|
|
|
labelMustBeSet[m.Name()] = true
|
|
|
|
}
|
|
|
|
}
|
2017-11-30 14:34:49 +00:00
|
|
|
|
2017-05-13 15:43:25 +00:00
|
|
|
for _, m := range ms {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
matchesEmpty := m.Matches("")
|
|
|
|
if labelMustBeSet[m.Name()] || !matchesEmpty {
|
|
|
|
// If this matcher must be non-empty, we can be smarter.
|
|
|
|
nm, isNot := m.(*labels.NotMatcher)
|
|
|
|
if isNot && matchesEmpty { // l!="foo"
|
|
|
|
// If the label can't be empty and is a Not and the inner matcher
|
|
|
|
// doesn't match empty, then subtract it out at the end.
|
|
|
|
it, err := postingsForMatcher(ix, nm.Matcher)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
notIts = append(notIts, it)
|
|
|
|
} else if isNot && !matchesEmpty { // l!=""
|
|
|
|
// If the label can't be empty and is a Not, but the inner matcher can
|
|
|
|
// be empty we need to use inversePostingsForMatcher.
|
|
|
|
it, err := inversePostingsForMatcher(ix, nm.Matcher)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
its = append(its, it)
|
|
|
|
} else { // l="a"
|
|
|
|
// Non-Not matcher, use normal postingsForMatcher.
|
|
|
|
it, err := postingsForMatcher(ix, m)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
its = append(its, it)
|
|
|
|
}
|
|
|
|
} else { // l=""
|
|
|
|
// If the matchers for a labelname selects an empty value, it selects all
|
|
|
|
// the series which don't have the label name set too. See:
|
|
|
|
// https://github.com/prometheus/prometheus/issues/3575 and
|
|
|
|
// https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
|
|
|
|
it, err := inversePostingsForMatcher(ix, m)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
notIts = append(notIts, it)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there's nothing to subtract from, add in everything and remove the notIts later.
|
|
|
|
if len(its) == 0 && len(notIts) != 0 {
|
|
|
|
allPostings, err := ix.Postings(index.AllPostingsKey())
|
2017-11-13 11:16:58 +00:00
|
|
|
if err != nil {
|
2017-12-17 18:08:21 +00:00
|
|
|
return nil, err
|
2017-11-13 11:16:58 +00:00
|
|
|
}
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
its = append(its, allPostings)
|
2017-05-13 15:43:25 +00:00
|
|
|
}
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
|
|
|
|
it := index.Intersect(its...)
|
|
|
|
|
|
|
|
for _, n := range notIts {
|
|
|
|
it = index.Without(it, n)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ix.SortedPostings(it), nil
|
2017-05-13 15:43:25 +00:00
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
// This method will not return postings for missing labels.
|
2017-12-17 18:08:21 +00:00
|
|
|
|
2017-04-05 12:14:30 +00:00
|
|
|
// Fast-path for equal matching.
|
|
|
|
if em, ok := m.(*labels.EqualMatcher); ok {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
return ix.Postings(em.Name(), em.Value())
|
2017-04-05 12:14:30 +00:00
|
|
|
}
|
|
|
|
|
2019-05-27 11:24:46 +00:00
|
|
|
// Fast-path for set matching.
|
|
|
|
if em, ok := m.(*labels.RegexpMatcher); ok {
|
|
|
|
setMatches := findSetMatches(em.Value())
|
|
|
|
if len(setMatches) > 0 {
|
|
|
|
return postingsForSetMatcher(ix, em.Name(), setMatches)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
tpls, err := ix.LabelValues(m.Name())
|
2016-12-14 17:38:46 +00:00
|
|
|
if err != nil {
|
2017-11-13 11:16:58 +00:00
|
|
|
return nil, err
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
2017-05-13 15:43:25 +00:00
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
var res []string
|
2018-12-28 18:13:02 +00:00
|
|
|
for i := 0; i < tpls.Len(); i++ {
|
|
|
|
vals, err := tpls.At(i)
|
2016-12-14 17:38:46 +00:00
|
|
|
if err != nil {
|
2017-11-13 11:16:58 +00:00
|
|
|
return nil, err
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
2018-12-28 18:13:02 +00:00
|
|
|
if m.Matches(vals[0]) {
|
|
|
|
res = append(res, vals[0])
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
}
|
2017-05-13 15:43:25 +00:00
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
if len(res) == 0 {
|
2017-11-30 14:34:49 +00:00
|
|
|
return index.EmptyPostings(), nil
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
var rit []index.Postings
|
2016-12-14 17:38:46 +00:00
|
|
|
|
|
|
|
for _, v := range res {
|
2017-11-30 14:34:49 +00:00
|
|
|
it, err := ix.Postings(m.Name(), v)
|
2016-12-14 17:38:46 +00:00
|
|
|
if err != nil {
|
2017-11-13 11:16:58 +00:00
|
|
|
return nil, err
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
rit = append(rit, it)
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
return index.Merge(rit...), nil
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
func inversePostingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) {
	tpls, err := ix.LabelValues(m.Name())
	if err != nil {
		return nil, err
	}

	// Collect every value of the label that does NOT satisfy the matcher.
	var res []string
	for i := 0; i < tpls.Len(); i++ {
		vals, err := tpls.At(i)
		if err != nil {
			return nil, err
		}

		if !m.Matches(vals[0]) {
			res = append(res, vals[0])
		}
	}

	// Merge the postings lists of all non-matching values.
	var rit []index.Postings
	for _, v := range res {
		it, err := ix.Postings(m.Name(), v)
		if err != nil {
			return nil, err
		}

		rit = append(rit, it)
	}

	return index.Merge(rit...), nil
}
|
|
|
|
|
2019-05-27 11:24:46 +00:00
|
|
|
func postingsForSetMatcher(ix IndexReader, name string, matches []string) (index.Postings, error) {
|
|
|
|
var its []index.Postings
|
|
|
|
for _, match := range matches {
|
|
|
|
if it, err := ix.Postings(name, match); err == nil {
|
|
|
|
its = append(its, it)
|
|
|
|
} else {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return index.Merge(its...), nil
|
|
|
|
}
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
// mergeStrings merges two sorted string slices into one sorted slice,
// emitting elements present in both inputs only once.
func mergeStrings(a, b []string) []string {
	maxl := len(a)
	if len(b) > len(a) {
		maxl = len(b)
	}
	// Reserve ~10% headroom over the longer input for elements unique
	// to the shorter one.
	res := make([]string, 0, maxl*10/9)

	for len(a) > 0 && len(b) > 0 {
		// A switch covers all three orderings exhaustively; the original
		// trailing "else if d > 0" was a redundant condition.
		switch d := strings.Compare(a[0], b[0]); {
		case d == 0:
			// Present in both: emit once, advance both.
			res = append(res, a[0])
			a, b = a[1:], b[1:]
		case d < 0:
			res = append(res, a[0])
			a = a[1:]
		default:
			res = append(res, b[0])
			b = b[1:]
		}
	}

	// Append all remaining elements.
	res = append(res, a...)
	res = append(res, b...)
	return res
}
|
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
// SeriesSet contains a set of series.
type SeriesSet interface {
	// Next advances to the next series; it returns false when the set is exhausted.
	Next() bool
	// At returns the series at the current position.
	At() Series
	// Err returns the error, if any, that terminated iteration.
	Err() error
}
|
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
// emptySeriesSet is an errSeriesSet with a nil error: Next is always false.
var emptySeriesSet = errSeriesSet{}

// EmptySeriesSet returns a series set that's always empty.
func EmptySeriesSet() SeriesSet {
	return emptySeriesSet
}
|
2016-12-14 17:38:46 +00:00
|
|
|
|
2017-04-04 09:21:19 +00:00
|
|
|
// mergedSeriesSet takes two series sets as a single series set. The input series sets
// must be sorted and sequential in time, i.e. if they have the same label set,
// the datapoints of a must be before the datapoints of b.
type mergedSeriesSet struct {
	a, b SeriesSet

	cur          Series // series at the current position
	adone, bdone bool   // whether a/b have been exhausted
}
|
|
|
|
|
2017-08-25 14:06:24 +00:00
|
|
|
// NewMergedSeriesSet takes two series sets as a single series set. The input series sets
// must be sorted and sequential in time, i.e. if they have the same label set,
// the datapoints of a must be before the datapoints of b.
func NewMergedSeriesSet(a, b SeriesSet) SeriesSet {
	return newMergedSeriesSet(a, b)
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func newMergedSeriesSet(a, b SeriesSet) *mergedSeriesSet {
|
|
|
|
s := &mergedSeriesSet{a: a, b: b}
|
2016-12-14 14:39:23 +00:00
|
|
|
// Initialize first elements of both sets as Next() needs
|
|
|
|
// one element look-ahead.
|
2017-01-02 11:05:52 +00:00
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
// At returns the series at the current position.
func (s *mergedSeriesSet) At() Series {
	return s.cur
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) Err() error {
|
2016-12-14 14:39:23 +00:00
|
|
|
if s.a.Err() != nil {
|
|
|
|
return s.a.Err()
|
|
|
|
}
|
|
|
|
return s.b.Err()
|
|
|
|
}
|
2016-12-13 14:26:58 +00:00
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) compare() int {
|
2017-01-02 11:05:52 +00:00
|
|
|
if s.adone {
|
2016-12-14 14:39:23 +00:00
|
|
|
return 1
|
|
|
|
}
|
2017-01-02 11:05:52 +00:00
|
|
|
if s.bdone {
|
2016-12-14 14:39:23 +00:00
|
|
|
return -1
|
|
|
|
}
|
2017-01-03 18:02:42 +00:00
|
|
|
return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) Next() bool {
|
2017-01-02 11:05:52 +00:00
|
|
|
if s.adone && s.bdone || s.Err() != nil {
|
2016-12-14 14:39:23 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
d := s.compare()
|
2017-01-04 08:47:20 +00:00
|
|
|
|
2016-12-14 14:39:23 +00:00
|
|
|
// Both sets contain the current series. Chain them into a single one.
|
|
|
|
if d > 0 {
|
2017-01-02 12:27:52 +00:00
|
|
|
s.cur = s.b.At()
|
2017-01-02 11:05:52 +00:00
|
|
|
s.bdone = !s.b.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
} else if d < 0 {
|
2017-01-02 12:27:52 +00:00
|
|
|
s.cur = s.a.At()
|
2017-01-02 11:05:52 +00:00
|
|
|
s.adone = !s.a.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
} else {
|
2017-01-02 12:27:52 +00:00
|
|
|
s.cur = &chainedSeries{series: []Series{s.a.At(), s.b.At()}}
|
2017-01-02 11:05:52 +00:00
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2019-02-14 13:29:41 +00:00
|
|
|
// mergedVerticalSeriesSet merges two sorted series sets whose series may
// overlap in time; see NewMergedVerticalSeriesSet.
type mergedVerticalSeriesSet struct {
	a, b         SeriesSet
	cur          Series // series at the current position
	adone, bdone bool   // whether a/b have been exhausted
}
|
|
|
|
|
|
|
|
// NewMergedVerticalSeriesSet takes two series sets as a single series set.
// The input series sets must be sorted and
// the time ranges of the series can be overlapping.
func NewMergedVerticalSeriesSet(a, b SeriesSet) SeriesSet {
	return newMergedVerticalSeriesSet(a, b)
}
|
|
|
|
|
|
|
|
func newMergedVerticalSeriesSet(a, b SeriesSet) *mergedVerticalSeriesSet {
|
|
|
|
s := &mergedVerticalSeriesSet{a: a, b: b}
|
|
|
|
// Initialize first elements of both sets as Next() needs
|
|
|
|
// one element look-ahead.
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
|
|
|
// At returns the series at the current position.
func (s *mergedVerticalSeriesSet) At() Series {
	return s.cur
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) Err() error {
|
|
|
|
if s.a.Err() != nil {
|
|
|
|
return s.a.Err()
|
|
|
|
}
|
|
|
|
return s.b.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) compare() int {
|
|
|
|
if s.adone {
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if s.bdone {
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) Next() bool {
|
|
|
|
if s.adone && s.bdone || s.Err() != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
d := s.compare()
|
|
|
|
|
|
|
|
// Both sets contain the current series. Chain them into a single one.
|
|
|
|
if d > 0 {
|
|
|
|
s.cur = s.b.At()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
} else if d < 0 {
|
|
|
|
s.cur = s.a.At()
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
} else {
|
|
|
|
s.cur = &verticalChainedSeries{series: []Series{s.a.At(), s.b.At()}}
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2017-12-17 18:08:21 +00:00
|
|
|
// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
// actual series itself.
type ChunkSeriesSet interface {
	// Next advances to the next series; it returns false when exhausted.
	Next() bool
	// At returns the current series' labels, chunk metas, and deletion intervals.
	At() (labels.Labels, []chunks.Meta, Intervals)
	// Err returns the error, if any, that terminated iteration.
	Err() error
}
|
2016-12-14 14:39:23 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
// baseChunkSeries loads the label set and chunk references for a postings
// list from an index. It filters out series that have labels set that should be unset.
type baseChunkSeries struct {
	p          index.Postings  // postings to iterate over
	index      IndexReader     // source of series label sets and chunk metas
	tombstones TombstoneReader // source of deletion intervals

	lset      labels.Labels // labels of the current series
	chks      []chunks.Meta // chunk metas of the current series
	intervals Intervals     // deletion intervals of the current series
	err       error
}
|
2017-03-07 10:29:20 +00:00
|
|
|
|
2017-11-13 12:57:10 +00:00
|
|
|
// LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
|
|
|
|
// over them. It drops chunks based on tombstones in the given reader.
|
|
|
|
func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher) (ChunkSeriesSet, error) {
|
|
|
|
if tr == nil {
|
2018-11-14 16:40:01 +00:00
|
|
|
tr = newMemTombstones()
|
2017-11-13 12:57:10 +00:00
|
|
|
}
|
2017-12-17 18:08:21 +00:00
|
|
|
p, err := PostingsForMatchers(ir, ms...)
|
2017-11-13 12:57:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &baseChunkSeries{
|
|
|
|
p: p,
|
|
|
|
index: ir,
|
|
|
|
tombstones: tr,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// At returns the current series' labels, chunk metas, and deletion intervals.
func (s *baseChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
	return s.lset, s.chks, s.intervals
}
|
|
|
|
|
2017-05-17 09:19:42 +00:00
|
|
|
// Err returns the error, if any, that terminated iteration.
func (s *baseChunkSeries) Err() error { return s.err }
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
// Next advances to the next series in the postings list, loading its label
// set and chunk metas from the index and filtering out chunks that fall
// entirely within that series' deletion intervals.
func (s *baseChunkSeries) Next() bool {
	var (
		// Buffers sized from the previous series so Series() can reuse capacity.
		lset     = make(labels.Labels, len(s.lset))
		chkMetas = make([]chunks.Meta, len(s.chks))
		err      error
	)

	for s.p.Next() {
		ref := s.p.At()
		if err := s.index.Series(ref, &lset, &chkMetas); err != nil {
			// Postings may be stale. Skip if no underlying series exists.
			if errors.Cause(err) == ErrNotFound {
				continue
			}
			s.err = err
			return false
		}

		s.lset = lset
		s.chks = chkMetas
		s.intervals, err = s.tombstones.Get(s.p.At())
		if err != nil {
			s.err = errors.Wrap(err, "get tombstones")
			return false
		}

		if len(s.intervals) > 0 {
			// Only those chunks that are not entirely deleted.
			chks := make([]chunks.Meta, 0, len(s.chks))
			for _, chk := range s.chks {
				if !(Interval{chk.MinTime, chk.MaxTime}.isSubrange(s.intervals)) {
					chks = append(chks, chk)
				}
			}

			s.chks = chks
		}

		return true
	}
	if err := s.p.Err(); err != nil {
		s.err = err
	}
	return false
}
|
|
|
|
|
|
|
|
// populatedChunkSeries loads chunk data from a store for a set of series
// with known chunk references. It filters out chunks that do not fit the
// given time range.
type populatedChunkSeries struct {
	set        ChunkSeriesSet // upstream set providing chunk references
	chunks     ChunkReader    // reader used to load actual chunk data
	mint, maxt int64          // time range chunks must overlap

	err       error
	chks      []chunks.Meta // populated chunks of the current series
	lset      labels.Labels // labels of the current series
	intervals Intervals     // deletion intervals of the current series
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// At returns the current series' labels, populated chunk metas, and deletion intervals.
func (s *populatedChunkSeries) At() (labels.Labels, []chunks.Meta, Intervals) {
	return s.lset, s.chks, s.intervals
}
|
2017-12-13 20:58:21 +00:00
|
|
|
|
2017-05-17 09:19:42 +00:00
|
|
|
// Err returns the error, if any, that terminated iteration.
func (s *populatedChunkSeries) Err() error { return s.err }
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
// Next advances to the next series that has at least one chunk overlapping
// [mint, maxt], loading each overlapping chunk's data and dropping chunks
// outside the range or garbage collected from the store.
func (s *populatedChunkSeries) Next() bool {
	for s.set.Next() {
		lset, chks, dranges := s.set.At()

		// Drop leading chunks that end before mint.
		for len(chks) > 0 {
			if chks[0].MaxTime >= s.mint {
				break
			}
			chks = chks[1:]
		}

		// This is to delete in place while iterating.
		for i, rlen := 0, len(chks); i < rlen; i++ {
			// j compensates for elements already removed from chks.
			j := i - (rlen - len(chks))
			c := &chks[j]

			// Break out at the first chunk that has no overlap with mint, maxt.
			if c.MinTime > s.maxt {
				chks = chks[:j]
				break
			}

			c.Chunk, s.err = s.chunks.Chunk(c.Ref)
			if s.err != nil {
				// This means that the chunk has be garbage collected. Remove it from the list.
				if s.err == ErrNotFound {
					s.err = nil
					// Delete in-place.
					// NOTE(review): this stores the trimmed list into s.chks yet
					// still returns false — confirm the early return is intended.
					s.chks = append(chks[:j], chks[j+1:]...)
				}
				return false
			}
		}

		if len(chks) == 0 {
			continue
		}

		s.lset = lset
		s.chks = chks
		s.intervals = dranges

		return true
	}
	if err := s.set.Err(); err != nil {
		s.err = err
	}
	return false
}
|
|
|
|
|
|
|
|
// blockSeriesSet is a set of series from an inverted index query.
type blockSeriesSet struct {
	set ChunkSeriesSet // underlying chunk series set
	err error
	cur Series // series at the current position

	mint, maxt int64 // time range the resulting series are restricted to
}
|
|
|
|
|
|
|
|
func (s *blockSeriesSet) Next() bool {
|
|
|
|
for s.set.Next() {
|
2017-05-22 11:12:36 +00:00
|
|
|
lset, chunks, dranges := s.set.At()
|
2017-05-17 09:19:42 +00:00
|
|
|
s.cur = &chunkSeries{
|
|
|
|
labels: lset,
|
|
|
|
chunks: chunks,
|
|
|
|
mint: s.mint,
|
|
|
|
maxt: s.maxt,
|
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
intervals: dranges,
|
2017-05-17 09:19:42 +00:00
|
|
|
}
|
2016-12-30 18:36:28 +00:00
|
|
|
return true
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2017-03-07 10:29:20 +00:00
|
|
|
if s.set.Err() != nil {
|
|
|
|
s.err = s.set.Err()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2016-12-16 11:13:17 +00:00
|
|
|
return false
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
// At returns the series at the current position.
func (s *blockSeriesSet) At() Series { return s.cur }
|
|
|
|
// Err returns the error, if any, that terminated iteration.
func (s *blockSeriesSet) Err() error { return s.err }
|
2016-12-14 14:39:23 +00:00
|
|
|
|
2016-12-19 11:26:25 +00:00
|
|
|
// chunkSeries is a series that is backed by a sequence of chunks holding
// time series data.
type chunkSeries struct {
	labels labels.Labels
	chunks []chunks.Meta // in-order chunk refs

	mint, maxt int64 // time range iteration is restricted to

	intervals Intervals // deletion intervals excluded from iteration
}
|
|
|
|
|
2016-12-21 08:39:01 +00:00
|
|
|
// Labels returns the label set of the series.
func (s *chunkSeries) Labels() labels.Labels {
	return s.labels
}
|
|
|
|
|
2016-12-19 11:26:25 +00:00
|
|
|
// Iterator returns an iterator over the series' chunks restricted to
// [mint, maxt] and excluding the deletion intervals.
func (s *chunkSeries) Iterator() SeriesIterator {
	return newChunkSeriesIterator(s.chunks, s.intervals, s.mint, s.maxt)
}
|
|
|
|
|
2016-12-10 17:08:50 +00:00
|
|
|
// SeriesIterator iterates over the data of a time series.
type SeriesIterator interface {
	// Seek advances the iterator forward to the given timestamp.
	// If there's no value exactly at t, it advances to the first value
	// after t.
	Seek(t int64) bool
	// At returns the current timestamp/value pair.
	At() (t int64, v float64)
	// Next advances the iterator by one.
	Next() bool
	// Err returns the current error.
	Err() error
}
|
2016-12-12 18:12:55 +00:00
|
|
|
|
2016-12-16 11:13:17 +00:00
|
|
|
// chainedSeries implements a series for a list of time-sorted series.
// They all must have the same labels.
type chainedSeries struct {
	series []Series
}
|
|
|
|
|
2016-12-21 08:39:01 +00:00
|
|
|
// Labels returns the label set shared by all chained series.
func (s *chainedSeries) Labels() labels.Labels {
	return s.series[0].Labels()
}
|
|
|
|
|
|
|
|
// Iterator returns an iterator chaining all underlying series in order.
func (s *chainedSeries) Iterator() SeriesIterator {
	return newChainedSeriesIterator(s.series...)
}
|
|
|
|
|
|
|
|
// chainedSeriesIterator implements a series iterator over a list
// of time-sorted, non-overlapping iterators.
type chainedSeriesIterator struct {
	series []Series // series in time order

	i   int            // index of the series currently being iterated
	cur SeriesIterator // iterator over series[i]
}
|
|
|
|
|
2017-04-09 14:00:25 +00:00
|
|
|
func newChainedSeriesIterator(s ...Series) *chainedSeriesIterator {
|
|
|
|
return &chainedSeriesIterator{
|
|
|
|
series: s,
|
|
|
|
i: 0,
|
|
|
|
cur: s[0].Iterator(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 14:39:23 +00:00
|
|
|
func (it *chainedSeriesIterator) Seek(t int64) bool {
|
2016-12-16 11:13:17 +00:00
|
|
|
// We just scan the chained series sequentially as they are already
|
|
|
|
// pre-selected by relevant time and should be accessed sequentially anyway.
|
|
|
|
for i, s := range it.series[it.i:] {
|
|
|
|
cur := s.Iterator()
|
|
|
|
if !cur.Seek(t) {
|
|
|
|
continue
|
2016-12-15 14:23:15 +00:00
|
|
|
}
|
2016-12-16 11:13:17 +00:00
|
|
|
it.cur = cur
|
|
|
|
it.i += i
|
|
|
|
return true
|
2016-12-15 14:23:15 +00:00
|
|
|
}
|
2016-12-14 14:39:23 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-12-15 14:23:15 +00:00
|
|
|
func (it *chainedSeriesIterator) Next() bool {
|
|
|
|
if it.cur.Next() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if err := it.cur.Err(); err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if it.i == len(it.series)-1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
it.i++
|
2016-12-16 11:13:17 +00:00
|
|
|
it.cur = it.series[it.i].Iterator()
|
2016-12-15 14:23:15 +00:00
|
|
|
|
|
|
|
return it.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
// At returns the current timestamp/value pair.
func (it *chainedSeriesIterator) At() (t int64, v float64) {
	return it.cur.At()
}
|
|
|
|
|
|
|
|
// Err returns the error of the current underlying iterator.
func (it *chainedSeriesIterator) Err() error {
	return it.cur.Err()
}
|
|
|
|
|
2019-02-14 13:29:41 +00:00
|
|
|
// verticalChainedSeries implements a series for a list of time-sorted, time-overlapping series.
// They all must have the same labels.
type verticalChainedSeries struct {
	series []Series
}
|
|
|
|
|
|
|
|
// Labels returns the label set shared by all chained series.
func (s *verticalChainedSeries) Labels() labels.Labels {
	return s.series[0].Labels()
}
|
|
|
|
|
|
|
|
// Iterator returns an iterator merging all underlying overlapping series.
func (s *verticalChainedSeries) Iterator() SeriesIterator {
	return newVerticalMergeSeriesIterator(s.series...)
}
|
|
|
|
|
|
|
|
// verticalMergeSeriesIterator implements a series iterator over a list
// of time-sorted, time-overlapping iterators.
type verticalMergeSeriesIterator struct {
	a, b                  SeriesIterator
	aok, bok, initialized bool // whether a/b have a current sample; whether priming happened

	curT int64   // current timestamp
	curV float64 // current value
}
|
|
|
|
|
|
|
|
func newVerticalMergeSeriesIterator(s ...Series) SeriesIterator {
|
|
|
|
if len(s) == 1 {
|
|
|
|
return s[0].Iterator()
|
|
|
|
} else if len(s) == 2 {
|
|
|
|
return &verticalMergeSeriesIterator{
|
|
|
|
a: s[0].Iterator(),
|
|
|
|
b: s[1].Iterator(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return &verticalMergeSeriesIterator{
|
|
|
|
a: s[0].Iterator(),
|
|
|
|
b: newVerticalMergeSeriesIterator(s[1:]...),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Seek positions both inputs at the first sample at or after t, then merges
// from there via Next.
func (it *verticalMergeSeriesIterator) Seek(t int64) bool {
	it.aok, it.bok = it.a.Seek(t), it.b.Seek(t)
	it.initialized = true
	return it.Next()
}
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) Next() bool {
|
|
|
|
if !it.initialized {
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
it.initialized = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if !it.aok && !it.bok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if !it.aok {
|
|
|
|
it.curT, it.curV = it.b.At()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !it.bok {
|
|
|
|
it.curT, it.curV = it.a.At()
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
acurT, acurV := it.a.At()
|
|
|
|
bcurT, bcurV := it.b.At()
|
|
|
|
if acurT < bcurT {
|
|
|
|
it.curT, it.curV = acurT, acurV
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
} else if acurT > bcurT {
|
|
|
|
it.curT, it.curV = bcurT, bcurV
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
} else {
|
|
|
|
it.curT, it.curV = bcurT, bcurV
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// At returns the current timestamp/value pair.
func (it *verticalMergeSeriesIterator) At() (t int64, v float64) {
	return it.curT, it.curV
}
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) Err() error {
|
|
|
|
if it.a.Err() != nil {
|
|
|
|
return it.a.Err()
|
|
|
|
}
|
|
|
|
return it.b.Err()
|
|
|
|
}
|
|
|
|
|
2016-12-12 18:12:55 +00:00
|
|
|
// chunkSeriesIterator implements a series iterator on top
// of a list of time-sorted, non-overlapping chunks.
type chunkSeriesIterator struct {
	chunks []chunks.Meta

	i          int               // index of the current chunk
	cur        chunkenc.Iterator // iterator over the current chunk
	bufDelIter *deletedIterator  // reused wrapper applying deletion intervals

	maxt, mint int64 // time range iteration is restricted to

	intervals Intervals // deletion intervals to exclude
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
func newChunkSeriesIterator(cs []chunks.Meta, dranges Intervals, mint, maxt int64) *chunkSeriesIterator {
|
2019-07-09 09:49:34 +00:00
|
|
|
csi := &chunkSeriesIterator{
|
2016-12-12 18:12:55 +00:00
|
|
|
chunks: cs,
|
|
|
|
i: 0,
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
intervals: dranges,
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
2019-07-09 09:49:34 +00:00
|
|
|
csi.resetCurIterator()
|
|
|
|
|
|
|
|
return csi
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chunkSeriesIterator) resetCurIterator() {
|
|
|
|
if len(it.intervals) == 0 {
|
|
|
|
it.cur = it.chunks[it.i].Chunk.Iterator(it.cur)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if it.bufDelIter == nil {
|
|
|
|
it.bufDelIter = &deletedIterator{
|
|
|
|
intervals: it.intervals,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
it.bufDelIter.it = it.chunks[it.i].Chunk.Iterator(it.bufDelIter.it)
|
|
|
|
it.cur = it.bufDelIter
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Seek advances the iterator to the first sample with timestamp >= t within
// [mint, maxt]. It returns false if t lies beyond maxt or no such sample exists.
func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
	if t > it.maxt {
		return false
	}

	// Seek to the first valid value after t.
	if t < it.mint {
		t = it.mint
	}

	// Skip whole chunks that end before t.
	for ; it.chunks[it.i].MaxTime < t; it.i++ {
		if it.i == len(it.chunks)-1 {
			return false
		}
	}

	it.resetCurIterator()

	// Scan within the chunk to the first sample at or after t.
	for it.cur.Next() {
		t0, _ := it.cur.At()
		if t0 >= t {
			return true
		}
	}
	return false
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
// At returns the current timestamp/value pair.
func (it *chunkSeriesIterator) At() (t int64, v float64) {
	return it.cur.At()
}
|
|
|
|
|
|
|
|
// Next advances to the next sample within [mint, maxt], moving across chunk
// boundaries and re-seeking when a sample falls before mint.
func (it *chunkSeriesIterator) Next() bool {
	if it.cur.Next() {
		t, _ := it.cur.At()

		if t < it.mint {
			// Sample precedes the window: jump to mint, then re-check maxt.
			if !it.Seek(it.mint) {
				return false
			}
			t, _ = it.At()

			return t <= it.maxt
		}
		if t > it.maxt {
			return false
		}
		return true
	}
	if err := it.cur.Err(); err != nil {
		return false
	}
	if it.i == len(it.chunks)-1 {
		// Last chunk exhausted.
		return false
	}

	it.i++
	it.resetCurIterator()

	return it.Next()
}
|
|
|
|
|
|
|
|
// Err returns the error of the current chunk iterator.
func (it *chunkSeriesIterator) Err() error {
	return it.cur.Err()
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// deletedIterator wraps an Iterator and makes sure any deleted metrics are not
// returned.
type deletedIterator struct {
	it chunkenc.Iterator // underlying iterator

	intervals Intervals // sorted deletion intervals still ahead of the cursor
}
|
|
|
|
|
|
|
|
// At returns the current timestamp/value pair of the underlying iterator.
func (it *deletedIterator) At() (int64, float64) {
	return it.it.At()
}
|
|
|
|
|
|
|
|
// Next advances to the next sample that does not fall into any deletion interval.
func (it *deletedIterator) Next() bool {
Outer:
	for it.it.Next() {
		ts, _ := it.it.At()

		for _, tr := range it.intervals {
			// Sample is deleted: skip it.
			if tr.inBounds(ts) {
				continue Outer
			}

			if ts > tr.Maxt {
				// Interval lies entirely behind the cursor; drop it so
				// future calls skip it. (The range still iterates the
				// slice captured at loop entry.)
				it.intervals = it.intervals[1:]
				continue
			}

			return true
		}

		return true
	}

	return false
}
|
|
|
|
|
|
|
|
// Err returns the error of the underlying iterator.
func (it *deletedIterator) Err() error {
	return it.it.Err()
}
|
|
|
|
|
2017-01-04 08:47:20 +00:00
|
|
|
// errSeriesSet is an always-empty series set carrying an optional error.
type errSeriesSet struct {
	err error
}

func (s errSeriesSet) Next() bool { return false }
func (s errSeriesSet) At() Series { return nil }
func (s errSeriesSet) Err() error { return s.err }
|