// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
    "fmt"
    "sort"
    "strings"
    "unicode/utf8"

    "github.com/pkg/errors"

    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
    "github.com/prometheus/prometheus/tsdb/chunks"
    tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
    "github.com/prometheus/prometheus/tsdb/index"
    "github.com/prometheus/prometheus/tsdb/tombstones"
)

// Querier provides querying access over time series data of a fixed
// time range.
type Querier interface {
    // Select returns a set of series that matches the given label matchers.
    Select(...*labels.Matcher) (SeriesSet, error)

    // LabelValues returns all potential values for a label name.
    // It is not safe to use the strings beyond the lifetime of the querier.
    LabelValues(string) ([]string, error)

    // LabelNames returns all the unique label names present in the block in sorted order.
    LabelNames() ([]string, error)

    // Close releases the resources of the Querier.
    Close() error
}
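
// The following sketch is illustrative only and not part of the package API.
// It shows how a caller might combine Select, series iteration, and Close,
// assuming a Querier obtained elsewhere (the variable `db` and the time range
// are hypothetical):
//
//	q, err := db.Querier(0, 1000)
//	if err != nil {
//		// handle error
//	}
//	defer q.Close()
//
//	set, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "job", "api"))
//	if err != nil {
//		// handle error
//	}
//	for set.Next() {
//		series := set.At()
//		fmt.Println(series.Labels())
//		it := series.Iterator()
//		for it.Next() {
//			t, v := it.At()
//			fmt.Println(t, v)
//		}
//	}
//	if set.Err() != nil {
//		// handle error
//	}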

// Series exposes a single time series.
type Series interface {
    // Labels returns the complete set of labels identifying the series.
    Labels() labels.Labels

    // Iterator returns a new iterator of the data of the series.
    Iterator() SeriesIterator
}

// querier aggregates querying results from time blocks within
// a single partition.
type querier struct {
    blocks []Querier
}

func (q *querier) LabelValues(n string) ([]string, error) {
    return q.lvals(q.blocks, n)
}

// LabelNames returns all the unique label names present in the querier blocks.
func (q *querier) LabelNames() ([]string, error) {
    labelNamesMap := make(map[string]struct{})
    for _, b := range q.blocks {
        names, err := b.LabelNames()
        if err != nil {
            return nil, errors.Wrap(err, "LabelNames() from Querier")
        }
        for _, name := range names {
            labelNamesMap[name] = struct{}{}
        }
    }

    labelNames := make([]string, 0, len(labelNamesMap))
    for name := range labelNamesMap {
        labelNames = append(labelNames, name)
    }
    sort.Strings(labelNames)

    return labelNames, nil
}

func (q *querier) lvals(qs []Querier, n string) ([]string, error) {
    if len(qs) == 0 {
        return nil, nil
    }
    if len(qs) == 1 {
        return qs[0].LabelValues(n)
    }
    l := len(qs) / 2
    s1, err := q.lvals(qs[:l], n)
    if err != nil {
        return nil, err
    }
    s2, err := q.lvals(qs[l:], n)
    if err != nil {
        return nil, err
    }
    return mergeStrings(s1, s2), nil
}

func (q *querier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
    if len(q.blocks) == 0 {
        return EmptySeriesSet(), nil
    }
    ss := make([]SeriesSet, len(q.blocks))
    var s SeriesSet
    var err error
    for i, b := range q.blocks {
        s, err = b.Select(ms...)
        if err != nil {
            return nil, err
        }
        ss[i] = s
    }

    return NewMergedSeriesSet(ss), nil
}

func (q *querier) Close() error {
    var merr tsdb_errors.MultiError

    for _, bq := range q.blocks {
        merr.Add(bq.Close())
    }
    return merr.Err()
}

// verticalQuerier aggregates querying results from time blocks within
// a single partition. The block time ranges can be overlapping.
type verticalQuerier struct {
    querier
}

func (q *verticalQuerier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
    return q.sel(q.blocks, ms)
}

func (q *verticalQuerier) sel(qs []Querier, ms []*labels.Matcher) (SeriesSet, error) {
    if len(qs) == 0 {
        return EmptySeriesSet(), nil
    }
    if len(qs) == 1 {
        return qs[0].Select(ms...)
    }
    l := len(qs) / 2

    a, err := q.sel(qs[:l], ms)
    if err != nil {
        return nil, err
    }
    b, err := q.sel(qs[l:], ms)
    if err != nil {
        return nil, err
    }
    return newMergedVerticalSeriesSet(a, b), nil
}

// NewBlockQuerier returns a querier against the reader.
func NewBlockQuerier(b BlockReader, mint, maxt int64) (Querier, error) {
    indexr, err := b.Index()
    if err != nil {
        return nil, errors.Wrapf(err, "open index reader")
    }
    chunkr, err := b.Chunks()
    if err != nil {
        indexr.Close()
        return nil, errors.Wrapf(err, "open chunk reader")
    }
    tombsr, err := b.Tombstones()
    if err != nil {
        indexr.Close()
        chunkr.Close()
        return nil, errors.Wrapf(err, "open tombstone reader")
    }
    return &blockQuerier{
        mint:       mint,
        maxt:       maxt,
        index:      indexr,
        chunks:     chunkr,
        tombstones: tombsr,
    }, nil
}
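
// Illustrative sketch (not part of the package API): given a BlockReader obtained
// elsewhere (for example a *Block opened by the DB; the variable `block` here is
// hypothetical), a caller could query only that block's data within a time range:
//
//	q, err := NewBlockQuerier(block, 0, 1000)
//	if err != nil {
//		// handle error
//	}
//	defer q.Close()
//	names, err := q.LabelNames() // sorted, unique label names in the block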

// blockQuerier provides querying access to a single block database.
type blockQuerier struct {
    index      IndexReader
    chunks     ChunkReader
    tombstones tombstones.Reader

    closed bool

    mint, maxt int64
}

func (q *blockQuerier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
    base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
    if err != nil {
        return nil, err
    }
    return &blockSeriesSet{
        set: &populatedChunkSeries{
            set:    base,
            chunks: q.chunks,
            mint:   q.mint,
            maxt:   q.maxt,
        },

        mint: q.mint,
        maxt: q.maxt,
    }, nil
}

func (q *blockQuerier) LabelValues(name string) ([]string, error) {
    tpls, err := q.index.LabelValues(name)
    if err != nil {
        return nil, err
    }
    res := make([]string, 0, tpls.Len())

    for i := 0; i < tpls.Len(); i++ {
        vals, err := tpls.At(i)
        if err != nil {
            return nil, err
        }
        res = append(res, vals[0])
    }
    return res, nil
}

func (q *blockQuerier) LabelNames() ([]string, error) {
    return q.index.LabelNames()
}

func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
    return nil, fmt.Errorf("not implemented")
}

func (q *blockQuerier) Close() error {
    if q.closed {
        return errors.New("block querier already closed")
    }

    var merr tsdb_errors.MultiError
    merr.Add(q.index.Close())
    merr.Add(q.chunks.Close())
    merr.Add(q.tombstones.Close())
    q.closed = true
    return merr.Err()
}

// Bitmap used by func isRegexMetaCharacter to check whether a byte is a regex metacharacter.
var regexMetaCharacterBytes [16]byte

// isRegexMetaCharacter reports whether byte b is a regex metacharacter
// (and therefore needs to be escaped to be matched literally).
func isRegexMetaCharacter(b byte) bool {
    return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}

func init() {
    for _, b := range []byte(`.+*?()|[]{}^$`) {
        regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
    }
}
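
// For illustration: isRegexMetaCharacter('|'), isRegexMetaCharacter('+') and
// isRegexMetaCharacter('$') report true, while isRegexMetaCharacter('a') and
// any byte >= utf8.RuneSelf report false.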

func findSetMatches(pattern string) []string {
    // Return empty matches if the wrapper from Prometheus is missing.
    if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" {
        return nil
    }
    escaped := false
    sets := []*strings.Builder{{}}
    for i := 4; i < len(pattern)-2; i++ {
        if escaped {
            switch {
            case isRegexMetaCharacter(pattern[i]):
                sets[len(sets)-1].WriteByte(pattern[i])
            case pattern[i] == '\\':
                sets[len(sets)-1].WriteByte('\\')
            default:
                return nil
            }
            escaped = false
        } else {
            switch {
            case isRegexMetaCharacter(pattern[i]):
                if pattern[i] == '|' {
                    sets = append(sets, &strings.Builder{})
                } else {
                    return nil
                }
            case pattern[i] == '\\':
                escaped = true
            default:
                sets[len(sets)-1].WriteByte(pattern[i])
            }
        }
    }
    matches := make([]string, 0, len(sets))
    for _, s := range sets {
        if s.Len() > 0 {
            matches = append(matches, s.String())
        }
    }
    return matches
}
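
// Illustrative behaviour, derived from the parsing above (not an exhaustive spec):
//
//	findSetMatches("^(?:foo|bar|baz)$") // returns []string{"foo", "bar", "baz"}
//	findSetMatches("^(?:foo.*)$")       // returns nil: '.' and '*' are unescaped metacharacters
//	findSetMatches("foo|bar")           // returns nil: the "^(?:" ... ")$" wrapper is missing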

// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers.
func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
    var its, notIts []index.Postings
    // See which labels must be non-empty.
    // Optimization for case like {l=~".", l!="1"}.
    labelMustBeSet := make(map[string]bool, len(ms))
    for _, m := range ms {
        if !m.Matches("") {
            labelMustBeSet[m.Name] = true
        }
    }

    for _, m := range ms {
        if labelMustBeSet[m.Name] {
            // If this matcher must be non-empty, we can be smarter.
            matchesEmpty := m.Matches("")
            isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
            if isNot && matchesEmpty { // l!="foo"
                // If the label can't be empty and is a Not and the inner matcher
                // doesn't match empty, then subtract it out at the end.
                inverse, err := m.Inverse()
                if err != nil {
                    return nil, err
                }

                it, err := postingsForMatcher(ix, inverse)
                if err != nil {
                    return nil, err
                }
                notIts = append(notIts, it)
            } else if isNot && !matchesEmpty { // l!=""
                // If the label can't be empty and is a Not, but the inner matcher can
                // be empty we need to use inversePostingsForMatcher.
                inverse, err := m.Inverse()
                if err != nil {
                    return nil, err
                }

                it, err := inversePostingsForMatcher(ix, inverse)
                if err != nil {
                    return nil, err
                }
                its = append(its, it)
            } else { // l="a"
                // Non-Not matcher, use normal postingsForMatcher.
                it, err := postingsForMatcher(ix, m)
                if err != nil {
                    return nil, err
                }
                its = append(its, it)
            }
        } else { // l=""
            // If the matchers for a label name select an empty value, they also select all
            // the series which don't have the label name set at all. See:
            // https://github.com/prometheus/prometheus/issues/3575 and
            // https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
            it, err := inversePostingsForMatcher(ix, m)
            if err != nil {
                return nil, err
            }
            notIts = append(notIts, it)
        }
    }

    // If there's nothing to subtract from, add in everything and remove the notIts later.
    if len(its) == 0 && len(notIts) != 0 {
        k, v := index.AllPostingsKey()
        allPostings, err := ix.Postings(k, v)
        if err != nil {
            return nil, err
        }
        its = append(its, allPostings)
    }

    it := index.Intersect(its...)

    for _, n := range notIts {
        it = index.Without(it, n)
    }

    return ix.SortedPostings(it), nil
}
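
// Illustrative sketch, assuming an IndexReader `ir` obtained elsewhere: resolving
// the common pattern i=~"web.+", i!="web-1" intersects the postings of the regexp
// matcher and then removes the postings for i="web-1":
//
//	p, err := PostingsForMatchers(ir,
//		labels.MustNewMatcher(labels.MatchRegexp, "i", "web.+"),
//		labels.MustNewMatcher(labels.MatchNotEqual, "i", "web-1"),
//	)
//	if err != nil {
//		// handle error
//	}
//	for p.Next() {
//		ref := p.At() // series reference, usable with IndexReader.Series
//		_ = ref
//	}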
|
|
|
|
|
2019-11-18 19:53:33 +00:00
|
|
|
func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 10:59:45 +00:00
|
|
|
// This method will not return postings for missing labels.
|
2017-12-17 18:08:21 +00:00
|
|
|
|
2017-04-05 12:14:30 +00:00
|
|
|
// Fast-path for equal matching.
|
2019-11-18 19:53:33 +00:00
|
|
|
if m.Type == labels.MatchEqual {
|
|
|
|
return ix.Postings(m.Name, m.Value)
|
2017-04-05 12:14:30 +00:00
|
|
|
}
|
|
|
|
|
2019-05-27 11:24:46 +00:00
|
|
|
// Fast-path for set matching.
|
2019-11-18 19:53:33 +00:00
|
|
|
if m.Type == labels.MatchRegexp {
|
|
|
|
setMatches := findSetMatches(m.Value)
|
2019-05-27 11:24:46 +00:00
|
|
|
if len(setMatches) > 0 {
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep the nth offset of the offset of the posting list. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want an iterate forwards.
I haven't done much tuning on the 32 number, it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For a index with what BenchmarkHeadPostingForMatchers uses RAM
for r.postings drops from 3.79MB to 80.19kB or about 48x.
Bytes allocated go down by 30%, and suprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 18:27:40 +00:00
|
|
|
sort.Strings(setMatches)
|
|
|
|
return ix.Postings(m.Name, setMatches...)
|
2019-05-27 11:24:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-18 19:53:33 +00:00
|
|
|
tpls, err := ix.LabelValues(m.Name)
|
2016-12-14 17:38:46 +00:00
|
|
|
if err != nil {
|
2017-11-13 11:16:58 +00:00
|
|
|
return nil, err
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
2017-05-13 15:43:25 +00:00
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
var res []string
|
2018-12-28 18:13:02 +00:00
|
|
|
for i := 0; i < tpls.Len(); i++ {
|
|
|
|
vals, err := tpls.At(i)
|
2016-12-14 17:38:46 +00:00
|
|
|
if err != nil {
|
2017-11-13 11:16:58 +00:00
|
|
|
return nil, err
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
2018-12-28 18:13:02 +00:00
|
|
|
if m.Matches(vals[0]) {
|
|
|
|
res = append(res, vals[0])
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
}
|
2017-05-13 15:43:25 +00:00
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
if len(res) == 0 {
|
2017-11-30 14:34:49 +00:00
|
|
|
return index.EmptyPostings(), nil
|
2016-12-14 17:38:46 +00:00
|
|
|
}
|
|
|
|
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep the nth offset of the offset of the posting list. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want an iterate forwards.
I haven't done much tuning on the 32 number, it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For a index with what BenchmarkHeadPostingForMatchers uses RAM
for r.postings drops from 3.79MB to 80.19kB or about 48x.
Bytes allocated go down by 30%, and suprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 18:27:40 +00:00
|
|
|
return ix.Postings(m.Name, res...)
|
2016-12-14 17:38:46 +00:00
|
|
|
}
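
// Illustrative sketch, not part of the original file: IndexReader.Postings is
// the bulk call used above; given a label name and any number of accepted
// values it returns the merged (union) postings of all series carrying one of
// them. The "job" values below are made-up examples.
func examplePostingsUnion(ix IndexReader) (index.Postings, error) {
	// Equivalent to all series matching job="api" OR job="api-canary".
	return ix.Postings("job", "api", "api-canary")
}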
|
|
|
|
|
2019-09-13 15:10:35 +00:00
|
|
|
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
|
2019-11-18 19:53:33 +00:00
|
|
|
func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
|
|
|
|
tpls, err := ix.LabelValues(m.Name)
|
2017-12-17 18:08:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res []string
|
|
|
|
for i := 0; i < tpls.Len(); i++ {
|
|
|
|
vals, err := tpls.At(i)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if !m.Matches(vals[0]) {
|
|
|
|
res = append(res, vals[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-05 18:27:40 +00:00
|
|
|
return ix.Postings(m.Name, res...)
|
2019-05-27 11:24:46 +00:00
|
|
|
}
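
// Illustrative sketch, not part of the original file: a negative matcher
// (e.g. job!="api") is typically evaluated by subtracting the postings
// gathered by inversePostingsForMatcher from the postings of every series
// that has the label set at all; index.Without expresses that subtraction.
// allWithLabel is an assumed, already-fetched postings list.
func exampleSubtractInverse(allWithLabel, inverse index.Postings) index.Postings {
	return index.Without(allWithLabel, inverse)
}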
|
|
|
|
|
2017-01-06 11:37:28 +00:00
|
|
|
func mergeStrings(a, b []string) []string {
|
|
|
|
maxl := len(a)
|
|
|
|
if len(b) > len(a) {
|
|
|
|
maxl = len(b)
|
|
|
|
}
|
|
|
|
res := make([]string, 0, maxl*10/9)
|
|
|
|
|
|
|
|
for len(a) > 0 && len(b) > 0 {
|
|
|
|
d := strings.Compare(a[0], b[0])
|
|
|
|
|
|
|
|
if d == 0 {
|
|
|
|
res = append(res, a[0])
|
|
|
|
a, b = a[1:], b[1:]
|
|
|
|
} else if d < 0 {
|
|
|
|
res = append(res, a[0])
|
|
|
|
a = a[1:]
|
|
|
|
} else if d > 0 {
|
|
|
|
res = append(res, b[0])
|
|
|
|
b = b[1:]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Append all remaining elements.
|
|
|
|
res = append(res, a...)
|
|
|
|
res = append(res, b...)
|
|
|
|
return res
|
|
|
|
}
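
// exampleMergeStrings is an illustrative sketch, not part of the original
// file: mergeStrings performs a sorted-set union, so a value present in both
// inputs appears only once in the result.
func exampleMergeStrings() {
	fmt.Println(mergeStrings(
		[]string{"alpha", "gamma"},
		[]string{"beta", "gamma"},
	)) // prints [alpha beta gamma]
}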
|
|
|
|
|
2016-12-14 17:38:46 +00:00
|
|
|
// SeriesSet contains a set of series.
|
|
|
|
type SeriesSet interface {
|
|
|
|
Next() bool
|
2017-01-02 12:27:52 +00:00
|
|
|
At() Series
|
2016-12-14 17:38:46 +00:00
|
|
|
Err() error
|
|
|
|
}
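
// drainSeriesSet is an illustrative sketch, not part of the original file:
// the canonical consumption pattern for a SeriesSet is to advance with Next,
// read the current series with At, and check Err once iteration stops.
func drainSeriesSet(ss SeriesSet) error {
	for ss.Next() {
		s := ss.At()
		it := s.Iterator()
		for it.Next() {
			t, v := it.At()
			fmt.Println(s.Labels(), t, v)
		}
		if err := it.Err(); err != nil {
			return err
		}
	}
	return ss.Err()
}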
|
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
var emptySeriesSet = errSeriesSet{}
|
2016-12-14 17:38:46 +00:00
|
|
|
|
2017-11-13 11:16:58 +00:00
|
|
|
// EmptySeriesSet returns a series set that's always empty.
|
|
|
|
func EmptySeriesSet() SeriesSet {
|
|
|
|
return emptySeriesSet
|
|
|
|
}
|
2016-12-14 17:38:46 +00:00
|
|
|
|
2019-11-15 14:45:29 +00:00
|
|
|
// mergedSeriesSet exposes a slice of series sets as a single series set. The input series sets
|
|
|
|
// must be sorted and sequential in time.
|
2016-12-14 14:39:23 +00:00
|
|
|
type mergedSeriesSet struct {
|
2019-11-15 14:45:29 +00:00
|
|
|
all []SeriesSet
|
|
|
|
buf []SeriesSet // A buffer for keeping the order of the SeriesSet slice while forwarding the sets.
|
|
|
|
ids []int // The indices of the chosen SeriesSets for the current run.
|
|
|
|
done bool
|
|
|
|
err error
|
|
|
|
cur Series
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2019-11-15 14:45:29 +00:00
|
|
|
func NewMergedSeriesSet(all []SeriesSet) SeriesSet {
|
|
|
|
if len(all) == 1 {
|
|
|
|
return all[0]
|
|
|
|
}
|
|
|
|
s := &mergedSeriesSet{all: all}
|
|
|
|
// Initialize first elements of all sets as Next() needs
|
2016-12-14 14:39:23 +00:00
|
|
|
// one element look-ahead.
|
2019-11-15 14:45:29 +00:00
|
|
|
s.nextAll()
|
|
|
|
if len(s.all) == 0 {
|
|
|
|
s.done = true
|
|
|
|
}
|
2016-12-14 14:39:23 +00:00
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) At() Series {
|
2016-12-14 14:39:23 +00:00
|
|
|
return s.cur
|
2016-12-13 14:26:58 +00:00
|
|
|
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) Err() error {
|
2019-11-15 14:45:29 +00:00
|
|
|
return s.err
|
|
|
|
}
|
|
|
|
|
|
|
|
// nextAll calls Next() on every SeriesSet.
|
|
|
|
// Because the order of the SeriesSet slice will affect the results,
|
|
|
|
// we need to use a buffer slice to hold the order.
|
|
|
|
func (s *mergedSeriesSet) nextAll() {
|
|
|
|
s.buf = s.buf[:0]
|
|
|
|
for _, ss := range s.all {
|
|
|
|
if ss.Next() {
|
|
|
|
s.buf = append(s.buf, ss)
|
|
|
|
} else if ss.Err() != nil {
|
|
|
|
s.done = true
|
|
|
|
s.err = ss.Err()
|
|
|
|
break
|
|
|
|
}
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2019-11-15 14:45:29 +00:00
|
|
|
s.all, s.buf = s.buf, s.all
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2016-12-13 14:26:58 +00:00
|
|
|
|
2019-11-15 14:45:29 +00:00
|
|
|
// nextWithID calls Next() on the SeriesSets at the indices stored in s.ids.
|
|
|
|
// Because the order of the SeriesSet slice will affect the results,
|
|
|
|
// we need to use a buffer slice to hold the order.
|
|
|
|
func (s *mergedSeriesSet) nextWithID() {
|
|
|
|
if len(s.ids) == 0 {
|
|
|
|
return
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2019-11-15 14:45:29 +00:00
|
|
|
|
|
|
|
s.buf = s.buf[:0]
|
|
|
|
i1 := 0
|
|
|
|
i2 := 0
|
|
|
|
for i1 < len(s.all) {
|
|
|
|
if i2 < len(s.ids) && i1 == s.ids[i2] {
|
|
|
|
if !s.all[s.ids[i2]].Next() {
|
|
|
|
if s.all[s.ids[i2]].Err() != nil {
|
|
|
|
s.done = true
|
|
|
|
s.err = s.all[s.ids[i2]].Err()
|
|
|
|
break
|
|
|
|
}
|
|
|
|
i2++
|
|
|
|
i1++
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
i2++
|
|
|
|
}
|
|
|
|
s.buf = append(s.buf, s.all[i1])
|
|
|
|
i1++
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2019-11-15 14:45:29 +00:00
|
|
|
s.all, s.buf = s.buf, s.all
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-03-14 14:24:08 +00:00
|
|
|
func (s *mergedSeriesSet) Next() bool {
|
2019-11-15 14:45:29 +00:00
|
|
|
if s.done {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
s.nextWithID()
|
|
|
|
if s.done {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
s.ids = s.ids[:0]
|
|
|
|
if len(s.all) == 0 {
|
|
|
|
s.done = true
|
2016-12-14 14:39:23 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-11-15 14:45:29 +00:00
|
|
|
// Here we are looking for a set of series sets with the lowest labels,
|
|
|
|
// and we will cache their indexes in s.ids.
|
|
|
|
s.ids = append(s.ids, 0)
|
|
|
|
for i := 1; i < len(s.all); i++ {
|
|
|
|
cmp := labels.Compare(s.all[s.ids[0]].At().Labels(), s.all[i].At().Labels())
|
|
|
|
if cmp > 0 {
|
|
|
|
s.ids = s.ids[:1]
|
|
|
|
s.ids[0] = i
|
|
|
|
} else if cmp == 0 {
|
|
|
|
s.ids = append(s.ids, i)
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 08:47:20 +00:00
|
|
|
|
2019-11-15 14:45:29 +00:00
|
|
|
if len(s.ids) > 1 {
|
|
|
|
series := make([]Series, len(s.ids))
|
|
|
|
for i, idx := range s.ids {
|
|
|
|
series[i] = s.all[idx].At()
|
|
|
|
}
|
|
|
|
s.cur = &chainedSeries{series: series}
|
2016-12-14 14:39:23 +00:00
|
|
|
} else {
|
2019-11-15 14:45:29 +00:00
|
|
|
s.cur = s.all[s.ids[0]].At()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
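
// exampleLowestLabels is an illustrative sketch, not part of the original
// file: Next above picks the lexicographically smallest label set among the
// child sets and merges the ones that tie; labels.Compare defines that
// ordering. The label values are made-up examples.
func exampleLowestLabels() {
	a := labels.FromStrings("job", "api")
	b := labels.FromStrings("job", "db")
	fmt.Println(labels.Compare(a, b) < 0) // true: {job="api"} sorts before {job="db"}
}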
|
|
|
|
|
2019-02-14 13:29:41 +00:00
|
|
|
type mergedVerticalSeriesSet struct {
|
|
|
|
a, b SeriesSet
|
|
|
|
cur Series
|
|
|
|
adone, bdone bool
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewMergedVerticalSeriesSet merges two series sets into a single series set.
|
|
|
|
// The input series sets must be sorted and
|
|
|
|
// the time ranges of the series may overlap.
|
|
|
|
func NewMergedVerticalSeriesSet(a, b SeriesSet) SeriesSet {
|
|
|
|
return newMergedVerticalSeriesSet(a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
func newMergedVerticalSeriesSet(a, b SeriesSet) *mergedVerticalSeriesSet {
|
|
|
|
s := &mergedVerticalSeriesSet{a: a, b: b}
|
|
|
|
// Initialize first elements of both sets as Next() needs
|
|
|
|
// one element look-ahead.
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) At() Series {
|
|
|
|
return s.cur
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) Err() error {
|
|
|
|
if s.a.Err() != nil {
|
|
|
|
return s.a.Err()
|
|
|
|
}
|
|
|
|
return s.b.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) compare() int {
|
|
|
|
if s.adone {
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if s.bdone {
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *mergedVerticalSeriesSet) Next() bool {
|
|
|
|
if s.adone && s.bdone || s.Err() != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
d := s.compare()
|
|
|
|
|
|
|
|
// When both sets contain the current series (d == 0), chain them into a single one.
|
|
|
|
if d > 0 {
|
|
|
|
s.cur = s.b.At()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
} else if d < 0 {
|
|
|
|
s.cur = s.a.At()
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
} else {
|
|
|
|
s.cur = &verticalChainedSeries{series: []Series{s.a.At(), s.b.At()}}
|
|
|
|
s.adone = !s.a.Next()
|
|
|
|
s.bdone = !s.b.Next()
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2017-12-17 18:08:21 +00:00
|
|
|
// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
|
|
|
|
// actual series itself.
|
2017-11-13 12:57:10 +00:00
|
|
|
type ChunkSeriesSet interface {
|
2017-03-07 10:29:20 +00:00
|
|
|
Next() bool
|
2019-09-19 09:15:41 +00:00
|
|
|
At() (labels.Labels, []chunks.Meta, tombstones.Intervals)
|
2017-03-07 10:29:20 +00:00
|
|
|
Err() error
|
|
|
|
}
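
// drainChunkSeriesSet is an illustrative sketch, not part of the original
// file: a ChunkSeriesSet yields raw chunk metadata plus deletion intervals
// rather than materialized samples, which is the shape compaction and
// deletion-aware readers work with.
func drainChunkSeriesSet(css ChunkSeriesSet) error {
	for css.Next() {
		lset, chks, dranges := css.At()
		fmt.Println(lset, len(chks), len(dranges))
	}
	return css.Err()
}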
|
2016-12-14 14:39:23 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
// baseChunkSeries loads the label set and chunk references for a postings
|
|
|
|
// list from an index. It filters out series that carry labels which should be unset.
|
|
|
|
type baseChunkSeries struct {
|
2017-11-30 14:34:49 +00:00
|
|
|
p index.Postings
|
2017-05-17 09:19:42 +00:00
|
|
|
index IndexReader
|
2019-09-19 09:15:41 +00:00
|
|
|
tombstones tombstones.Reader
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
lset labels.Labels
|
2017-11-30 14:34:49 +00:00
|
|
|
chks []chunks.Meta
|
2019-09-19 09:15:41 +00:00
|
|
|
intervals tombstones.Intervals
|
2017-05-22 11:12:36 +00:00
|
|
|
err error
|
2017-05-17 09:19:42 +00:00
|
|
|
}
|
2017-03-07 10:29:20 +00:00
|
|
|
|
2017-11-13 12:57:10 +00:00
|
|
|
// LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
|
|
|
|
// over them. It drops chunks based on tombstones in the given reader.
|
2019-11-18 19:53:33 +00:00
|
|
|
func LookupChunkSeries(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (ChunkSeriesSet, error) {
|
2017-11-13 12:57:10 +00:00
|
|
|
if tr == nil {
|
2019-09-19 09:15:41 +00:00
|
|
|
tr = tombstones.NewMemTombstones()
|
2017-11-13 12:57:10 +00:00
|
|
|
}
|
2017-12-17 18:08:21 +00:00
|
|
|
p, err := PostingsForMatchers(ir, ms...)
|
2017-11-13 12:57:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &baseChunkSeries{
|
|
|
|
p: p,
|
|
|
|
index: ir,
|
|
|
|
tombstones: tr,
|
|
|
|
}, nil
|
|
|
|
}
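
// lookupJobSeries is an illustrative sketch, not part of the original file,
// of calling LookupChunkSeries; the "job"/"api" matcher is a made-up example,
// and a nil tombstone reader is allowed, as handled above.
func lookupJobSeries(ir IndexReader, tr tombstones.Reader) (ChunkSeriesSet, error) {
	m, err := labels.NewMatcher(labels.MatchEqual, "job", "api")
	if err != nil {
		return nil, err
	}
	return LookupChunkSeries(ir, tr, m)
}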
|
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
func (s *baseChunkSeries) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
|
2017-05-22 11:12:36 +00:00
|
|
|
return s.lset, s.chks, s.intervals
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-05-17 09:19:42 +00:00
|
|
|
func (s *baseChunkSeries) Err() error { return s.err }
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
func (s *baseChunkSeries) Next() bool {
|
2017-08-05 11:31:48 +00:00
|
|
|
var (
|
2018-11-09 13:54:56 +00:00
|
|
|
lset = make(labels.Labels, len(s.lset))
|
|
|
|
chkMetas = make([]chunks.Meta, len(s.chks))
|
2017-11-30 14:34:49 +00:00
|
|
|
err error
|
2017-08-05 11:31:48 +00:00
|
|
|
)
|
2017-12-17 18:08:21 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
for s.p.Next() {
|
2017-05-17 09:19:42 +00:00
|
|
|
ref := s.p.At()
|
2017-11-30 14:34:49 +00:00
|
|
|
if err := s.index.Series(ref, &lset, &chkMetas); err != nil {
|
2017-10-11 07:33:35 +00:00
|
|
|
// Postings may be stale. Skip if no underlying series exists.
|
|
|
|
if errors.Cause(err) == ErrNotFound {
|
|
|
|
continue
|
|
|
|
}
|
2016-12-16 11:13:17 +00:00
|
|
|
s.err = err
|
|
|
|
return false
|
|
|
|
}
|
2016-12-31 14:35:08 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
s.lset = lset
|
2017-11-30 14:34:49 +00:00
|
|
|
s.chks = chkMetas
|
2017-11-13 12:32:24 +00:00
|
|
|
s.intervals, err = s.tombstones.Get(s.p.At())
|
|
|
|
if err != nil {
|
|
|
|
s.err = errors.Wrap(err, "get tombstones")
|
|
|
|
return false
|
|
|
|
}
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2017-05-24 05:54:24 +00:00
|
|
|
if len(s.intervals) > 0 {
|
2017-05-17 09:19:42 +00:00
|
|
|
// Keep only those chunks that are not entirely deleted.
|
2017-11-30 14:34:49 +00:00
|
|
|
chks := make([]chunks.Meta, 0, len(s.chks))
|
2017-05-17 09:19:42 +00:00
|
|
|
for _, chk := range s.chks {
|
2019-09-19 09:15:41 +00:00
|
|
|
if !(tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(s.intervals)) {
|
2017-05-17 09:19:42 +00:00
|
|
|
chks = append(chks, chk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
s.chks = chks
|
|
|
|
}
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if err := s.p.Err(); err != nil {
|
|
|
|
s.err = err
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// populatedChunkSeries loads chunk data from a store for a set of series
|
|
|
|
// with known chunk references. It filters out chunks that do not fit the
|
|
|
|
// given time range.
|
|
|
|
type populatedChunkSeries struct {
|
2017-11-13 12:57:10 +00:00
|
|
|
set ChunkSeriesSet
|
2017-03-07 10:29:20 +00:00
|
|
|
chunks ChunkReader
|
|
|
|
mint, maxt int64
|
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
err error
|
2017-11-30 14:34:49 +00:00
|
|
|
chks []chunks.Meta
|
2017-05-22 11:12:36 +00:00
|
|
|
lset labels.Labels
|
2019-09-19 09:15:41 +00:00
|
|
|
intervals tombstones.Intervals
|
2017-03-07 10:29:20 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
func (s *populatedChunkSeries) At() (labels.Labels, []chunks.Meta, tombstones.Intervals) {
|
2017-05-22 11:12:36 +00:00
|
|
|
return s.lset, s.chks, s.intervals
|
2017-05-17 09:19:42 +00:00
|
|
|
}
|
2017-12-13 20:58:21 +00:00
|
|
|
|
2017-05-17 09:19:42 +00:00
|
|
|
func (s *populatedChunkSeries) Err() error { return s.err }
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
func (s *populatedChunkSeries) Next() bool {
|
|
|
|
for s.set.Next() {
|
2017-05-22 11:12:36 +00:00
|
|
|
lset, chks, dranges := s.set.At()
|
2017-03-07 10:29:20 +00:00
|
|
|
|
2017-05-05 14:04:59 +00:00
|
|
|
for len(chks) > 0 {
|
|
|
|
if chks[0].MaxTime >= s.mint {
|
|
|
|
break
|
2016-12-31 14:35:08 +00:00
|
|
|
}
|
2017-05-05 14:04:59 +00:00
|
|
|
chks = chks[1:]
|
|
|
|
}
|
|
|
|
|
2017-12-13 20:58:21 +00:00
|
|
|
// This is to delete in place while iterating.
|
|
|
|
for i, rlen := 0, len(chks); i < rlen; i++ {
|
|
|
|
j := i - (rlen - len(chks))
|
|
|
|
c := &chks[j]
|
2017-08-06 18:41:24 +00:00
|
|
|
|
|
|
|
// Break out at the first chunk that has no overlap with mint, maxt.
|
2016-12-31 14:35:08 +00:00
|
|
|
if c.MinTime > s.maxt {
|
2017-12-13 20:58:21 +00:00
|
|
|
chks = chks[:j]
|
2016-12-31 14:35:08 +00:00
|
|
|
break
|
|
|
|
}
|
2017-12-13 20:58:21 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
c.Chunk, s.err = s.chunks.Chunk(c.Ref)
|
|
|
|
if s.err != nil {
|
2017-12-13 20:58:21 +00:00
|
|
|
// This means that the chunk has been garbage collected. Remove it from the list.
|
|
|
|
if s.err == ErrNotFound {
|
|
|
|
s.err = nil
|
|
|
|
// Delete in-place.
|
2019-01-02 16:48:42 +00:00
|
|
|
s.chks = append(chks[:j], chks[j+1:]...)
|
2017-12-13 20:58:21 +00:00
|
|
|
}
|
2017-03-07 10:29:20 +00:00
|
|
|
return false
|
|
|
|
}
|
2016-12-31 14:35:08 +00:00
|
|
|
}
|
2017-12-13 20:58:21 +00:00
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
if len(chks) == 0 {
|
2016-12-31 14:35:08 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-03-07 10:29:20 +00:00
|
|
|
s.lset = lset
|
|
|
|
s.chks = chks
|
2017-05-22 11:12:36 +00:00
|
|
|
s.intervals = dranges
|
2017-03-07 10:29:20 +00:00
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if err := s.set.Err(); err != nil {
|
|
|
|
s.err = err
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// blockSeriesSet is a set of series from an inverted index query.
|
|
|
|
type blockSeriesSet struct {
|
2017-11-13 12:57:10 +00:00
|
|
|
set ChunkSeriesSet
|
2017-03-07 10:29:20 +00:00
|
|
|
err error
|
|
|
|
cur Series
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
mint, maxt int64
|
2017-03-07 10:29:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *blockSeriesSet) Next() bool {
|
|
|
|
for s.set.Next() {
|
2017-05-22 11:12:36 +00:00
|
|
|
lset, chunks, dranges := s.set.At()
|
2017-05-17 09:19:42 +00:00
|
|
|
s.cur = &chunkSeries{
|
|
|
|
labels: lset,
|
|
|
|
chunks: chunks,
|
|
|
|
mint: s.mint,
|
|
|
|
maxt: s.maxt,
|
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
intervals: dranges,
|
2017-05-17 09:19:42 +00:00
|
|
|
}
|
2016-12-30 18:36:28 +00:00
|
|
|
return true
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2017-03-07 10:29:20 +00:00
|
|
|
if s.set.Err() != nil {
|
|
|
|
s.err = s.set.Err()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
2016-12-16 11:13:17 +00:00
|
|
|
return false
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
func (s *blockSeriesSet) At() Series { return s.cur }
|
|
|
|
func (s *blockSeriesSet) Err() error { return s.err }
|
2016-12-14 14:39:23 +00:00
|
|
|
|
2016-12-19 11:26:25 +00:00
|
|
|
// chunkSeries is a series that is backed by a sequence of chunks holding
|
|
|
|
// time series data.
|
|
|
|
type chunkSeries struct {
|
2016-12-21 08:39:01 +00:00
|
|
|
labels labels.Labels
|
2017-11-30 14:34:49 +00:00
|
|
|
chunks []chunks.Meta // in-order chunk refs
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
mint, maxt int64
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
intervals tombstones.Intervals
|
2016-12-16 11:13:17 +00:00
|
|
|
}
|
|
|
|
|
2016-12-21 08:39:01 +00:00
|
|
|
func (s *chunkSeries) Labels() labels.Labels {
|
2016-12-16 11:13:17 +00:00
|
|
|
return s.labels
|
|
|
|
}
|
|
|
|
|
2016-12-19 11:26:25 +00:00
|
|
|
func (s *chunkSeries) Iterator() SeriesIterator {
|
2017-05-22 11:12:36 +00:00
|
|
|
return newChunkSeriesIterator(s.chunks, s.intervals, s.mint, s.maxt)
|
2016-12-16 11:13:17 +00:00
|
|
|
}
|
|
|
|
|
2016-12-10 17:08:50 +00:00
|
|
|
// SeriesIterator iterates over the data of a time series.
|
|
|
|
type SeriesIterator interface {
|
|
|
|
// Seek advances the iterator forward to the given timestamp.
|
2017-04-09 14:00:25 +00:00
|
|
|
// If there's no value exactly at t, it advances to the first value
|
|
|
|
// after t.
|
2016-12-10 17:08:50 +00:00
|
|
|
Seek(t int64) bool
|
2017-03-19 16:05:01 +00:00
|
|
|
// At returns the current timestamp/value pair.
|
2017-01-02 12:27:52 +00:00
|
|
|
At() (t int64, v float64)
|
2016-12-10 17:08:50 +00:00
|
|
|
// Next advances the iterator by one.
|
|
|
|
Next() bool
|
|
|
|
// Err returns the current error.
|
|
|
|
Err() error
|
|
|
|
}
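
// valueAt is an illustrative sketch, not part of the original file: Seek
// positions the iterator at the first sample with timestamp >= t, so reading
// the value at-or-after a timestamp is a Seek followed by At.
func valueAt(it SeriesIterator, t int64) (int64, float64, bool) {
	if !it.Seek(t) {
		return 0, 0, false
	}
	ts, v := it.At()
	return ts, v, true
}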
|
2016-12-12 18:12:55 +00:00
|
|
|
|
2016-12-16 11:13:17 +00:00
|
|
|
// chainedSeries implements a series for a list of time-sorted series.
|
2016-12-19 11:26:25 +00:00
|
|
|
// They all must have the same labels.
|
2016-12-14 14:39:23 +00:00
|
|
|
type chainedSeries struct {
|
|
|
|
series []Series
|
|
|
|
}
|
|
|
|
|
2016-12-21 08:39:01 +00:00
|
|
|
func (s *chainedSeries) Labels() labels.Labels {
|
2016-12-14 14:39:23 +00:00
|
|
|
return s.series[0].Labels()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *chainedSeries) Iterator() SeriesIterator {
|
2017-04-09 14:00:25 +00:00
|
|
|
return newChainedSeriesIterator(s.series...)
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2019-09-30 15:54:55 +00:00
|
|
|
// chainedSeriesIterator implements a series iterator over a list
|
2016-12-15 14:23:15 +00:00
|
|
|
// of time-sorted, non-overlapping iterators.
|
2016-12-14 14:39:23 +00:00
|
|
|
type chainedSeriesIterator struct {
|
2016-12-16 11:13:17 +00:00
|
|
|
series []Series // series in time order
|
2016-12-15 14:23:15 +00:00
|
|
|
|
|
|
|
i int
|
|
|
|
cur SeriesIterator
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-04-09 14:00:25 +00:00
|
|
|
func newChainedSeriesIterator(s ...Series) *chainedSeriesIterator {
|
|
|
|
return &chainedSeriesIterator{
|
|
|
|
series: s,
|
|
|
|
i: 0,
|
|
|
|
cur: s[0].Iterator(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-14 14:39:23 +00:00
|
|
|
func (it *chainedSeriesIterator) Seek(t int64) bool {
|
2016-12-16 11:13:17 +00:00
|
|
|
// We just scan the chained series sequentially as they are already
|
|
|
|
// pre-selected by relevant time and should be accessed sequentially anyway.
|
|
|
|
for i, s := range it.series[it.i:] {
|
|
|
|
cur := s.Iterator()
|
|
|
|
if !cur.Seek(t) {
|
|
|
|
continue
|
2016-12-15 14:23:15 +00:00
|
|
|
}
|
2016-12-16 11:13:17 +00:00
|
|
|
it.cur = cur
|
|
|
|
it.i += i
|
|
|
|
return true
|
2016-12-15 14:23:15 +00:00
|
|
|
}
|
2016-12-14 14:39:23 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-12-15 14:23:15 +00:00
|
|
|
func (it *chainedSeriesIterator) Next() bool {
|
|
|
|
if it.cur.Next() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if err := it.cur.Err(); err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if it.i == len(it.series)-1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
it.i++
|
2016-12-16 11:13:17 +00:00
|
|
|
it.cur = it.series[it.i].Iterator()
|
2016-12-15 14:23:15 +00:00
|
|
|
|
|
|
|
return it.Next()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
func (it *chainedSeriesIterator) At() (t int64, v float64) {
|
|
|
|
return it.cur.At()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chainedSeriesIterator) Err() error {
|
2016-12-15 14:23:15 +00:00
|
|
|
return it.cur.Err()
|
2016-12-14 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
2019-02-14 13:29:41 +00:00
|
|
|
// verticalChainedSeries implements a series for a list of time-sorted, time-overlapping series.
|
|
|
|
// They all must have the same labels.
|
|
|
|
type verticalChainedSeries struct {
|
|
|
|
series []Series
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *verticalChainedSeries) Labels() labels.Labels {
|
|
|
|
return s.series[0].Labels()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *verticalChainedSeries) Iterator() SeriesIterator {
|
|
|
|
return newVerticalMergeSeriesIterator(s.series...)
|
|
|
|
}
|
|
|
|
|
2019-09-30 15:54:55 +00:00
|
|
|
// verticalMergeSeriesIterator implements a series iterator over a list
|
2019-02-14 13:29:41 +00:00
|
|
|
// of time-sorted, time-overlapping iterators.
|
|
|
|
type verticalMergeSeriesIterator struct {
|
|
|
|
a, b SeriesIterator
|
|
|
|
aok, bok, initialized bool
|
|
|
|
|
|
|
|
curT int64
|
|
|
|
curV float64
|
|
|
|
}
|
|
|
|
|
|
|
|
func newVerticalMergeSeriesIterator(s ...Series) SeriesIterator {
|
|
|
|
if len(s) == 1 {
|
|
|
|
return s[0].Iterator()
|
|
|
|
} else if len(s) == 2 {
|
|
|
|
return &verticalMergeSeriesIterator{
|
|
|
|
a: s[0].Iterator(),
|
|
|
|
b: s[1].Iterator(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return &verticalMergeSeriesIterator{
|
|
|
|
a: s[0].Iterator(),
|
|
|
|
b: newVerticalMergeSeriesIterator(s[1:]...),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) Seek(t int64) bool {
|
|
|
|
it.aok, it.bok = it.a.Seek(t), it.b.Seek(t)
|
|
|
|
it.initialized = true
|
|
|
|
return it.Next()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) Next() bool {
|
|
|
|
if !it.initialized {
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
it.initialized = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if !it.aok && !it.bok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if !it.aok {
|
|
|
|
it.curT, it.curV = it.b.At()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !it.bok {
|
|
|
|
it.curT, it.curV = it.a.At()
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
acurT, acurV := it.a.At()
|
|
|
|
bcurT, bcurV := it.b.At()
|
|
|
|
if acurT < bcurT {
|
|
|
|
it.curT, it.curV = acurT, acurV
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
} else if acurT > bcurT {
|
|
|
|
it.curT, it.curV = bcurT, bcurV
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
} else {
|
|
|
|
it.curT, it.curV = bcurT, bcurV
|
|
|
|
it.aok = it.a.Next()
|
|
|
|
it.bok = it.b.Next()
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
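
// Illustrative walk-through, not part of the original file: when both child
// iterators sit on the same timestamp, Next above takes b's value and
// advances both sides, so overlapping samples are deduplicated with the
// b-side series winning. For example, with
//	a: (1,10) (2,20)
//	b: (2,21) (3,30)
// the merged iterator yields (1,10) (2,21) (3,30).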
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) At() (t int64, v float64) {
|
|
|
|
return it.curT, it.curV
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *verticalMergeSeriesIterator) Err() error {
|
|
|
|
if it.a.Err() != nil {
|
|
|
|
return it.a.Err()
|
|
|
|
}
|
|
|
|
return it.b.Err()
|
|
|
|
}
|
|
|
|
|
2016-12-12 18:12:55 +00:00
|
|
|
// chunkSeriesIterator implements a series iterator on top
|
|
|
|
// of a list of time-sorted, non-overlapping chunks.
|
|
|
|
type chunkSeriesIterator struct {
|
2017-11-30 14:34:49 +00:00
|
|
|
chunks []chunks.Meta
|
2016-12-12 18:12:55 +00:00
|
|
|
|
2019-07-09 09:49:34 +00:00
|
|
|
i int
|
|
|
|
cur chunkenc.Iterator
|
|
|
|
bufDelIter *deletedIterator
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
maxt, mint int64
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
intervals tombstones.Intervals
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
func newChunkSeriesIterator(cs []chunks.Meta, dranges tombstones.Intervals, mint, maxt int64) *chunkSeriesIterator {
|
2019-07-09 09:49:34 +00:00
|
|
|
csi := &chunkSeriesIterator{
|
2016-12-12 18:12:55 +00:00
|
|
|
chunks: cs,
|
|
|
|
i: 0,
|
2017-04-13 19:06:14 +00:00
|
|
|
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
2017-05-17 09:19:42 +00:00
|
|
|
|
2017-05-22 11:12:36 +00:00
|
|
|
intervals: dranges,
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
2019-07-09 09:49:34 +00:00
|
|
|
csi.resetCurIterator()
|
|
|
|
|
|
|
|
return csi
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chunkSeriesIterator) resetCurIterator() {
|
|
|
|
if len(it.intervals) == 0 {
|
|
|
|
it.cur = it.chunks[it.i].Chunk.Iterator(it.cur)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if it.bufDelIter == nil {
|
|
|
|
it.bufDelIter = &deletedIterator{
|
|
|
|
intervals: it.intervals,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
it.bufDelIter.it = it.chunks[it.i].Chunk.Iterator(it.bufDelIter.it)
|
|
|
|
it.cur = it.bufDelIter
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
|
2017-04-13 19:07:21 +00:00
|
|
|
if t > it.maxt {
|
2017-04-13 19:06:14 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-04-13 19:07:21 +00:00
|
|
|
// Seek to the first valid value after t.
|
|
|
|
if t < it.mint {
|
|
|
|
t = it.mint
|
|
|
|
}
|
|
|
|
|
2017-06-30 13:06:27 +00:00
|
|
|
for ; it.chunks[it.i].MaxTime < t; it.i++ {
|
|
|
|
if it.i == len(it.chunks)-1 {
|
|
|
|
return false
|
|
|
|
}
|
2016-12-15 14:23:15 +00:00
|
|
|
}
|
|
|
|
|
2019-07-09 09:49:34 +00:00
|
|
|
it.resetCurIterator()
|
2016-12-15 14:23:15 +00:00
|
|
|
|
|
|
|
for it.cur.Next() {
|
2017-01-02 12:27:52 +00:00
|
|
|
t0, _ := it.cur.At()
|
2016-12-15 14:23:15 +00:00
|
|
|
if t0 >= t {
|
2016-12-16 11:13:17 +00:00
|
|
|
return true
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-01-02 12:27:52 +00:00
|
|
|
func (it *chunkSeriesIterator) At() (t int64, v float64) {
|
|
|
|
return it.cur.At()
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chunkSeriesIterator) Next() bool {
|
2017-08-28 22:39:17 +00:00
|
|
|
if it.cur.Next() {
|
2017-04-13 19:06:14 +00:00
|
|
|
t, _ := it.cur.At()
|
2017-08-28 22:39:17 +00:00
|
|
|
|
2017-06-13 05:54:04 +00:00
|
|
|
if t < it.mint {
|
2017-08-28 22:39:17 +00:00
|
|
|
if !it.Seek(it.mint) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
t, _ = it.At()
|
2017-06-13 05:54:04 +00:00
|
|
|
|
2017-08-28 22:39:17 +00:00
|
|
|
return t <= it.maxt
|
|
|
|
}
|
2017-06-13 05:54:04 +00:00
|
|
|
if t > it.maxt {
|
|
|
|
return false
|
2017-04-13 19:06:14 +00:00
|
|
|
}
|
2017-06-13 05:54:04 +00:00
|
|
|
return true
|
2016-12-12 18:12:55 +00:00
|
|
|
}
|
|
|
|
if err := it.cur.Err(); err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if it.i == len(it.chunks)-1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
it.i++
|
2019-07-09 09:49:34 +00:00
|
|
|
it.resetCurIterator()
|
2016-12-12 18:12:55 +00:00
|
|
|
|
|
|
|
return it.Next()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *chunkSeriesIterator) Err() error {
|
|
|
|
return it.cur.Err()
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// deletedIterator wraps an Iterator and makes sure any deleted samples are not
|
|
|
|
// returned.
|
|
|
|
type deletedIterator struct {
|
|
|
|
it chunkenc.Iterator
|
|
|
|
|
2019-09-19 09:15:41 +00:00
|
|
|
intervals tombstones.Intervals
|
2017-11-30 14:34:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (it *deletedIterator) At() (int64, float64) {
|
|
|
|
return it.it.At()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *deletedIterator) Next() bool {
|
|
|
|
Outer:
|
|
|
|
for it.it.Next() {
|
|
|
|
ts, _ := it.it.At()
|
|
|
|
|
|
|
|
for _, tr := range it.intervals {
|
2019-09-19 09:15:41 +00:00
|
|
|
if tr.InBounds(ts) {
|
2017-11-30 14:34:49 +00:00
|
|
|
continue Outer
|
|
|
|
}
|
|
|
|
|
|
|
|
if ts > tr.Maxt {
|
|
|
|
it.intervals = it.intervals[1:]
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *deletedIterator) Err() error {
|
|
|
|
return it.it.Err()
|
|
|
|
}
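
// skipDeleted is an illustrative sketch, not part of the original file: with
// a single tombstone interval covering [5, 10], the wrapper drops any sample
// whose timestamp falls inside that range and passes everything else through.
func skipDeleted(it chunkenc.Iterator) chunkenc.Iterator {
	return &deletedIterator{
		it:        it,
		intervals: tombstones.Intervals{{Mint: 5, Maxt: 10}},
	}
}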
|
|
|
|
|
2017-01-04 08:47:20 +00:00
|
|
|
type errSeriesSet struct {
|
|
|
|
err error
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s errSeriesSet) Next() bool { return false }
|
|
|
|
func (s errSeriesSet) At() Series { return nil }
|
|
|
|
func (s errSeriesSet) Err() error { return s.err }
|