// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"fmt"
	"math"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/metric"
)
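
// testExpr is the table of parser test cases. Each entry pairs a PromQL input
// string with either the AST the parser is expected to produce or, for
// invalid inputs, a substring that the parse error has to contain.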
var testExpr = []struct {
	input    string // The input to be parsed.
	expected Expr   // The expected expression AST.
	fail     bool   // Whether parsing is supposed to fail.
	errMsg   string // If not empty the parsing error has to contain this string.
}{
	// Scalars and scalar-to-scalar operations.
	{
		input:    "1",
		expected: &NumberLiteral{1},
	}, {
		input:    "+Inf",
		expected: &NumberLiteral{model.SampleValue(math.Inf(1))},
	}, {
		input:    "-Inf",
		expected: &NumberLiteral{model.SampleValue(math.Inf(-1))},
	}, {
		input:    ".5",
		expected: &NumberLiteral{0.5},
	}, {
		input:    "5.",
		expected: &NumberLiteral{5},
	}, {
		input:    "123.4567",
		expected: &NumberLiteral{123.4567},
	}, {
		input:    "5e-3",
		expected: &NumberLiteral{0.005},
	}, {
		input:    "5e3",
		expected: &NumberLiteral{5000},
	}, {
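		// Hexadecimal and octal number literals are accepted as well.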
		input:    "0xc",
		expected: &NumberLiteral{12},
	}, {
		input:    "0755",
		expected: &NumberLiteral{493},
	}, {
		input:    "+5.5e-3",
		expected: &NumberLiteral{0.0055},
	}, {
		input:    "-0755",
		expected: &NumberLiteral{-493},
	}, {
		input:    "1 + 1",
		expected: &BinaryExpr{itemADD, &NumberLiteral{1}, &NumberLiteral{1}, nil, false},
	}, {
		input:    "1 - 1",
		expected: &BinaryExpr{itemSUB, &NumberLiteral{1}, &NumberLiteral{1}, nil, false},
	}, {
		input:    "1 * 1",
		expected: &BinaryExpr{itemMUL, &NumberLiteral{1}, &NumberLiteral{1}, nil, false},
	}, {
		input:    "1 % 1",
		expected: &BinaryExpr{itemMOD, &NumberLiteral{1}, &NumberLiteral{1}, nil, false},
	}, {
		input:    "1 / 1",
		expected: &BinaryExpr{itemDIV, &NumberLiteral{1}, &NumberLiteral{1}, nil, false},
	}, {
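		// Comparisons between scalars must use the "bool" modifier.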
		input:    "1 == bool 1",
		expected: &BinaryExpr{itemEQL, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input:    "1 != bool 1",
		expected: &BinaryExpr{itemNEQ, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input:    "1 > bool 1",
		expected: &BinaryExpr{itemGTR, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input:    "1 >= bool 1",
		expected: &BinaryExpr{itemGTE, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input:    "1 < bool 1",
		expected: &BinaryExpr{itemLSS, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input:    "1 <= bool 1",
		expected: &BinaryExpr{itemLTE, &NumberLiteral{1}, &NumberLiteral{1}, nil, true},
	}, {
		input: "+1 + -2 * 1",
		expected: &BinaryExpr{
			Op:  itemADD,
			LHS: &NumberLiteral{1},
			RHS: &BinaryExpr{
				Op: itemMUL, LHS: &NumberLiteral{-2}, RHS: &NumberLiteral{1},
			},
		},
	}, {
		input: "1 + 2/(3*1)",
		expected: &BinaryExpr{
			Op:  itemADD,
			LHS: &NumberLiteral{1},
			RHS: &BinaryExpr{
				Op:  itemDIV,
				LHS: &NumberLiteral{2},
				RHS: &ParenExpr{&BinaryExpr{
					Op: itemMUL, LHS: &NumberLiteral{3}, RHS: &NumberLiteral{1},
				}},
			},
		},
	}, {
		input: "1 < bool 2 - 1 * 2",
		expected: &BinaryExpr{
			Op:         itemLSS,
			ReturnBool: true,
			LHS:        &NumberLiteral{1},
			RHS: &BinaryExpr{
				Op:  itemSUB,
				LHS: &NumberLiteral{2},
				RHS: &BinaryExpr{
					Op: itemMUL, LHS: &NumberLiteral{1}, RHS: &NumberLiteral{2},
				},
			},
		},
	}, {
		input: "-some_metric", expected: &UnaryExpr{
			Op: itemSUB,
			Expr: &VectorSelector{
				Name: "some_metric",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
				},
			},
		},
	}, {
		input: "+some_metric", expected: &UnaryExpr{
			Op: itemADD,
			Expr: &VectorSelector{
				Name: "some_metric",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
				},
			},
		},
	}, {
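		// Cases that are expected to fail parsing.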
		input:  "",
		fail:   true,
		errMsg: "no expression found in input",
	}, {
		input:  "# just a comment\n\n",
		fail:   true,
		errMsg: "no expression found in input",
	}, {
		input:  "1+",
		fail:   true,
		errMsg: "no valid expression found",
	}, {
		input:  ".",
		fail:   true,
		errMsg: "unexpected character: '.'",
	}, {
		input:  "2.5.",
		fail:   true,
		errMsg: "could not parse remaining input \".\"...",
	}, {
		input:  "100..4",
		fail:   true,
		errMsg: "could not parse remaining input \".4\"...",
	}, {
		input:  "0deadbeef",
		fail:   true,
		errMsg: "bad number or duration syntax: \"0de\"",
	}, {
		input:  "1 /",
		fail:   true,
		errMsg: "no valid expression found",
	}, {
		input:  "*1",
		fail:   true,
		errMsg: "no valid expression found",
	}, {
		input:  "(1))",
		fail:   true,
		errMsg: "could not parse remaining input \")\"...",
	}, {
		input:  "((1)",
		fail:   true,
		errMsg: "unclosed left parenthesis",
	}, {
input: "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999",
		fail:   true,
		errMsg: "out of range",
	}, {
		input:  "(",
		fail:   true,
		errMsg: "unclosed left parenthesis",
	}, {
		input:  "1 and 1",
		fail:   true,
		errMsg: "set operator \"and\" not allowed in binary scalar expression",
	}, {
		input:  "1 == 1",
		fail:   true,
		errMsg: "parse error at char 7: comparisons between scalars must use BOOL modifier",
	}, {
		input:  "1 or 1",
		fail:   true,
		errMsg: "set operator \"or\" not allowed in binary scalar expression",
	}, {
		input:  "1 unless 1",
		fail:   true,
		errMsg: "set operator \"unless\" not allowed in binary scalar expression",
	}, {
		input:  "1 !~ 1",
		fail:   true,
		errMsg: "could not parse remaining input \"!~ 1\"...",
	}, {
		input:  "1 =~ 1",
		fail:   true,
		errMsg: "could not parse remaining input \"=~ 1\"...",
	}, {
		input:  `-"string"`,
		fail:   true,
		errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "string"`,
	}, {
		input:  `-test[5m]`,
		fail:   true,
		errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "range vector"`,
	}, {
		input:  `*test`,
		fail:   true,
		errMsg: "no valid expression found",
	}, {
		input:  "1 offset 1d",
		fail:   true,
		errMsg: "offset modifier must be preceded by an instant or range selector",
	}, {
		input:  "a - on(b) ignoring(c) d",
		fail:   true,
		errMsg: "parse error at char 11: no valid expression found",
	},
	// Vector binary operations.
	{
		input: "foo * bar",
		expected: &BinaryExpr{
			Op: itemMUL,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
				},
			},
			VectorMatching: &VectorMatching{Card: CardOneToOne},
		},
	}, {
		input: "foo == 1",
		expected: &BinaryExpr{
			Op: itemEQL,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS: &NumberLiteral{1},
		},
	}, {
		input: "foo == bool 1",
		expected: &BinaryExpr{
			Op: itemEQL,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS:        &NumberLiteral{1},
			ReturnBool: true,
		},
	}, {
		input: "2.5 / bar",
		expected: &BinaryExpr{
			Op:  itemDIV,
			LHS: &NumberLiteral{2.5},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
				},
			},
		},
	}, {
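		// Set operators between vectors use many-to-many matching.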
		input: "foo and bar",
		expected: &BinaryExpr{
			Op: itemLAND,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
				},
			},
			VectorMatching: &VectorMatching{Card: CardManyToMany},
		},
	}, {
		input: "foo or bar",
		expected: &BinaryExpr{
			Op: itemLOR,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
				},
			},
			VectorMatching: &VectorMatching{Card: CardManyToMany},
		},
	}, {
		input: "foo unless bar",
		expected: &BinaryExpr{
			Op: itemLUnless,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
				},
			},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
				},
			},
			VectorMatching: &VectorMatching{Card: CardManyToMany},
		},
	}, {
// Test and/or precedence and reassigning of operands.
|
|
|
|
input: "foo + bar or bla and blub",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLOR,
|
|
|
|
LHS: &BinaryExpr{
|
|
|
|
Op: itemADD,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardOneToOne},
|
|
|
|
},
|
|
|
|
RHS: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "bla",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bla"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "blub",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "blub"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardManyToMany},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardManyToMany},
|
|
|
|
},
|
2016-04-02 22:52:18 +00:00
|
|
|
}, {
|
|
|
|
// Test and/or/unless precedence.
|
|
|
|
input: "foo and bar unless baz or qux",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLOR,
|
|
|
|
LHS: &BinaryExpr{
|
|
|
|
Op: itemLUnless,
|
|
|
|
LHS: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardManyToMany},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "baz",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "baz"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardManyToMany},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "qux",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "qux"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{Card: CardManyToMany},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
// Test precedence and reassigning of operands.
|
|
|
|
input: "bar + on(foo) bla / on(baz, buz) group_right(test) blub",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemADD,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &BinaryExpr{
|
|
|
|
Op: itemDIV,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "bla",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bla"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "blub",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "blub"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardOneToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"baz", "buz"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2016-04-26 13:28:36 +00:00
|
|
|
Include: model.LabelNames{"test"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardOneToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"foo"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
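// Test one-to-one matching with an explicit on() label list.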
input: "foo * on(test,blub) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemMUL,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardOneToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-04-21 18:03:10 +00:00
|
|
|
}, {
|
|
|
|
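// Test many-to-one matching via group_left.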
input: "foo * on(test,blub) group_left bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemMUL,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-21 18:03:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-21 18:03:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2016-04-21 18:03:10 +00:00
|
|
|
},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
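// Test that set operators use many-to-many matching, restricted via on().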
input: "foo and on(test,blub) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-06-23 16:49:22 +00:00
|
|
|
}, {
|
|
|
|
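// Test an empty on() label list.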
input: "foo and on() bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-06-23 16:49:22 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-06-23 16:49:22 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
|
|
|
Card: CardManyToMany,
|
|
|
|
MatchingLabels: model.LabelNames{},
|
|
|
|
On: true,
|
|
|
|
},
|
|
|
|
},
|
2016-04-21 10:45:06 +00:00
|
|
|
}, {
|
|
|
|
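// Test matching with ignoring() instead of on().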
input: "foo and ignoring(test,blub) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-21 10:45:06 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-21 10:45:06 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
2016-04-21 10:45:06 +00:00
|
|
|
},
|
|
|
|
},
|
2016-06-23 16:49:22 +00:00
|
|
|
}, {
|
|
|
|
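// An empty ignoring() list parses to an empty MatchingLabels slice, with On left false.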
input: "foo and ignoring() bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLAND,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-06-23 16:49:22 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-06-23 16:49:22 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
|
|
|
Card: CardManyToMany,
|
|
|
|
MatchingLabels: model.LabelNames{},
|
|
|
|
},
|
|
|
|
},
|
2016-04-02 22:52:18 +00:00
|
|
|
}, {
|
|
|
|
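// "unless" is also a set operator: many-to-many matching, here restricted to the "bar" label via on().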
input: "foo unless on(bar) baz",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemLUnless,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "baz",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "baz"),
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"bar"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
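// group_left(bar) yields CardManyToOne, with Include carrying the additional label "bar".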
input: "foo / on(test,blub) group_left(bar) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemDIV,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2016-04-26 13:28:36 +00:00
|
|
|
Include: model.LabelNames{"bar"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-04-21 14:53:14 +00:00
|
|
|
}, {
|
|
|
|
input: "foo / ignoring(test,blub) group_left(blub) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemDIV,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
|
|
|
Include: model.LabelNames{"blub"},
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "foo / ignoring(test,blub) group_left(bar) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemDIV,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardManyToOne,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
|
|
|
Include: model.LabelNames{"bar"},
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
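// group_right flips the cardinality: the right-hand side becomes the "many" side (CardOneToMany).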
input: "foo - on(test,blub) group_right(bar,foo) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemSUB,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardOneToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
|
|
|
Include: model.LabelNames{"bar", "foo"},
|
2016-06-23 16:23:44 +00:00
|
|
|
On: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-04-21 14:53:14 +00:00
|
|
|
}, {
|
|
|
|
input: "foo - ignoring(test,blub) group_right(bar,foo) bar",
|
|
|
|
expected: &BinaryExpr{
|
|
|
|
Op: itemSUB,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
VectorMatching: &VectorMatching{
|
2016-04-26 13:28:36 +00:00
|
|
|
Card: CardOneToMany,
|
|
|
|
MatchingLabels: model.LabelNames{"test", "blub"},
|
|
|
|
Include: model.LabelNames{"bar", "foo"},
|
2016-04-21 14:53:14 +00:00
|
|
|
},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
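// Set operators require vector operands on both sides; scalar operands are rejected.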
input: "foo and 1",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "set operator \"and\" not allowed in binary scalar expression",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "1 and foo",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "set operator \"and\" not allowed in binary scalar expression",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "foo or 1",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "set operator \"or\" not allowed in binary scalar expression",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "1 or foo",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "set operator \"or\" not allowed in binary scalar expression",
|
|
|
|
}, {
|
|
|
|
input: "foo unless 1",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "set operator \"unless\" not allowed in binary scalar expression",
|
|
|
|
}, {
|
|
|
|
input: "1 unless foo",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "set operator \"unless\" not allowed in binary scalar expression",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
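// Explicit matching modifiers likewise require instant vectors on both sides.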
input: "1 or on(bar) foo",
|
|
|
|
fail: true,
|
2016-11-17 21:02:28 +00:00
|
|
|
errMsg: "vector matching only allowed between instant vectors",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "foo == on(bar) 10",
|
|
|
|
fail: true,
|
2016-11-17 21:02:28 +00:00
|
|
|
errMsg: "vector matching only allowed between instant vectors",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
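// Grouping modifiers (group_left/group_right) are not allowed on set operators.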
input: "foo and on(bar) group_left(baz) bar",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "no grouping allowed for \"and\" operation",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "foo and on(bar) group_right(baz) bar",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "no grouping allowed for \"and\" operation",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "foo or on(bar) group_left(baz) bar",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "no grouping allowed for \"or\" operation",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "foo or on(bar) group_right(baz) bar",
|
|
|
|
fail: true,
|
2016-04-02 22:52:18 +00:00
|
|
|
errMsg: "no grouping allowed for \"or\" operation",
|
|
|
|
}, {
|
|
|
|
input: "foo unless on(bar) group_left(baz) bar",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "no grouping allowed for \"unless\" operation",
|
|
|
|
}, {
|
|
|
|
input: "foo unless on(bar) group_right(baz) bar",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "no grouping allowed for \"unless\" operation",
|
2015-05-12 10:00:28 +00:00
|
|
|
}, {
|
|
|
|
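// A label may not appear in both the on() clause and the group_left()/group_right() clause.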
input: `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`,
|
|
|
|
fail: true,
|
2016-04-26 13:31:00 +00:00
|
|
|
errMsg: "label \"instance\" must not occur in ON and GROUP clause at once",
|
2015-09-02 13:51:44 +00:00
|
|
|
}, {
|
|
|
|
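// The bool modifier is only valid on comparison operators.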
input: "foo + bool bar",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bool modifier can only be used on comparison operators",
|
|
|
|
}, {
|
|
|
|
input: "foo + bool 10",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bool modifier can only be used on comparison operators",
|
|
|
|
}, {
|
|
|
|
input: "foo and bool 10",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bool modifier can only be used on comparison operators",
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
// Test vector selector.
|
|
|
|
{
|
|
|
|
input: "foo",
|
|
|
|
expected: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
Offset: 0,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
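// The offset modifier is parsed into the selector's Offset field as a duration.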
input: "foo offset 5m",
|
|
|
|
expected: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
Offset: 5 * time.Minute,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
2015-05-08 14:43:02 +00:00
|
|
|
input: `foo:bar{a="bc"}`,
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &VectorSelector{
|
|
|
|
Name: "foo:bar",
|
|
|
|
Offset: 0,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "a", "bc"),
|
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo:bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
2015-05-08 14:43:02 +00:00
|
|
|
input: `foo{NaN='bc'}`,
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
Offset: 0,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "NaN", "bc"),
|
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz"}`,
|
|
|
|
expected: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
Offset: 0,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "a", "b"),
|
|
|
|
mustLabelMatcher(metric.NotEqual, "foo", "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
mustLabelMatcher(metric.RegexMatch, "test", "test"),
|
|
|
|
mustLabelMatcher(metric.RegexNoMatch, "bar", "baz"),
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `{`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected end of input inside braces",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected character: '}'",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some{`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected end of input inside braces",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "could not parse remaining input \"}\"...",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some_metric{a=b}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected identifier \"b\" in label matching, expected string",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some_metric{a:b="b"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected character inside braces: ':'",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo{a*"b"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected character inside braces: '*'",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo{a>="b"}`,
|
|
|
|
fail: true,
|
|
|
|
// TODO(fabxc): willingly lexing wrong tokens allows for more precise error
|
|
|
|
// messages from the parser - consider if this is an option.
|
|
|
|
errMsg: "unexpected character inside braces: '>'",
|
2017-06-16 14:19:24 +00:00
|
|
|
}, {
|
|
|
|
input: "some_metric{a=\"\xff\"}",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "parse error at char 15: invalid UTF-8 rune",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo{gibberish}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "expected label matching operator but got }",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo{1}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "unexpected character inside braces: '1'",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `{}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "vector selector must contain label matchers or metric name",
|
2015-06-15 16:34:41 +00:00
|
|
|
}, {
|
|
|
|
input: `{x=""}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "vector selector must contain at least one non-empty matcher",
|
|
|
|
}, {
|
|
|
|
input: `{x=~".*"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "vector selector must contain at least one non-empty matcher",
|
|
|
|
}, {
|
|
|
|
input: `{x!~".+"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "vector selector must contain at least one non-empty matcher",
|
|
|
|
}, {
|
|
|
|
input: `{x!="a"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "vector selector must contain at least one non-empty matcher",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo{__name__="bar"}`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "metric name must not be set twice: \"foo\" or \"bar\"",
|
|
|
|
// }, {
|
|
|
|
// input: `:foo`,
|
|
|
|
// fail: true,
|
|
|
|
// errMsg: "bla",
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
// Test matrix selector.
|
|
|
|
{
|
|
|
|
input: "test[5s]",
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 0,
|
|
|
|
Range: 5 * time.Second,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "test[5m]",
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 0,
|
|
|
|
Range: 5 * time.Minute,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "test[5h] OFFSET 5m",
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 5 * time.Minute,
|
|
|
|
Range: 5 * time.Hour,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "test[5d] OFFSET 10s",
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 10 * time.Second,
|
|
|
|
Range: 5 * 24 * time.Hour,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "test[5w] offset 2w",
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 14 * 24 * time.Hour,
|
|
|
|
Range: 5 * 7 * 24 * time.Hour,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: `test{a="b"}[5y] OFFSET 3d`,
|
|
|
|
expected: &MatrixSelector{
|
|
|
|
Name: "test",
|
|
|
|
Offset: 3 * 24 * time.Hour,
|
|
|
|
Range: 5 * 365 * 24 * time.Hour,
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "a", "b"),
|
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "test"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[5mm]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bad duration syntax: \"5mm\"",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[0m]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "duration must be greater than 0",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[5m30s]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bad duration syntax: \"5m3\"",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[5m] OFFSET 1h30m`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bad number or duration syntax: \"1h3\"",
|
2015-05-12 10:00:28 +00:00
|
|
|
}, {
|
|
|
|
input: `foo["5m"]`,
|
|
|
|
fail: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "missing unit character in duration",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `foo[1]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "missing unit character in duration",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some_metric[5m] OFFSET 1`,
|
|
|
|
fail: true,
|
2016-01-25 03:50:46 +00:00
|
|
|
errMsg: "unexpected number \"1\" in offset, expected duration",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some_metric[5m] OFFSET 1mm`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "bad number or duration syntax: \"1mm\"",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `some_metric[5m] OFFSET`,
|
|
|
|
fail: true,
|
2016-01-25 03:50:46 +00:00
|
|
|
errMsg: "unexpected end of input in offset, expected duration",
|
|
|
|
}, {
|
|
|
|
input: `some_metric OFFSET 1m[5m]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "could not parse remaining input \"[5m]\"...",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: `(foo + bar)[5m]`,
|
|
|
|
fail: true,
|
|
|
|
errMsg: "could not parse remaining input \"[5m]\"...",
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
// Test aggregation.
|
|
|
|
{
|
|
|
|
input: "sum by (foo)(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemSum,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
2015-06-12 12:21:01 +00:00
|
|
|
input: "sum by (foo) keep_common (some_metric)",
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &AggregateExpr{
|
2016-05-26 16:42:19 +00:00
|
|
|
Op: itemSum,
|
|
|
|
KeepCommonLabels: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
2015-06-12 12:21:01 +00:00
|
|
|
input: "sum (some_metric) by (foo,bar) keep_common",
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &AggregateExpr{
|
2016-05-26 16:42:19 +00:00
|
|
|
Op: itemSum,
|
|
|
|
KeepCommonLabels: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo", "bar"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "avg by (foo)(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemAvg,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
2015-06-12 12:21:01 +00:00
|
|
|
input: "COUNT by (foo) keep_common (some_metric)",
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemCount,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-05-26 16:42:19 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
|
|
|
KeepCommonLabels: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
2015-06-12 12:21:01 +00:00
|
|
|
input: "MIN (some_metric) by (foo) keep_common",
|
2015-03-30 16:12:51 +00:00
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemMin,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-05-26 16:42:19 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
|
|
|
KeepCommonLabels: true,
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "max by (foo)(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemMax,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
2016-02-07 18:03:16 +00:00
|
|
|
}, {
|
|
|
|
input: "sum without (foo) (some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemSum,
|
|
|
|
Without: true,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2016-02-07 18:03:16 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Grouping: model.LabelNames{"foo"},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "sum (some_metric) without (foo)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemSum,
|
|
|
|
Without: true,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2016-02-07 18:03:16 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Grouping: model.LabelNames{"foo"},
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
input: "stddev(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemStddev,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "stdvar by (foo)(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemStdvar,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"foo"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
2016-06-23 16:49:22 +00:00
|
|
|
}, {
|
|
|
|
input: "sum by ()(some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemSum,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2016-06-23 16:49:22 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Grouping: model.LabelNames{},
|
|
|
|
},
|
2016-07-04 12:10:42 +00:00
|
|
|
}, {
|
|
|
|
input: "topk(5, some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemTopK,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2016-07-04 12:10:42 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Param: &NumberLiteral{5},
|
|
|
|
},
|
2016-07-05 16:12:19 +00:00
|
|
|
}, {
|
|
|
|
input: "count_values(\"value\", some_metric)",
|
|
|
|
expected: &AggregateExpr{
|
|
|
|
Op: itemCountValues,
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2016-07-05 16:12:19 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Param: &StringLiteral{"value"},
|
|
|
|
},
|
Fix parsing of label names which are also keywords
The current separation between lexer and parser is a bit fuzzy when it
comes to operators, aggregators and other keywords. The lexer already
tries to determine the type of a token, even though that type might
change depending on the context.
This led to the problematic behavior that no tokens known to the lexer
could be used as label names, including operators (and, by, ...),
aggregators (count, quantile, ...) or other keywords (for, offset, ...).
This change additionally checks whether an identifier is one of these
types. We might want to check whether the specific item identification
should be moved from the lexer to the parser.
2016-09-07 19:16:34 +00:00
|
|
|
}, {
|
|
|
|
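		// Keywords are not reserved as label names: although the lexer pre-classifies
		// tokens such as operators (and, by), aggregators (count, avg) and other
		// keywords (alert, annotations), the parser still accepts them as plain label
		// names in grouping clauses, which the following case checks.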
		// Test usage of keywords as label names.
		input: "sum without(and, by, avg, count, alert, annotations)(some_metric)",
		expected: &AggregateExpr{
			Op: itemSum,
			Without: true,
			Expr: &VectorSelector{
				Name: "some_metric",
				LabelMatchers: metric.LabelMatchers{
					mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
				},
			},
			Grouping: model.LabelNames{"and", "by", "avg", "count", "alert", "annotations"},
		},
	}, {
		input: "sum without(==)(some_metric)",
		fail: true,
		errMsg: "unexpected <op:==> in grouping opts, expected label",
	}, {
		input: `sum some_metric by (test)`,
		fail: true,
		errMsg: "unexpected identifier \"some_metric\" in aggregation, expected \"(\"",
	}, {
		input: `sum (some_metric) by test`,
		fail: true,
		errMsg: "unexpected identifier \"test\" in grouping opts, expected \"(\"",
	}, {
		input: `sum (some_metric) by test`,
		fail: true,
		errMsg: "unexpected identifier \"test\" in grouping opts, expected \"(\"",
	}, {
		input: `sum () by (test)`,
		fail: true,
		errMsg: "no valid expression found",
	}, {
		input: "MIN keep_common (some_metric) by (foo)",
		fail: true,
		errMsg: "could not parse remaining input \"by (foo)\"...",
	}, {
		input: "MIN by(test) (some_metric) keep_common",
		fail: true,
		errMsg: "could not parse remaining input \"keep_common\"...",
	}, {
		input: `sum (some_metric) without (test) keep_common`,
		fail: true,
		errMsg: "cannot use 'keep_common' with 'without'",
	}, {
		input: `sum (some_metric) without (test) by (test)`,
		fail: true,
		errMsg: "could not parse remaining input \"by (test)\"...",
	}, {
		input: `sum without (test) (some_metric) by (test)`,
		fail: true,
		errMsg: "could not parse remaining input \"by (test)\"...",
	}, {
		input: `topk(some_metric)`,
		fail: true,
		errMsg: "parse error at char 17: unexpected \")\" in aggregation, expected \",\"",
	}, {
		input: `topk(some_metric, other_metric)`,
		fail: true,
		errMsg: "parse error at char 32: expected type scalar in aggregation parameter, got instant vector",
	}, {
		input: `count_values(5, other_metric)`,
		fail: true,
		errMsg: "parse error at char 30: expected type string in aggregation parameter, got scalar",
	},
	// Test function calls.
	{
		input: "time()",
		expected: &Call{
			Func: mustGetFunction("time"),
		},
	}, {
		input: `floor(some_metric{foo!="bar"})`,
		expected: &Call{
			Func: mustGetFunction("floor"),
			Args: Expressions{
				&VectorSelector{
					Name: "some_metric",
					LabelMatchers: metric.LabelMatchers{
						mustLabelMatcher(metric.NotEqual, "foo", "bar"),
						mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
					},
				},
			},
		},
	}, {
input: "rate(some_metric[5m])",
|
|
|
|
expected: &Call{
|
|
|
|
Func: mustGetFunction("rate"),
|
|
|
|
Args: Expressions{
|
|
|
|
&MatrixSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
Range: 5 * time.Minute,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "round(some_metric)",
|
|
|
|
expected: &Call{
|
|
|
|
Func: mustGetFunction("round"),
|
|
|
|
Args: Expressions{
|
|
|
|
&VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: "round(some_metric, 5)",
|
|
|
|
expected: &Call{
|
|
|
|
Func: mustGetFunction("round"),
|
|
|
|
Args: Expressions{
|
|
|
|
&VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
&NumberLiteral{5},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "floor()",
|
|
|
|
fail: true,
|
2017-06-16 13:51:22 +00:00
|
|
|
errMsg: "expected 1 argument(s) in call to \"floor\", got 0",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "floor(some_metric, other_metric)",
|
|
|
|
fail: true,
|
2017-06-16 13:51:22 +00:00
|
|
|
errMsg: "expected 1 argument(s) in call to \"floor\", got 2",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "floor(1)",
|
|
|
|
fail: true,
|
2016-11-17 21:02:28 +00:00
|
|
|
errMsg: "expected type instant vector in call to function \"floor\", got scalar",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2016-09-15 03:23:28 +00:00
|
|
|
input: "non_existent_function_far_bar()",
|
2015-04-29 14:35:18 +00:00
|
|
|
fail: true,
|
2016-09-15 03:23:28 +00:00
|
|
|
errMsg: "unknown function with name \"non_existent_function_far_bar\"",
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
2015-04-29 14:35:18 +00:00
|
|
|
input: "rate(some_metric)",
|
|
|
|
fail: true,
|
2016-11-17 21:02:28 +00:00
|
|
|
errMsg: "expected type range vector in call to function \"rate\", got instant vector",
|
2017-06-16 14:19:24 +00:00
|
|
|
}, {
|
|
|
|
input: "label_replace(a, `b`, `c\xff`, `d`, `.*`)",
|
|
|
|
fail: true,
|
|
|
|
errMsg: "parse error at char 23: invalid UTF-8 rune",
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
2015-08-03 10:28:40 +00:00
|
|
|
	// Fuzzing regression tests.
	{
		input: "-=",
		fail: true,
		errMsg: `no valid expression found`,
	}, {
		input: "++-++-+-+-<",
		fail: true,
		errMsg: `no valid expression found`,
	}, {
		input: "e-+=/(0)",
		fail: true,
		errMsg: `no valid expression found`,
	}, {
		input: "-If",
		fail: true,
		errMsg: `no valid expression found`,
	},
	// String quoting and escape sequence interpretation tests.
	{
		input: `"double-quoted string \" with escaped quote"`,
		expected: &StringLiteral{
			Val: "double-quoted string \" with escaped quote",
		},
	}, {
		input: `'single-quoted string \' with escaped quote'`,
		expected: &StringLiteral{
			Val: "single-quoted string ' with escaped quote",
		},
	}, {
		input: "`backtick-quoted string`",
		expected: &StringLiteral{
			Val: "backtick-quoted string",
		},
	}, {
		input: `"\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺"`,
		expected: &StringLiteral{
			Val: "\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺",
		},
	}, {
		input: `'\a\b\f\n\r\t\v\\\' - \xFF\377\u1234\U00010111\U0001011111☺'`,
		expected: &StringLiteral{
			Val: "\a\b\f\n\r\t\v\\' - \xFF\377\u1234\U00010111\U0001011111☺",
		},
	}, {
		input: "`" + `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺` + "`",
		expected: &StringLiteral{
			Val: `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺`,
		},
	}, {
		input: "`\\``",
		fail: true,
		errMsg: "could not parse remaining input",
	}, {
		input: `"\`,
		fail: true,
		errMsg: "escape sequence not terminated",
	}, {
		input: `"\c"`,
		fail: true,
		errMsg: "unknown escape sequence U+0063 'c'",
	}, {
		input: `"\x."`,
		fail: true,
		errMsg: "illegal character U+002E '.' in escape sequence",
	},
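	// Note on the cases above: backtick-quoted inputs are raw strings, so escape
	// sequences are not interpreted and the expected Val keeps the backslashes
	// exactly as written.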
}

func TestParseExpressions(t *testing.T) {
	for _, test := range testExpr {
		parser := newParser(test.input)

		expr, err := parser.parseExpr()

		// Unexpected errors are always caused by a bug.
		if err == errUnexpected {
			t.Fatalf("unexpected error occurred")
		}

		if !test.fail && err != nil {
			t.Errorf("error in input '%s'", test.input)
			t.Fatalf("could not parse: %s", err)
		}
		if test.fail && err != nil {
			if !strings.Contains(err.Error(), test.errMsg) {
				t.Errorf("unexpected error on input '%s'", test.input)
				t.Fatalf("expected error to contain %q but got %q", test.errMsg, err)
			}
			continue
		}

		err = parser.typecheck(expr)
		if !test.fail && err != nil {
			t.Errorf("error on input '%s'", test.input)
			t.Fatalf("typecheck failed: %s", err)
		}

		if test.fail {
			if err != nil {
				if !strings.Contains(err.Error(), test.errMsg) {
					t.Errorf("unexpected error on input '%s'", test.input)
					t.Fatalf("expected error to contain %q but got %q", test.errMsg, err)
				}
				continue
			}
			t.Errorf("error on input '%s'", test.input)
			t.Fatalf("failure expected, but passed with result: %q", expr)
		}

		if !reflect.DeepEqual(expr, test.expected) {
			t.Errorf("error on input '%s'", test.input)
			t.Fatalf("no match\n\nexpected:\n%s\ngot: \n%s\n", Tree(test.expected), Tree(expr))
		}
	}
}

// NaN has no equality. Thus, we need a separate test for it.
func TestNaNExpression(t *testing.T) {
	parser := newParser("NaN")

	expr, err := parser.parseExpr()
	if err != nil {
		t.Errorf("error on input 'NaN'")
		t.Fatalf("could not parse: %s", err)
	}

	nl, ok := expr.(*NumberLiteral)
	if !ok {
		t.Errorf("error on input 'NaN'")
		t.Fatalf("expected number literal but got %T", expr)
	}

	if !math.IsNaN(float64(nl.Val)) {
		t.Errorf("error on input 'NaN'")
		t.Fatalf("expected 'NaN' in number literal but got %v", nl.Val)
	}
}
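
// The dedicated test above is needed because reflect.DeepEqual never reports a
// match for a NaN expectation: NaN compares unequal to itself (math.NaN() !=
// math.NaN()), so the table-driven comparison in TestParseExpressions could not
// cover this case.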

var testStatement = []struct {
	input    string
	expected Statements
	fail     bool
}{
	{
		// Test a file-like input.
		input: `
			# A simple test recording rule.
			dc:http_request:rate5m = sum(rate(http_request_count[5m])) by (dc)

			# A simple test alerting rule.
			ALERT GlobalRequestRateLow IF(dc:http_request:rate5m < 10000) FOR 5m
			  LABELS {
			    service = "testservice"
			    # ... more fields here ...
			  }
			  ANNOTATIONS {
			    summary = "Global request rate low",
			    description = "The global request rate is low"
			  }

			foo = bar{label1="value1"}

			ALERT BazAlert IF foo > 10
			  ANNOTATIONS {
			    description = "BazAlert",
			    runbook = "http://my.url",
			    summary = "Baz",
			  }
		`,
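		// The input above uses the text-based rule syntax of early Prometheus:
		// recording rules written as `name = expr` and alerting rules written as
		// ALERT ... IF ... FOR ... LABELS { ... } ANNOTATIONS { ... }.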
|
|
|
|
expected: Statements{
|
|
|
|
&RecordStmt{
|
|
|
|
Name: "dc:http_request:rate5m",
|
|
|
|
Expr: &AggregateExpr{
|
|
|
|
Op: itemSum,
|
2015-08-20 15:18:46 +00:00
|
|
|
Grouping: model.LabelNames{"dc"},
|
2015-03-30 16:12:51 +00:00
|
|
|
Expr: &Call{
|
|
|
|
Func: mustGetFunction("rate"),
|
|
|
|
Args: Expressions{
|
|
|
|
&MatrixSelector{
|
|
|
|
Name: "http_request_count",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
storage: improve index lookups
tl;dr: This is not a fundamental solution to the indexing problem
(like tindex is) but it at least avoids utilizing the intersection
problem to the greatest possible amount.
In more detail:
Imagine the following query:
nicely:aggregating:rule{job="foo",env="prod"}
While it uses a nicely aggregating recording rule (which might have a
very low cardinality), Prometheus still intersects the low number of
fingerprints for `{__name__="nicely:aggregating:rule"}` with the many
thousands of fingerprints matching `{job="foo"}` and with the millions
of fingerprints matching `{env="prod"}`. This totally innocuous query
is dead slow if the Prometheus server has a lot of time series with
the `{env="prod"}` label. Ironically, if you make the query more
complicated, it becomes blazingly fast:
nicely:aggregating:rule{job=~"foo",env=~"prod"}
Why so? Because Prometheus only intersects with non-Equal matchers if
there are no Equal matchers. That's good in this case because it
retrieves the few fingerprints for
`{__name__="nicely:aggregating:rule"}` and then starts right ahead to
retrieve the metric for those FPs and checking individually if they
match the other matchers.
This change is generalizing the idea of when to stop intersecting FPs
and go into "retrieve metrics and check them individually against
remaining matchers" mode:
- First, sort all matchers by "expected cardinality". Matchers
matching the empty string are always worst (and never used for
intersections). Equal matchers are in general consider best, but by
using some crude heuristics, we declare some better than others
(instance labels or anything that looks like a recording rule).
- Then go through the matchers until we hit a threshold of remaining
FPs in the intersection. This threshold is higher if we are already
in the non-Equal matcher area as intersection is even more expensive
here.
- Once the threshold has been reached (or we have run out of matchers
that do not match the empty string), start with "retrieve metrics
and check them individually against remaining matchers".
A beefy server at SoundCloud was spending 67% of its CPU time in index
lookups (fingerprintsForLabelPairs), serving mostly a dashboard that
is exclusively built with recording rules. With this change, it spends
only 35% in fingerprintsForLabelPairs. The CPU usage dropped from 26
cores to 18 cores. The median latency for query_range dropped from 14s
to 50ms(!). As expected, higher percentile latency didn't improve that
much because the new approach is _occasionally_ running into the worst
case while the old one was _systematically_ doing so. The 99th
percentile latency is now about as high as the median before (14s)
while it was almost twice as high before (26s).
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "http_request_count"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
Range: 5 * time.Minute,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Labels: nil,
|
|
|
|
},
|
|
|
|
&AlertStmt{
|
|
|
|
Name: "GlobalRequestRateLow",
|
|
|
|
Expr: &ParenExpr{&BinaryExpr{
|
|
|
|
Op: itemLSS,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "dc:http_request:rate5m",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "dc:http_request:rate5m"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &NumberLiteral{10000},
|
|
|
|
}},
|
2015-12-11 16:02:34 +00:00
|
|
|
Labels: model.LabelSet{"service": "testservice"},
|
|
|
|
Duration: 5 * time.Minute,
|
|
|
|
Annotations: model.LabelSet{
|
|
|
|
"summary": "Global request rate low",
|
|
|
|
"description": "The global request rate is low",
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
&RecordStmt{
|
|
|
|
Name: "foo",
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "label1", "value1"),
|
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Labels: nil,
|
|
|
|
},
|
|
|
|
&AlertStmt{
|
|
|
|
Name: "BazAlert",
|
|
|
|
Expr: &BinaryExpr{
|
|
|
|
Op: itemGTR,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "foo",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "foo"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &NumberLiteral{10},
|
|
|
|
},
|
2015-12-11 16:02:34 +00:00
|
|
|
Labels: model.LabelSet{},
|
|
|
|
Annotations: model.LabelSet{
|
|
|
|
"summary": "Baz",
|
|
|
|
"description": "BazAlert",
|
|
|
|
"runbook": "http://my.url",
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: `foo{x="", a="z"} = bar{a="b", x=~"y"}`,
|
|
|
|
expected: Statements{
|
|
|
|
&RecordStmt{
|
|
|
|
Name: "foo",
|
|
|
|
Expr: &VectorSelector{
|
|
|
|
Name: "bar",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, "a", "b"),
|
2015-03-30 16:12:51 +00:00
|
|
|
mustLabelMatcher(metric.RegexMatch, "x", "y"),
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "bar"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2015-08-20 15:18:46 +00:00
|
|
|
Labels: model.LabelSet{"x": "", "a": "z"},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
2016-01-11 10:39:22 +00:00
|
|
|
}, {
|
|
|
|
input: `ALERT SomeName IF some_metric > 1
|
|
|
|
LABELS {}
|
2015-12-11 16:02:34 +00:00
|
|
|
ANNOTATIONS {
|
2016-01-11 10:39:22 +00:00
|
|
|
summary = "Global request rate low",
|
|
|
|
description = "The global request rate is low",
|
2015-12-11 16:02:34 +00:00
|
|
|
}
|
2015-03-30 16:12:51 +00:00
|
|
|
`,
|
|
|
|
expected: Statements{
|
|
|
|
&AlertStmt{
|
|
|
|
Name: "SomeName",
|
|
|
|
Expr: &BinaryExpr{
|
|
|
|
Op: itemGTR,
|
|
|
|
LHS: &VectorSelector{
|
|
|
|
Name: "some_metric",
|
|
|
|
LabelMatchers: metric.LabelMatchers{
|
2016-06-28 18:18:32 +00:00
|
|
|
mustLabelMatcher(metric.Equal, model.MetricNameLabel, "some_metric"),
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
RHS: &NumberLiteral{1},
|
|
|
|
},
|
2015-12-11 16:02:34 +00:00
|
|
|
Labels: model.LabelSet{},
|
|
|
|
Annotations: model.LabelSet{
|
|
|
|
"summary": "Global request rate low",
|
|
|
|
"description": "The global request rate is low",
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, {
|
|
|
|
input: `
|
|
|
|
# A simple test alerting rule.
|
2015-12-23 13:54:02 +00:00
|
|
|
ALERT GlobalRequestRateLow IF(dc:http_request:rate5m < 10000) FOR 5
|
|
|
|
LABELS {
|
2015-03-30 16:12:51 +00:00
|
|
|
service = "testservice"
|
2016-01-25 03:50:46 +00:00
|
|
|
# ... more fields here ...
|
2015-03-30 16:12:51 +00:00
|
|
|
}
|
2015-12-11 16:02:34 +00:00
|
|
|
ANNOTATIONS {
|
|
|
|
summary = "Global request rate low"
|
|
|
|
description = "The global request rate is low"
|
|
|
|
}
|
2015-03-30 16:12:51 +00:00
|
|
|
`,
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: "",
|
|
|
|
expected: Statements{},
|
|
|
|
}, {
|
|
|
|
input: "foo = time()",
|
2015-08-19 20:09:00 +00:00
|
|
|
expected: Statements{
|
|
|
|
&RecordStmt{
|
|
|
|
Name: "foo",
|
|
|
|
Expr: &Call{Func: mustGetFunction("time")},
|
|
|
|
Labels: nil,
|
|
|
|
}},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
input: "foo = 1",
|
2015-08-19 20:09:00 +00:00
|
|
|
expected: Statements{
|
|
|
|
&RecordStmt{
|
|
|
|
Name: "foo",
|
|
|
|
Expr: &NumberLiteral{1},
|
|
|
|
Labels: nil,
|
|
|
|
}},
|
2015-03-30 16:12:51 +00:00
|
|
|
}, {
|
|
|
|
input: "foo = bar[5m]",
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: `foo = "test"`,
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: `foo = `,
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: `foo{a!="b"} = bar`,
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: `foo{a=~"b"} = bar`,
|
|
|
|
fail: true,
|
|
|
|
}, {
|
|
|
|
input: `foo{a!~"b"} = bar`,
|
|
|
|
fail: true,
|
|
|
|
},
|
2015-08-03 10:53:31 +00:00
|
|
|
// Fuzzing regression tests.
|
|
|
|
{
|
|
|
|
input: `I=-/`,
|
|
|
|
fail: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
input: `I=3E8/-=`,
|
|
|
|
fail: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
input: `M=-=-0-0`,
|
|
|
|
fail: true,
|
|
|
|
},
|
2015-03-30 16:12:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
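// TestParseStatements parses each test case in testStatement, typechecks the
// result, and compares the resulting AST against the expected statements.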
func TestParseStatements(t *testing.T) {
|
|
|
|
for _, test := range testStatement {
|
2015-04-29 09:36:41 +00:00
|
|
|
parser := newParser(test.input)
|
2015-03-30 16:12:51 +00:00
|
|
|
|
|
|
|
stmts, err := parser.parseStmts()
|
2015-08-03 10:53:31 +00:00
|
|
|
|
|
|
|
// Unexpected errors are always caused by a bug.
|
|
|
|
if err == errUnexpected {
|
|
|
|
t.Fatalf("unexpected error occurred")
|
|
|
|
}
|
|
|
|
|
2015-03-30 16:12:51 +00:00
|
|
|
if !test.fail && err != nil {
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("could not parse: %s", err)
|
|
|
|
}
|
|
|
|
if test.fail && err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
err = parser.typecheck(stmts)
|
|
|
|
if !test.fail && err != nil {
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("typecheck failed: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if test.fail {
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("failure expected, but passed")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(stmts, test.expected) {
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("no match\n\nexpected:\n%s\ngot: \n%s\n", Tree(test.expected), Tree(stmts))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-20 15:18:46 +00:00
|
|
|
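// mustLabelMatcher constructs a label matcher for the test fixtures and panics
// if the matcher cannot be created.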
func mustLabelMatcher(mt metric.MatchType, name model.LabelName, val model.LabelValue) *metric.LabelMatcher {
|
2015-03-30 16:12:51 +00:00
|
|
|
m, err := metric.NewLabelMatcher(mt, name, val)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
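// mustGetFunction looks up a PromQL function by name and panics if it does not exist.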
func mustGetFunction(name string) *Function {
|
2015-03-30 17:13:36 +00:00
|
|
|
f, ok := getFunction(name)
|
2015-03-30 16:12:51 +00:00
|
|
|
if !ok {
|
|
|
|
panic(fmt.Errorf("function %q does not exist", name))
|
|
|
|
}
|
|
|
|
return f
|
|
|
|
}
|
2015-05-11 12:04:53 +00:00
|
|
|
|
|
|
|
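// testSeries lists series descriptions from the test data language together
// with the metric and sample values they are expected to parse into.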
var testSeries = []struct {
|
|
|
|
input string
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric model.Metric
|
2015-05-11 12:04:53 +00:00
|
|
|
expectedValues []sequenceValue
|
|
|
|
fail bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
input: `{} 1 2 3`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{},
|
2015-05-11 12:04:53 +00:00
|
|
|
expectedValues: newSeq(1, 2, 3),
|
|
|
|
}, {
|
|
|
|
input: `{a="b"} -1 2 3`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
2015-05-11 12:04:53 +00:00
|
|
|
"a": "b",
|
|
|
|
},
|
|
|
|
expectedValues: newSeq(-1, 2, 3),
|
|
|
|
}, {
|
|
|
|
input: `my_metric 1 2 3`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-05-11 12:04:53 +00:00
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 2, 3),
|
|
|
|
}, {
|
|
|
|
input: `my_metric{} 1 2 3`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-05-11 12:04:53 +00:00
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 2, 3),
|
|
|
|
}, {
|
|
|
|
input: `my_metric{a="b"} 1 2 3`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-05-11 12:04:53 +00:00
|
|
|
"a": "b",
|
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 2, 3),
|
|
|
|
}, {
|
|
|
|
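// The expanding notation "3-10x4" denotes an arithmetic sequence: start at 3
// and append four more values, each 10 lower (3, -7, -17, -27, -37).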
input: `my_metric{a="b"} 1 2 3-10x4`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-05-11 12:04:53 +00:00
|
|
|
"a": "b",
|
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 2, 3, -7, -17, -27, -37),
|
2015-06-04 16:21:24 +00:00
|
|
|
}, {
|
|
|
|
input: `my_metric{a="b"} 1 2 3-0x4`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-06-04 16:21:24 +00:00
|
|
|
"a": "b",
|
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 2, 3, 3, 3, 3, 3),
|
2015-05-11 12:04:53 +00:00
|
|
|
}, {
|
|
|
|
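// An underscore marks an omitted sample; "_x4" repeats the omission four times.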
input: `my_metric{a="b"} 1 3 _ 5 _x4`,
|
2015-08-20 15:18:46 +00:00
|
|
|
expectedMetric: model.Metric{
|
|
|
|
model.MetricNameLabel: "my_metric",
|
2015-05-11 12:04:53 +00:00
|
|
|
"a": "b",
|
|
|
|
},
|
|
|
|
expectedValues: newSeq(1, 3, none, 5, none, none, none, none),
|
|
|
|
}, {
|
|
|
|
input: `my_metric{a="b"} 1 3 _ 5 _a4`,
|
|
|
|
fail: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2015-05-11 13:56:35 +00:00
|
|
|
// For these tests only, we use the smallest float64 to signal an omitted value.
|
2015-05-11 12:04:53 +00:00
|
|
|
const none = math.SmallestNonzeroFloat64
|
|
|
|
|
|
|
|
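// newSeq converts the given values into the expected sequence, mapping the
// none sentinel to an omitted sample.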
func newSeq(vals ...float64) (res []sequenceValue) {
|
|
|
|
for _, v := range vals {
|
|
|
|
if v == none {
|
|
|
|
res = append(res, sequenceValue{omitted: true})
|
|
|
|
} else {
|
2015-08-20 15:18:46 +00:00
|
|
|
res = append(res, sequenceValue{value: model.SampleValue(v)})
|
2015-05-11 12:04:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
|
|
|
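// TestParseSeries parses each series description in testSeries and compares
// the resulting metric and expanded values against the expectations.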
func TestParseSeries(t *testing.T) {
|
|
|
|
for _, test := range testSeries {
|
|
|
|
parser := newParser(test.input)
|
|
|
|
parser.lex.seriesDesc = true
|
|
|
|
|
|
|
|
metric, vals, err := parser.parseSeriesDesc()
|
2015-08-03 10:53:31 +00:00
|
|
|
|
|
|
|
// Unexpected errors are always caused by a bug.
|
|
|
|
if err == errUnexpected {
|
|
|
|
t.Fatalf("unexpected error occurred")
|
|
|
|
}
|
|
|
|
|
2015-05-11 12:04:53 +00:00
|
|
|
if !test.fail && err != nil {
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("could not parse: %s", err)
|
|
|
|
}
|
|
|
|
if test.fail && err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if test.fail {
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("failure expected, but passed")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(vals, test.expectedValues) || !reflect.DeepEqual(metric, test.expectedMetric) {
|
|
|
|
t.Errorf("error in input: \n\n%s\n", test.input)
|
|
|
|
t.Fatalf("no match\n\nexpected:\n%s %s\ngot: \n%s %s\n", test.expectedMetric, test.expectedValues, metric, vals)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-08-02 11:37:42 +00:00
|
|
|
|
2015-08-19 13:28:53 +00:00
|
|
|
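// TestRecoverParserRuntime checks that the parser's recover handler converts a
// runtime panic into errUnexpected instead of crashing the test.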
func TestRecoverParserRuntime(t *testing.T) {
|
2015-08-02 11:37:42 +00:00
|
|
|
var p *parser
|
|
|
|
var err error
|
|
|
|
defer p.recover(&err)
|
|
|
|
|
|
|
|
// Cause a runtime panic.
|
|
|
|
var a []int
|
|
|
|
a[123] = 1
|
|
|
|
|
2015-08-03 10:53:31 +00:00
|
|
|
if err != errUnexpected {
|
|
|
|
t.Fatalf("wrong error message: %q, expected %q", err, errUnexpected)
|
2015-08-02 11:37:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-19 13:28:53 +00:00
|
|
|
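// TestRecoverParserError checks that a panic carrying a regular error is
// recovered and surfaced as that same error.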
func TestRecoverParserError(t *testing.T) {
|
2015-08-02 11:37:42 +00:00
|
|
|
var p *parser
|
|
|
|
var err error
|
|
|
|
|
|
|
|
e := fmt.Errorf("custom error")
|
|
|
|
|
2015-08-26 00:04:01 +00:00
|
|
|
defer func() {
|
|
|
|
if err.Error() != e.Error() {
|
|
|
|
t.Fatalf("wrong error message: %q, expected %q", err, e)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
defer p.recover(&err)
|
|
|
|
|
|
|
|
panic(e)
|
2015-08-02 11:37:42 +00:00
|
|
|
}
|
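// The "storage: improve index lookups" annotation above describes a heuristic
// for vector selector evaluation: sort matchers by expected cardinality,
// intersect fingerprint sets only while the candidate set is still large, and
// then check the remaining matchers against each candidate metric
// individually. The sketch below is purely illustrative and is NOT the actual
// storage implementation: the names sketchMatcher, sketchIndex, sketchSelect
// and the threshold value are invented here, and the heuristic is simplified
// to intersect only Equal matchers.

// sketchMatcher is a hypothetical, stripped-down label matcher.
type sketchMatcher struct {
	name, value string
	isEqual     bool              // Equal matchers are cheap to look up and intersect.
	matches     func(string) bool // Per-metric check for non-Equal matchers.
}

// sketchIndex is a hypothetical view of an inverted index plus metric store.
// fingerprintsFor is assumed to return a set the caller may modify.
type sketchIndex interface {
	fingerprintsFor(name, value string) map[uint64]struct{}
	labelsFor(fp uint64) map[string]string
}

// sketchSelect intersects the fingerprint sets of Equal matchers until the
// candidate set drops below a threshold, then loads each remaining metric and
// checks it against all matchers that were not used for intersection.
func sketchSelect(idx sketchIndex, matchers []sketchMatcher) []uint64 {
	// Crude "expected cardinality" ordering: Equal matchers first, everything
	// else is only ever checked per metric. (A real implementation would also
	// fall back to intersecting non-Equal matchers, with a higher threshold,
	// if no Equal matchers are present.)
	var equals, perMetric []sketchMatcher
	for _, m := range matchers {
		if m.isEqual {
			equals = append(equals, m)
		} else {
			perMetric = append(perMetric, m)
		}
	}

	const threshold = 1000 // Stop intersecting once few enough candidates remain.

	var candidates map[uint64]struct{}
	for len(equals) > 0 && (candidates == nil || len(candidates) > threshold) {
		fps := idx.fingerprintsFor(equals[0].name, equals[0].value)
		if candidates == nil {
			candidates = fps
		} else {
			for fp := range candidates {
				if _, ok := fps[fp]; !ok {
					delete(candidates, fp)
				}
			}
		}
		equals = equals[1:]
	}
	// Equal matchers we never intersected are also checked per metric.
	perMetric = append(perMetric, equals...)

	var result []uint64
	for fp := range candidates {
		labels := idx.labelsFor(fp)
		ok := true
		for _, m := range perMetric {
			v := labels[m.name]
			if m.isEqual {
				ok = v == m.value
			} else {
				ok = m.matches != nil && m.matches(v)
			}
			if !ok {
				break
			}
		}
		if ok {
			result = append(result, fp)
		}
	}
	return result
}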