Add more benchmarks for inhibition rules (#3773)

* Add more benchmarks for inhibition rules

This commit adds more benchmarks for inhibition rules, covering the case
where only the last rule in the benchmark inhibits the labels.
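
For reference, the new cases can be exercised with the standard Go benchmark
tooling, for example (the benchmark file's package path is not shown in this
view, so ./... is used as a stand-in):

    go test -run '^$' -bench 'BenchmarkMutes' -benchmem ./...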

Signed-off-by: George Robinson <george.robinson@grafana.com>

George Robinson 2024-03-22 09:07:19 +00:00 (committed by GitHub)
parent f41fccb730
commit 14cbe6301c
1 changed file with 73 additions and 22 deletions

@@ -35,34 +35,46 @@ import (
 // for different numbers of inhibition rules.
 func BenchmarkMutes(b *testing.B) {
 	b.Run("1 inhibition rule, 1 inhibiting alert", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1, 1))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1, 1))
 	})
 	b.Run("10 inhibition rules, 1 inhibiting alert", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 10, 1))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 10, 1))
 	})
 	b.Run("100 inhibition rules, 1 inhibiting alert", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 100, 1))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 100, 1))
 	})
 	b.Run("1000 inhibition rules, 1 inhibiting alert", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1000, 1))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1000, 1))
 	})
 	b.Run("10000 inhibition rules, 1 inhibiting alert", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 10000, 1))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 10000, 1))
 	})
 	b.Run("1 inhibition rule, 10 inhibiting alerts", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1, 10))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1, 10))
 	})
 	b.Run("1 inhibition rule, 100 inhibiting alerts", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1, 100))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1, 100))
 	})
 	b.Run("1 inhibition rule, 1000 inhibiting alerts", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1, 1000))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1, 1000))
 	})
 	b.Run("1 inhibition rule, 10000 inhibiting alerts", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 1, 10000))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 1, 10000))
 	})
 	b.Run("100 inhibition rules, 1000 inhibiting alerts", func(b *testing.B) {
-		benchmarkMutes(b, defaultBenchmark(b, 100, 1000))
+		benchmarkMutes(b, allRulesMatchBenchmark(b, 100, 1000))
 	})
+	b.Run("10 inhibition rules, last rule matches", func(b *testing.B) {
+		benchmarkMutes(b, lastRuleMatchesBenchmark(b, 10))
+	})
+	b.Run("100 inhibition rules, last rule matches", func(b *testing.B) {
+		benchmarkMutes(b, lastRuleMatchesBenchmark(b, 100))
+	})
+	b.Run("1000 inhibition rules, last rule matches", func(b *testing.B) {
+		benchmarkMutes(b, lastRuleMatchesBenchmark(b, 1000))
+	})
+	b.Run("10000 inhibition rules, last rule matches", func(b *testing.B) {
+		benchmarkMutes(b, lastRuleMatchesBenchmark(b, 10000))
+	})
 }
@@ -79,19 +91,20 @@ type benchmarkOptions struct {
 	benchFunc func(mutesFunc func(model.LabelSet) bool) error
 }
 
-// defaultBenchmark returns the default benchmark. It supports a number of
-// variations, including customization of the number of inhibition rules,
-// and the number of inhibiting alerts per inhibition rule.
+// allRulesMatchBenchmark returns a new benchmark where all inhibition rules
+// inhibit the label dst=0. It supports a number of variations, including
+// customization of the number of inhibition rules, and the number of
+// inhibiting alerts per inhibition rule.
 //
 // The source matchers are suffixed with the position of the inhibition rule
-// in the list. For example, src=1, src=2, etc. The target matchers are
-// the same across all inhibition rules (dst=0).
+// in the list (e.g. src=1, src=2, etc...). The target matchers are the same
+// across all inhibition rules (dst=0).
 //
 // Each inhibition rule can have zero or more alerts that match the source
 // matchers, and is determined with numInhibitingAlerts.
 //
-// The default benchmark expects dst=0 to be muted and will fail if not.
-func defaultBenchmark(b *testing.B, numInhibitionRules, numInhibitingAlerts int) benchmarkOptions {
+// It expects dst=0 to be muted and will fail if not.
+func allRulesMatchBenchmark(b *testing.B, numInhibitionRules, numInhibitingAlerts int) benchmarkOptions {
 	return benchmarkOptions{
 		n: numInhibitionRules,
 		newRuleFunc: func(idx int) config.InhibitRule {
@@ -126,6 +139,48 @@ func defaultBenchmark(b *testing.B, numInhibitionRules, numInhibitingAlerts int)
 	}
 }
 
+// lastRuleMatchesBenchmark returns a new benchmark where the last inhibition
+// rule inhibits the label dst=0. All other inhibition rules are no-ops.
+//
+// The source matchers are suffixed with the position of the inhibition rule
+// in the list (e.g. src=1, src=2, etc...). The target matchers are the same
+// across all inhibition rules (dst=0).
+//
+// It expects dst=0 to be muted and will fail if not.
+func lastRuleMatchesBenchmark(b *testing.B, n int) benchmarkOptions {
+	return benchmarkOptions{
+		n: n,
+		newRuleFunc: func(idx int) config.InhibitRule {
+			return config.InhibitRule{
+				SourceMatchers: config.Matchers{
+					mustNewMatcher(b, labels.MatchEqual, "src", strconv.Itoa(idx)),
+				},
+				TargetMatchers: config.Matchers{
+					mustNewMatcher(b, labels.MatchEqual, "dst", "0"),
+				},
+			}
+		},
+		newAlertsFunc: func(idx int, _ config.InhibitRule) []types.Alert {
+			// Do not create an alert unless it is the last inhibition rule.
+			if idx < n-1 {
+				return nil
+			}
+			return []types.Alert{{
+				Alert: model.Alert{
+					Labels: model.LabelSet{
+						"src": model.LabelValue(strconv.Itoa(idx)),
+					},
+				},
+			}}
+		}, benchFunc: func(mutesFunc func(set model.LabelSet) bool) error {
+			if ok := mutesFunc(model.LabelSet{"dst": "0"}); !ok {
+				return errors.New("expected dst=0 to be muted")
+			}
+			return nil
+		},
+	}
+}
+
 func benchmarkMutes(b *testing.B, opts benchmarkOptions) {
 	r := prometheus.NewRegistry()
 	m := types.NewMarker(r)
@@ -148,11 +203,7 @@ func benchmarkMutes(b *testing.B, opts benchmarkOptions) {
 	go ih.Run()
 
 	// Wait some time for the inhibitor to seed its cache.
-	waitDuration := time.Millisecond * time.Duration(len(alerts))
-	if waitDuration > time.Second {
-		waitDuration = time.Second
-	}
-	<-time.After(waitDuration)
+	<-time.After(time.Second)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {