From 0f86edcf5cf38563c8d063d6e9ae84f4ed814867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 1 Jun 2021 09:33:47 +0200 Subject: [PATCH] Extract TestGroupsWithLimits, and remove limit test from TestGroups. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- dispatch/dispatch_test.go | 97 +++++++++++++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 14 deletions(-) diff --git a/dispatch/dispatch_test.go b/dispatch/dispatch_test.go index 39575b36..022dfab9 100644 --- a/dispatch/dispatch_test.go +++ b/dispatch/dispatch_test.go @@ -374,9 +374,8 @@ route: timeout := func(d time.Duration) time.Duration { return time.Duration(0) } recorder := &recordStage{alerts: make(map[string]map[model.Fingerprint]*types.Alert)} - lim := limits{groups: 6} - m := NewDispatcherMetrics(true, prometheus.NewRegistry()) - dispatcher := NewDispatcher(alerts, route, recorder, marker, timeout, lim, logger, m) + m := NewDispatcherMetrics(false, prometheus.NewRegistry()) + dispatcher := NewDispatcher(alerts, route, recorder, marker, timeout, nil, logger, m) go dispatcher.Run() defer dispatcher.Stop() @@ -394,10 +393,7 @@ route: // Matches the second and third sub-route. newAlert(model.LabelSet{"env": "prod", "alertname": "HighLatency", "cluster": "bb", "service": "db", "kafka": "yes", "instance": "inst3"}), } - err = alerts.Put(inputAlerts...) - if err != nil { - t.Fatal(err) - } + alerts.Put(inputAlerts...) // Let alerts get processed. for i := 0; len(recorder.Alerts()) != 7 && i < 10; i++ { @@ -474,6 +470,85 @@ route: inputAlerts[4].Fingerprint(): {"prod"}, inputAlerts[5].Fingerprint(): {"kafka", "prod"}, }, receivers) +} + +func TestGroupsWithLimits(t *testing.T) { + confData := `receivers: +- name: 'kafka' +- name: 'prod' +- name: 'testing' + +route: + group_by: ['alertname'] + group_wait: 10ms + group_interval: 10ms + receiver: 'prod' + routes: + - match: + env: 'testing' + receiver: 'testing' + group_by: ['alertname', 'service'] + - match: + env: 'prod' + receiver: 'prod' + group_by: ['alertname', 'service', 'cluster'] + continue: true + - match: + kafka: 'yes' + receiver: 'kafka' + group_by: ['alertname', 'service', 'cluster']` + conf, err := config.Load(confData) + if err != nil { + t.Fatal(err) + } + + logger := log.NewNopLogger() + route := NewRoute(conf.Route, nil) + marker := types.NewMarker(prometheus.NewRegistry()) + alerts, err := mem.NewAlerts(context.Background(), marker, time.Hour, logger) + if err != nil { + t.Fatal(err) + } + defer alerts.Close() + + timeout := func(d time.Duration) time.Duration { return time.Duration(0) } + recorder := &recordStage{alerts: make(map[string]map[model.Fingerprint]*types.Alert)} + lim := limits{groups: 6} + m := NewDispatcherMetrics(true, prometheus.NewRegistry()) + dispatcher := NewDispatcher(alerts, route, recorder, marker, timeout, lim, logger, m) + go dispatcher.Run() + defer dispatcher.Stop() + + // Create alerts. the dispatcher will automatically create the groups. + inputAlerts := []*types.Alert{ + // Matches the parent route. + newAlert(model.LabelSet{"alertname": "OtherAlert", "cluster": "cc", "service": "dd"}), + // Matches the first sub-route. + newAlert(model.LabelSet{"env": "testing", "alertname": "TestingAlert", "service": "api", "instance": "inst1"}), + // Matches the second sub-route. 
+ newAlert(model.LabelSet{"env": "prod", "alertname": "HighErrorRate", "cluster": "aa", "service": "api", "instance": "inst1"}), + newAlert(model.LabelSet{"env": "prod", "alertname": "HighErrorRate", "cluster": "aa", "service": "api", "instance": "inst2"}), + // Matches the second sub-route. + newAlert(model.LabelSet{"env": "prod", "alertname": "HighErrorRate", "cluster": "bb", "service": "api", "instance": "inst1"}), + // Matches the second and third sub-route. + newAlert(model.LabelSet{"env": "prod", "alertname": "HighLatency", "cluster": "bb", "service": "db", "kafka": "yes", "instance": "inst3"}), + } + err = alerts.Put(inputAlerts...) + if err != nil { + t.Fatal(err) + } + + // Let alerts get processed. + for i := 0; len(recorder.Alerts()) != 7 && i < 10; i++ { + time.Sleep(200 * time.Millisecond) + } + require.Equal(t, 7, len(recorder.Alerts())) + + routeFilter := func(*Route) bool { return true } + alertFilter := func(*types.Alert, time.Time) bool { return true } + + alertGroups, _ := dispatcher.Groups(routeFilter, alertFilter) + require.Len(t, alertGroups, 6) require.Equal(t, 0.0, testutil.ToFloat64(m.aggrGroupLimitReached)) @@ -490,13 +565,7 @@ route: require.Equal(t, 1.0, testutil.ToFloat64(m.aggrGroupLimitReached)) // Verify there are still only 6 groups. - alertGroups, _ = dispatcher.Groups( - func(*Route) bool { - return true - }, func(*types.Alert, time.Time) bool { - return true - }, - ) + alertGroups, _ = dispatcher.Groups(routeFilter, alertFilter) require.Len(t, alertGroups, 6) }
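
Note: the `limits` helper used by the new test (`lim := limits{groups: 6}`) is defined elsewhere in dispatch_test.go, so it does not appear in this diff. Below is a minimal sketch of what such a helper could look like, assuming the dispatcher's limits argument is an interface with a single `MaxNumberOfAggregationGroups()` method; both the interface name `Limits` and that method name are assumptions here, not something this patch confirms. The diff itself only shows that the argument is nillable, since TestGroups now passes `nil` for it.

// Sketch only, under the assumptions stated above; the real definitions
// live in the dispatch package sources, not in this patch.
package dispatch

// Limits is the assumed shape of the dispatcher's limits dependency.
type Limits interface {
	// MaxNumberOfAggregationGroups returns the maximum number of aggregation
	// groups the dispatcher may create; 0 is assumed to mean "no limit".
	MaxNumberOfAggregationGroups() int
}

// limits is a fixed-value test implementation of the assumed interface.
type limits struct {
	groups int
}

func (l limits) MaxNumberOfAggregationGroups() int { return l.groups }

With the limit set to 6, the six input alerts produce exactly six aggregation groups: OtherAlert on the parent route, TestingAlert on the testing sub-route, one HighErrorRate group per cluster (aa and bb) on the prod sub-route, and HighLatency on both the prod and kafka sub-routes (which is also why the recorder sees 7 alerts from 6 inputs). The aggrGroupLimitReached counter therefore stays at 0 at this point; the later, unchanged part of the test (outside this diff's context lines) is what pushes the dispatcher past the limit and drives the counter to 1 while the group count stays at 6.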