// Copyright 2015 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package notify

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/alertmanager/featurecontrol"
	"github.com/prometheus/alertmanager/nflog"
	"github.com/prometheus/alertmanager/nflog/nflogpb"
	"github.com/prometheus/alertmanager/silence"
	"github.com/prometheus/alertmanager/silence/silencepb"
	"github.com/prometheus/alertmanager/timeinterval"
	"github.com/prometheus/alertmanager/types"
)
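
// The helpers below are minimal fakes for the interfaces the notification
// stages depend on: a static SendResolved flag, a function-backed Notifier, a
// Stage that always fails, and an in-memory notification log.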
type sendResolved bool

func (s sendResolved) SendResolved() bool {
	return bool(s)
}

type notifierFunc func(ctx context.Context, alerts ...*types.Alert) (bool, error)

func (f notifierFunc) Notify(ctx context.Context, alerts ...*types.Alert) (bool, error) {
	return f(ctx, alerts...)
}

type failStage struct{}

func (s failStage) Exec(ctx context.Context, l *slog.Logger, as ...*types.Alert) (context.Context, []*types.Alert, error) {
	return ctx, nil, fmt.Errorf("some error")
}

type testNflog struct {
	qres []*nflogpb.Entry
	qerr error

	logFunc func(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error
}

func (l *testNflog) Query(p ...nflog.QueryParam) ([]*nflogpb.Entry, error) {
	return l.qres, l.qerr
}

func (l *testNflog) Log(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error {
	return l.logFunc(r, gkey, firingAlerts, resolvedAlerts, expiry)
}

func (l *testNflog) GC() (int, error) {
	return 0, nil
}

func (l *testNflog) Snapshot(w io.Writer) (int, error) {
	return 0, nil
}

func alertHashSet(hashes ...uint64) map[uint64]struct{} {
	res := map[uint64]struct{}{}

	for _, h := range hashes {
		res[h] = struct{}{}
	}

	return res
}
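
// TestDedupStageNeedsUpdate walks the decision table for DedupStage.needsUpdate:
// whether a notification must be sent given the previous notification log entry,
// the current firing and resolved alerts, the repeat interval, and whether the
// receiver sends resolved notifications.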
func TestDedupStageNeedsUpdate(t *testing.T) {
	now := utcNow()

	cases := []struct {
		entry          *nflogpb.Entry
		firingAlerts   map[uint64]struct{}
		resolvedAlerts map[uint64]struct{}
		repeat         time.Duration
		resolve        bool

		res bool
	}{
		{
			// No matching nflog entry should update.
			entry:        nil,
			firingAlerts: alertHashSet(2, 3, 4),
			res:          true,
		}, {
			// No matching nflog entry shouldn't update if no alert fires.
			entry:          nil,
			resolvedAlerts: alertHashSet(2, 3, 4),
			res:            false,
		}, {
			// Different sets of firing alerts should update.
			entry:        &nflogpb.Entry{FiringAlerts: []uint64{1, 2, 3}},
			firingAlerts: alertHashSet(2, 3, 4),
			res:          true,
		}, {
			// Zero timestamp in the nflog entry should always update.
			entry: &nflogpb.Entry{
				FiringAlerts: []uint64{1, 2, 3},
				Timestamp:    time.Time{},
			},
			firingAlerts: alertHashSet(1, 2, 3),
			res:          true,
		}, {
			// Identical sets of alerts shouldn't update before repeat_interval.
			entry: &nflogpb.Entry{
				FiringAlerts: []uint64{1, 2, 3},
				Timestamp:    now.Add(-9 * time.Minute),
			},
			repeat:       10 * time.Minute,
			firingAlerts: alertHashSet(1, 2, 3),
			res:          false,
		}, {
			// Identical sets of alerts should update after repeat_interval.
			entry: &nflogpb.Entry{
				FiringAlerts: []uint64{1, 2, 3},
				Timestamp:    now.Add(-11 * time.Minute),
			},
			repeat:       10 * time.Minute,
			firingAlerts: alertHashSet(1, 2, 3),
			res:          true,
		}, {
			// Different sets of resolved alerts without firing alerts shouldn't update after repeat_interval.
			entry: &nflogpb.Entry{
				ResolvedAlerts: []uint64{1, 2, 3},
				Timestamp:      now.Add(-11 * time.Minute),
			},
			repeat:         10 * time.Minute,
			resolvedAlerts: alertHashSet(3, 4, 5),
			resolve:        true,
			res:            false,
		}, {
			// Different sets of resolved alerts shouldn't update when resolve is false.
			entry: &nflogpb.Entry{
				FiringAlerts:   []uint64{1, 2},
				ResolvedAlerts: []uint64{3},
				Timestamp:      now.Add(-9 * time.Minute),
			},
			repeat:         10 * time.Minute,
			firingAlerts:   alertHashSet(1),
			resolvedAlerts: alertHashSet(2, 3),
			resolve:        false,
			res:            false,
		}, {
			// Different sets of resolved alerts should update when resolve is true.
			entry: &nflogpb.Entry{
				FiringAlerts:   []uint64{1, 2},
				ResolvedAlerts: []uint64{3},
				Timestamp:      now.Add(-9 * time.Minute),
			},
			repeat:         10 * time.Minute,
			firingAlerts:   alertHashSet(1),
			resolvedAlerts: alertHashSet(2, 3),
			resolve:        true,
			res:            true,
		}, {
			// Empty set of firing alerts should update when resolve is false.
			entry: &nflogpb.Entry{
				FiringAlerts:   []uint64{1, 2},
				ResolvedAlerts: []uint64{3},
				Timestamp:      now.Add(-9 * time.Minute),
			},
			repeat:         10 * time.Minute,
			firingAlerts:   alertHashSet(),
			resolvedAlerts: alertHashSet(1, 2, 3),
			resolve:        false,
			res:            true,
		}, {
			// Empty set of firing alerts should update when resolve is true.
			entry: &nflogpb.Entry{
				FiringAlerts:   []uint64{1, 2},
				ResolvedAlerts: []uint64{3},
				Timestamp:      now.Add(-9 * time.Minute),
			},
			repeat:         10 * time.Minute,
			firingAlerts:   alertHashSet(),
			resolvedAlerts: alertHashSet(1, 2, 3),
			resolve:        true,
			res:            true,
		},
	}
	for i, c := range cases {
		t.Log("case", i)

		s := &DedupStage{
			now: func() time.Time { return now },
			rs:  sendResolved(c.resolve),
		}
		res := s.needsUpdate(c.entry, c.firingAlerts, c.resolvedAlerts, c.repeat)
		require.Equal(t, c.res, res)
	}
}
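
// TestDedupStage verifies that DedupStage.Exec validates the group key and
// repeat interval from the context, propagates notification log query errors
// (other than ErrNotFound), and only passes alerts on when an update is needed.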
func TestDedupStage(t *testing.T) {
	i := 0
	now := utcNow()
	s := &DedupStage{
		hash: func(a *types.Alert) uint64 {
			res := uint64(i)
			i++
			return res
		},
		now: func() time.Time {
			return now
		},
		rs: sendResolved(false),
	}

	ctx := context.Background()

	_, _, err := s.Exec(ctx, promslog.NewNopLogger())
	require.EqualError(t, err, "group key missing")

	ctx = WithGroupKey(ctx, "1")

	_, _, err = s.Exec(ctx, promslog.NewNopLogger())
	require.EqualError(t, err, "repeat interval missing")

	ctx = WithRepeatInterval(ctx, time.Hour)

	alerts := []*types.Alert{{}, {}, {}}

	// Must catch notification log query errors.
	s.nflog = &testNflog{
		qerr: errors.New("bad things"),
	}
	ctx, _, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.EqualError(t, err, "bad things")

	// ... but skip ErrNotFound.
	s.nflog = &testNflog{
		qerr: nflog.ErrNotFound,
	}
	ctx, res, err := s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err, "unexpected error on not found log entry")
	require.Equal(t, alerts, res, "input alerts differ from result alerts")

	s.nflog = &testNflog{
		qerr: nil,
		qres: []*nflogpb.Entry{
			{FiringAlerts: []uint64{0, 1, 2}},
			{FiringAlerts: []uint64{1, 2, 3}},
		},
	}
	ctx, _, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.Contains(t, err.Error(), "result size")

	// Must return no error and no alerts when there is no need to update.
	i = 0
	s.nflog = &testNflog{
		qerr: nflog.ErrNotFound,
		qres: []*nflogpb.Entry{
			{
				FiringAlerts: []uint64{0, 1, 2},
				Timestamp:    now,
			},
		},
	}
	ctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Nil(t, res, "unexpected alerts returned")

	// Must return no error and all input alerts on changes.
	i = 0
	s.nflog = &testNflog{
		qerr: nil,
		qres: []*nflogpb.Entry{
			{
				FiringAlerts: []uint64{1, 2, 3, 4},
				Timestamp:    now,
			},
		},
	}
	_, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res, "unexpected alerts returned")
}
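
// TestMultiStage checks that MultiStage runs its sub-stages in order, passing
// each stage's output alerts and context on to the next one.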
func TestMultiStage(t *testing.T) {
	var (
		alerts1 = []*types.Alert{{}}
		alerts2 = []*types.Alert{{}, {}}
		alerts3 = []*types.Alert{{}, {}, {}}
	)

	stage := MultiStage{
		StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
			if !reflect.DeepEqual(alerts, alerts1) {
				t.Fatal("Input not equal to input of MultiStage")
			}
			//nolint:staticcheck // Ignore SA1029
			ctx = context.WithValue(ctx, "key", "value")
			return ctx, alerts2, nil
		}),
		StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
			if !reflect.DeepEqual(alerts, alerts2) {
				t.Fatal("Input not equal to output of previous stage")
			}
			v, ok := ctx.Value("key").(string)
			if !ok || v != "value" {
				t.Fatalf("Expected value %q for key %q but got %q", "value", "key", v)
			}
			return ctx, alerts3, nil
		}),
	}

	_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), alerts1...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}

	if !reflect.DeepEqual(alerts, alerts3) {
		t.Fatal("Output of MultiStage is not equal to the output of the last stage")
	}
}
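
// TestMultiStageFailure ensures an error returned by a sub-stage is propagated
// unchanged by MultiStage.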
func TestMultiStageFailure(t *testing.T) {
	var (
		ctx   = context.Background()
		s1    = failStage{}
		stage = MultiStage{s1}
	)

	_, _, err := stage.Exec(ctx, promslog.NewNopLogger(), nil)
	if err.Error() != "some error" {
		t.Fatal("Errors were not propagated correctly by MultiStage")
	}
}
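
// TestRoutingStage checks that RoutingStage dispatches alerts to the stage
// registered for the receiver name found in the context.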
func TestRoutingStage(t *testing.T) {
	var (
		alerts1 = []*types.Alert{{}}
		alerts2 = []*types.Alert{{}, {}}
	)

	stage := RoutingStage{
		"name": StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
			if !reflect.DeepEqual(alerts, alerts1) {
				t.Fatal("Input not equal to input of RoutingStage")
			}
			return ctx, alerts2, nil
		}),
		"not": failStage{},
	}

	ctx := WithReceiverName(context.Background(), "name")

	_, alerts, err := stage.Exec(ctx, promslog.NewNopLogger(), alerts1...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}

	if !reflect.DeepEqual(alerts, alerts2) {
		t.Fatal("Output of RoutingStage is not equal to the output of the inner stage")
	}
}
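
// TestRetryStageWithError checks that RetryStage retries a notification that
// fails with a recoverable error and gives up on an unrecoverable one.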
func TestRetryStageWithError(t *testing.T) {
	fail, retry := true, true
	sent := []*types.Alert{}
	i := Integration{
		notifier: notifierFunc(func(ctx context.Context, alerts ...*types.Alert) (bool, error) {
			if fail {
				fail = false
				return retry, errors.New("fail to deliver notification")
			}
			sent = append(sent, alerts...)
			return false, nil
		}),
		rs: sendResolved(false),
	}
	r := NewRetryStage(i, "", NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{}))

	alerts := []*types.Alert{
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(time.Hour),
			},
		},
	}

	ctx := context.Background()
	ctx = WithFiringAlerts(ctx, []uint64{0})

	// Notify with a recoverable error should retry and succeed.
	resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.Equal(t, alerts, sent)
	require.NotNil(t, resctx)

	// Notify with an unrecoverable error should fail.
	sent = sent[:0]
	fail = true
	retry = false
	resctx, _, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.Error(t, err)
	require.NotNil(t, resctx)
}
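
// TestRetryStageWithErrorCode verifies that failed notifications are counted
// in the failure metric under the reason label carried by the error.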
func TestRetryStageWithErrorCode(t *testing.T) {
	testcases := map[string]struct {
		isNewErrorWithReason bool
		reason               Reason
		reasonlabel          string
		expectedCount        int
	}{
		"for clientError":     {isNewErrorWithReason: true, reason: ClientErrorReason, reasonlabel: ClientErrorReason.String(), expectedCount: 1},
		"for serverError":     {isNewErrorWithReason: true, reason: ServerErrorReason, reasonlabel: ServerErrorReason.String(), expectedCount: 1},
		"for unexpected code": {isNewErrorWithReason: false, reason: DefaultReason, reasonlabel: DefaultReason.String(), expectedCount: 1},
	}
	for _, testData := range testcases {
		retry := false
		testData := testData
		i := Integration{
			name: "test",
			notifier: notifierFunc(func(ctx context.Context, alerts ...*types.Alert) (bool, error) {
				if !testData.isNewErrorWithReason {
					return retry, errors.New("fail to deliver notification")
				}
				return retry, NewErrorWithReason(testData.reason, errors.New("fail to deliver notification"))
			}),
			rs: sendResolved(false),
		}
		r := NewRetryStage(i, "", NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{}))

		alerts := []*types.Alert{
			{
				Alert: model.Alert{
					EndsAt: time.Now().Add(time.Hour),
				},
			},
		}

		ctx := context.Background()
		ctx = WithFiringAlerts(ctx, []uint64{0})

		// Notify with a non-recoverable error.
		resctx, _, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
		counter := r.metrics.numTotalFailedNotifications

		require.Equal(t, testData.expectedCount, int(prom_testutil.ToFloat64(counter.WithLabelValues(r.integration.Name(), testData.reasonlabel))))

		require.Error(t, err)
		require.NotNil(t, resctx)
	}
}
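
// TestRetryStageWithContextCanceled checks that a notification aborted by
// context cancellation is counted under the context-canceled failure reason.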
func TestRetryStageWithContextCanceled(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	i := Integration{
		name: "test",
		notifier: notifierFunc(func(ctx context.Context, alerts ...*types.Alert) (bool, error) {
			cancel()
			return true, errors.New("request failed: context canceled")
		}),
		rs: sendResolved(false),
	}
	r := NewRetryStage(i, "", NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{}))

	alerts := []*types.Alert{
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(time.Hour),
			},
		},
	}

	ctx = WithFiringAlerts(ctx, []uint64{0})

	// Notify with a non-recoverable error.
	resctx, _, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	counter := r.metrics.numTotalFailedNotifications

	require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues(r.integration.Name(), ContextCanceledReason.String()))))

	require.Error(t, err)
	require.NotNil(t, resctx)
}
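
// TestRetryStageNoResolved checks that resolved alerts are filtered out before
// notifying when the receiver does not send resolved notifications.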
func TestRetryStageNoResolved(t *testing.T) {
	sent := []*types.Alert{}
	i := Integration{
		notifier: notifierFunc(func(ctx context.Context, alerts ...*types.Alert) (bool, error) {
			sent = append(sent, alerts...)
			return false, nil
		}),
		rs: sendResolved(false),
	}
	r := NewRetryStage(i, "", NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{}))

	alerts := []*types.Alert{
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(-time.Hour),
			},
		},
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(time.Hour),
			},
		},
	}

	ctx := context.Background()

	resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.EqualError(t, err, "firing alerts missing")
	require.Nil(t, res)
	require.NotNil(t, resctx)

	ctx = WithFiringAlerts(ctx, []uint64{0})

	resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.Equal(t, []*types.Alert{alerts[1]}, sent)
	require.NotNil(t, resctx)

	// All alerts are resolved.
	sent = sent[:0]
	ctx = WithFiringAlerts(ctx, []uint64{})
	alerts[1].Alert.EndsAt = time.Now().Add(-time.Hour)

	resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.Equal(t, []*types.Alert{}, sent)
	require.NotNil(t, resctx)
}
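
// TestRetryStageSendResolved checks that resolved alerts are delivered when the
// receiver is configured to send resolved notifications.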
func TestRetryStageSendResolved(t *testing.T) {
	sent := []*types.Alert{}
	i := Integration{
		notifier: notifierFunc(func(ctx context.Context, alerts ...*types.Alert) (bool, error) {
			sent = append(sent, alerts...)
			return false, nil
		}),
		rs: sendResolved(true),
	}
	r := NewRetryStage(i, "", NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{}))

	alerts := []*types.Alert{
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(-time.Hour),
			},
		},
		{
			Alert: model.Alert{
				EndsAt: time.Now().Add(time.Hour),
			},
		},
	}

	ctx := context.Background()
	ctx = WithFiringAlerts(ctx, []uint64{0})

	resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.Equal(t, alerts, sent)
	require.NotNil(t, resctx)

	// All alerts are resolved.
	sent = sent[:0]
	ctx = WithFiringAlerts(ctx, []uint64{})
	alerts[1].Alert.EndsAt = time.Now().Add(-time.Hour)

	resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.Equal(t, alerts, sent)
	require.NotNil(t, resctx)
}
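
// TestSetNotifiesStage verifies that SetNotifiesStage validates the group key,
// firing and resolved alerts in the context, and records them in the
// notification log with an expiry of twice the repeat interval.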
func TestSetNotifiesStage(t *testing.T) {
	tnflog := &testNflog{}
	s := &SetNotifiesStage{
		recv:  &nflogpb.Receiver{GroupName: "test"},
		nflog: tnflog,
	}
	alerts := []*types.Alert{{}, {}, {}}
	ctx := context.Background()

	resctx, res, err := s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.EqualError(t, err, "group key missing")
	require.Nil(t, res)
	require.NotNil(t, resctx)

	ctx = WithGroupKey(ctx, "1")

	resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.EqualError(t, err, "firing alerts missing")
	require.Nil(t, res)
	require.NotNil(t, resctx)

	ctx = WithFiringAlerts(ctx, []uint64{0, 1, 2})

	resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.EqualError(t, err, "resolved alerts missing")
	require.Nil(t, res)
	require.NotNil(t, resctx)

	ctx = WithResolvedAlerts(ctx, []uint64{})
	ctx = WithRepeatInterval(ctx, time.Hour)

	tnflog.logFunc = func(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error {
		require.Equal(t, s.recv, r)
		require.Equal(t, "1", gkey)
		require.Equal(t, []uint64{0, 1, 2}, firingAlerts)
		require.Equal(t, []uint64{}, resolvedAlerts)
		require.Equal(t, 2*time.Hour, expiry)
		return nil
	}
	resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.NotNil(t, resctx)

	ctx = WithFiringAlerts(ctx, []uint64{})
	ctx = WithResolvedAlerts(ctx, []uint64{0, 1, 2})

	tnflog.logFunc = func(r *nflogpb.Receiver, gkey string, firingAlerts, resolvedAlerts []uint64, expiry time.Duration) error {
		require.Equal(t, s.recv, r)
		require.Equal(t, "1", gkey)
		require.Equal(t, []uint64{}, firingAlerts)
		require.Equal(t, []uint64{0, 1, 2}, resolvedAlerts)
		require.Equal(t, 2*time.Hour, expiry)
		return nil
	}
	resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
	require.NoError(t, err)
	require.Equal(t, alerts, res)
	require.NotNil(t, resctx)
}
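
// TestMuteStage checks that MuteStage drops alerts matched by the muter and
// counts each suppressed notification in the suppression metric.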
func TestMuteStage(t *testing.T) {
	// Mute all label sets that have a "mute" key.
	muter := types.MuteFunc(func(lset model.LabelSet) bool {
		_, ok := lset["mute"]
		return ok
	})

	metrics := NewMetrics(prometheus.NewRegistry(), featurecontrol.NoopFlags{})
	stage := NewMuteStage(muter, metrics)

	in := []model.LabelSet{
		{},
		{"test": "set"},
		{"mute": "me"},
		{"foo": "bar", "test": "set"},
		{"foo": "bar", "mute": "me"},
		{},
		{"not": "muted"},
	}
	out := []model.LabelSet{
		{},
		{"test": "set"},
		{"foo": "bar", "test": "set"},
		{},
		{"not": "muted"},
	}

	var inAlerts []*types.Alert
	for _, lset := range in {
		inAlerts = append(inAlerts, &types.Alert{
			Alert: model.Alert{Labels: lset},
		})
	}

	_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}

	var got []model.LabelSet
	for _, a := range alerts {
		got = append(got, a.Labels)
	}

	if !reflect.DeepEqual(got, out) {
		t.Fatalf("Muting failed, expected: %v\ngot %v", out, got)
	}
	suppressed := int(prom_testutil.ToFloat64(metrics.numNotificationSuppressedTotal))
	if (len(in) - len(got)) != suppressed {
		t.Fatalf("Expected %d alerts counted in suppressed metric but got %d", (len(in) - len(got)), suppressed)
	}
}
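
// TestMuteStageWithSilences exercises MuteStage backed by a real Silencer:
// silenced alerts are dropped, silence state is tracked across runs, and
// alerts pass through again once the silence is expired.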
func TestMuteStageWithSilences(t *testing.T) {
	silences, err := silence.New(silence.Options{Retention: time.Hour})
	if err != nil {
		t.Fatal(err)
	}
	sil := &silencepb.Silence{
		EndsAt:   utcNow().Add(time.Hour),
		Matchers: []*silencepb.Matcher{{Name: "mute", Pattern: "me"}},
	}
	if err = silences.Set(sil); err != nil {
		t.Fatal(err)
	}

	reg := prometheus.NewRegistry()
	marker := types.NewMarker(reg)
	silencer := silence.NewSilencer(silences, marker, promslog.NewNopLogger())
	metrics := NewMetrics(reg, featurecontrol.NoopFlags{})
	stage := NewMuteStage(silencer, metrics)

	in := []model.LabelSet{
		{},
		{"test": "set"},
		{"mute": "me"},
		{"foo": "bar", "test": "set"},
		{"foo": "bar", "mute": "me"},
		{},
		{"not": "muted"},
	}
	out := []model.LabelSet{
		{},
		{"test": "set"},
		{"foo": "bar", "test": "set"},
		{},
		{"not": "muted"},
	}

	var inAlerts []*types.Alert
	for _, lset := range in {
		inAlerts = append(inAlerts, &types.Alert{
			Alert: model.Alert{Labels: lset},
		})
	}

	// Set the second alert as previously silenced with an old version
	// number. This is expected to get unsilenced by the stage.
	marker.SetActiveOrSilenced(inAlerts[1].Fingerprint(), 0, []string{"123"}, nil)

	_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}

	var got []model.LabelSet
	for _, a := range alerts {
		got = append(got, a.Labels)
	}

	if !reflect.DeepEqual(got, out) {
		t.Fatalf("Muting failed, expected: %v\ngot %v", out, got)
	}
	suppressedRoundOne := int(prom_testutil.ToFloat64(metrics.numNotificationSuppressedTotal))
	if (len(in) - len(got)) != suppressedRoundOne {
		t.Fatalf("Expected %d alerts counted in suppressed metric but got %d", (len(in) - len(got)), suppressedRoundOne)
	}

	// Do it again to exercise the version tracking of silences.
	_, alerts, err = stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}

	got = got[:0]
	for _, a := range alerts {
		got = append(got, a.Labels)
	}

	if !reflect.DeepEqual(got, out) {
		t.Fatalf("Muting failed, expected: %v\ngot %v", out, got)
	}

	suppressedRoundTwo := int(prom_testutil.ToFloat64(metrics.numNotificationSuppressedTotal))
	if (len(in) - len(got) + suppressedRoundOne) != suppressedRoundTwo {
		t.Fatalf("Expected %d alerts counted in suppressed metric but got %d", (len(in) - len(got)), suppressedRoundTwo)
	}

	// Expire the silence and verify that no alerts are silenced now.
	if err := silences.Expire(sil.Id); err != nil {
		t.Fatal(err)
	}

	_, alerts, err = stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
	if err != nil {
		t.Fatalf("Exec failed: %s", err)
	}
	got = got[:0]
	for _, a := range alerts {
		got = append(got, a.Labels)
	}

	if !reflect.DeepEqual(got, in) {
		t.Fatalf("Unmuting failed, expected: %v\ngot %v", in, got)
	}
	suppressedRoundThree := int(prom_testutil.ToFloat64(metrics.numNotificationSuppressedTotal))
	if (len(in) - len(got) + suppressedRoundTwo) != suppressedRoundThree {
		t.Fatalf("Expected %d alerts counted in suppressed metric but got %d", (len(in) - len(got)), suppressedRoundThree)
	}
}
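
// TestTimeMuteStage checks that TimeMuteStage suppresses notifications that
// fall inside the route's mute time intervals, marks the group as muted, and
// increments the suppression metric accordingly.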
func TestTimeMuteStage(t *testing.T) {
	sydney, err := time.LoadLocation("Australia/Sydney")
	if err != nil {
		t.Fatalf("Failed to load location Australia/Sydney: %s", err)
	}
	eveningsAndWeekends := map[string][]timeinterval.TimeInterval{
		"evenings": {{
			Times: []timeinterval.TimeRange{{
				StartMinute: 0,   // 00:00
				EndMinute:   540, // 09:00
			}, {
				StartMinute: 1020, // 17:00
				EndMinute:   1440, // 24:00
			}},
			Location: &timeinterval.Location{Location: sydney},
		}},
		"weekends": {{
			Weekdays: []timeinterval.WeekdayRange{{
				InclusiveRange: timeinterval.InclusiveRange{Begin: 6, End: 6}, // Saturday
			}, {
				InclusiveRange: timeinterval.InclusiveRange{Begin: 0, End: 0}, // Sunday
			}},
			Location: &timeinterval.Location{Location: sydney},
		}},
	}

	tests := []struct {
		name      string
		intervals map[string][]timeinterval.TimeInterval
		now       time.Time
		alerts    []*types.Alert
		mutedBy   []string
	}{{
		name:      "Should be muted outside working hours",
		intervals: eveningsAndWeekends,
		now:       time.Date(2024, 1, 1, 0, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"evenings"},
	}, {
		name:      "Should not be muted during working hours",
		intervals: eveningsAndWeekends,
		now:       time.Date(2024, 1, 1, 9, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   nil,
	}, {
		name:      "Should be muted during weekends",
		intervals: eveningsAndWeekends,
		now:       time.Date(2024, 1, 6, 10, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"weekends"},
	}, {
		name:      "Should be muted at 12pm UTC on a weekday",
		intervals: eveningsAndWeekends,
		now:       time.Date(2024, 1, 1, 10, 0, 0, 0, time.UTC),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"evenings"},
	}, {
		name:      "Should be muted at 12pm UTC on a weekend",
		intervals: eveningsAndWeekends,
		now:       time.Date(2024, 1, 6, 10, 0, 0, 0, time.UTC),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"evenings", "weekends"},
	}}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			r := prometheus.NewRegistry()
			marker := types.NewMarker(r)
			metrics := NewMetrics(r, featurecontrol.NoopFlags{})
			intervener := timeinterval.NewIntervener(test.intervals)
			st := NewTimeMuteStage(intervener, marker, metrics)

			// Get the names of all time intervals for the context.
			muteTimeIntervalNames := make([]string, 0, len(test.intervals))
			for name := range test.intervals {
				muteTimeIntervalNames = append(muteTimeIntervalNames, name)
			}
			// Sort the names so we can compare mutedBy with test.mutedBy.
			sort.Strings(muteTimeIntervalNames)

			ctx := context.Background()
			ctx = WithNow(ctx, test.now)
			ctx = WithGroupKey(ctx, "group1")
			ctx = WithActiveTimeIntervals(ctx, nil)
			ctx = WithMuteTimeIntervals(ctx, muteTimeIntervalNames)
			ctx = WithRouteID(ctx, "route1")

			_, active, err := st.Exec(ctx, promslog.NewNopLogger(), test.alerts...)
			require.NoError(t, err)

			if len(test.mutedBy) == 0 {
				// All alerts should be active.
				require.Equal(t, len(test.alerts), len(active))
				// The group should not be marked.
				mutedBy, isMuted := marker.Muted("route1", "group1")
				require.False(t, isMuted)
				require.Empty(t, mutedBy)
				// The metric for total suppressed notifications should not
				// have been incremented, which means it will not be collected.
				require.NoError(t, prom_testutil.GatherAndCompare(r, strings.NewReader(`
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 0
alertmanager_marked_alerts{state="suppressed"} 0
alertmanager_marked_alerts{state="unprocessed"} 0
`)))
			} else {
				// All alerts should be muted.
				require.Empty(t, active)
				// The group should be marked as muted.
				mutedBy, isMuted := marker.Muted("route1", "group1")
				require.True(t, isMuted)
				require.Equal(t, test.mutedBy, mutedBy)
				// Gets the metric for total suppressed notifications.
				require.NoError(t, prom_testutil.GatherAndCompare(r, strings.NewReader(fmt.Sprintf(`
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 0
alertmanager_marked_alerts{state="suppressed"} 0
alertmanager_marked_alerts{state="unprocessed"} 0
# HELP alertmanager_notifications_suppressed_total The total number of notifications suppressed for being silenced, inhibited, outside of active time intervals or within muted time intervals.
# TYPE alertmanager_notifications_suppressed_total counter
alertmanager_notifications_suppressed_total{reason="mute_time_interval"} %d
`, len(test.alerts)))))
			}
		})
	}
}
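
// TestTimeActiveStage checks that TimeActiveStage suppresses notifications
// outside the route's active time intervals and lets them through inside them.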
func TestTimeActiveStage(t *testing.T) {
	sydney, err := time.LoadLocation("Australia/Sydney")
	if err != nil {
		t.Fatalf("Failed to load location Australia/Sydney: %s", err)
	}
	weekdays := map[string][]timeinterval.TimeInterval{
		"weekdays": {{
			Weekdays: []timeinterval.WeekdayRange{{
				InclusiveRange: timeinterval.InclusiveRange{
					Begin: 1, // Monday
					End:   5, // Friday
				},
			}},
			Times: []timeinterval.TimeRange{{
				StartMinute: 540,  // 09:00
				EndMinute:   1020, // 17:00
			}},
			Location: &timeinterval.Location{Location: sydney},
		}},
	}

	tests := []struct {
		name      string
		intervals map[string][]timeinterval.TimeInterval
		now       time.Time
		alerts    []*types.Alert
		mutedBy   []string
	}{{
		name:      "Should be muted outside working hours",
		intervals: weekdays,
		now:       time.Date(2024, 1, 1, 0, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"weekdays"},
	}, {
		name:      "Should not be muted during working hours",
		intervals: weekdays,
		now:       time.Date(2024, 1, 1, 9, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   nil,
	}, {
		name:      "Should be muted during weekends",
		intervals: weekdays,
		now:       time.Date(2024, 1, 6, 10, 0, 0, 0, sydney),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"weekdays"},
	}, {
		name:      "Should be muted at 12pm UTC",
		intervals: weekdays,
		now:       time.Date(2024, 1, 6, 10, 0, 0, 0, time.UTC),
		alerts:    []*types.Alert{{Alert: model.Alert{Labels: model.LabelSet{"foo": "bar"}}}},
		mutedBy:   []string{"weekdays"},
	}}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			r := prometheus.NewRegistry()
			marker := types.NewMarker(r)
			metrics := NewMetrics(r, featurecontrol.NoopFlags{})
			intervener := timeinterval.NewIntervener(test.intervals)
			st := NewTimeActiveStage(intervener, marker, metrics)

			// Get the names of all time intervals for the context.
			activeTimeIntervalNames := make([]string, 0, len(test.intervals))
			for name := range test.intervals {
				activeTimeIntervalNames = append(activeTimeIntervalNames, name)
			}
			// Sort the names so we can compare mutedBy with test.mutedBy.
			sort.Strings(activeTimeIntervalNames)

			ctx := context.Background()
			ctx = WithNow(ctx, test.now)
			ctx = WithGroupKey(ctx, "group1")
			ctx = WithActiveTimeIntervals(ctx, activeTimeIntervalNames)
			ctx = WithMuteTimeIntervals(ctx, nil)
			ctx = WithRouteID(ctx, "route1")

			_, active, err := st.Exec(ctx, promslog.NewNopLogger(), test.alerts...)
			require.NoError(t, err)

			if len(test.mutedBy) == 0 {
				// All alerts should be active.
				require.Equal(t, len(test.alerts), len(active))
				// The group should not be marked.
				mutedBy, isMuted := marker.Muted("route1", "group1")
				require.False(t, isMuted)
				require.Empty(t, mutedBy)
				// The metric for total suppressed notifications should not
				// have been incremented, which means it will not be collected.
				require.NoError(t, prom_testutil.GatherAndCompare(r, strings.NewReader(`
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 0
alertmanager_marked_alerts{state="suppressed"} 0
alertmanager_marked_alerts{state="unprocessed"} 0
`)))
			} else {
				// All alerts should be muted.
				require.Empty(t, active)
				// The group should be marked as muted.
				mutedBy, isMuted := marker.Muted("route1", "group1")
				require.True(t, isMuted)
				require.Equal(t, test.mutedBy, mutedBy)
				// Gets the metric for total suppressed notifications.
				require.NoError(t, prom_testutil.GatherAndCompare(r, strings.NewReader(fmt.Sprintf(`
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 0
alertmanager_marked_alerts{state="suppressed"} 0
alertmanager_marked_alerts{state="unprocessed"} 0
# HELP alertmanager_notifications_suppressed_total The total number of notifications suppressed for being silenced, inhibited, outside of active time intervals or within muted time intervals.
# TYPE alertmanager_notifications_suppressed_total counter
alertmanager_notifications_suppressed_total{reason="active_time_interval"} %d
`, len(test.alerts)))))
			}
		})
	}
}
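
// BenchmarkHashAlert measures the cost of hashing a single alert's label set.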
func BenchmarkHashAlert(b *testing.B) {
	alert := &types.Alert{
		Alert: model.Alert{
			Labels: model.LabelSet{"foo": "the_first_value", "bar": "the_second_value", "another": "value"},
		},
	}
	for i := 0; i < b.N; i++ {
		hashAlert(alert)
	}
}