chore!: adopt log/slog, drop go-kit/log (#4089)
* chore!: adopt log/slog, drop go-kit/log

The bulk of this change set was automated by the following script, which is being used to aid in converting the various exporters/projects to use slog: https://gist.github.com/tjhop/49f96fb7ebbe55b12deee0b0312d8434

This commit includes several changes:

- bump exporter-toolkit to v0.13.1 for log/slog support
- update deprecated golangci-lint configs
- enable the sloglint linter
- remove old go-kit/log linter configs
- introduce some `if logger == nil { $newLogger }` guards to prevent nil references
- convert the cluster membership config to use a stdlib-compatible slog adapter, rather than creating a custom io.Writer for use as the membership `logOutput` config

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

* chore: address PR feedback

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>

---------

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>
Parent: 82e804f651
Commit: f6b942cf9b
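For orientation before the diff: the mechanical core of the migration is replacing go-kit's `level.X(logger).Log("msg", ...)` calls with the leveled methods on `*slog.Logger`, and `log.With(logger, ...)` with the logger's own `With`. A minimal stdlib-only sketch of the before/after shape (illustrative, not part of the commit; Alertmanager itself constructs its loggers through `promslog`):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// A *slog.Logger with a debug-level text handler; Alertmanager builds its
	// logger via promslog.New, but any slog handler behaves the same way.
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Before (go-kit/log):
	//   level.Info(logger).Log("msg", "Starting Alertmanager", "version", version.Info())
	//   clusterLogger := log.With(logger, "component", "cluster")
	//   level.Debug(clusterLogger).Log("msg", "joined cluster", "peers", 3)
	//
	// After (log/slog):
	logger.Info("Starting Alertmanager", "version", "0.0.0") // "0.0.0" is a placeholder value
	clusterLogger := logger.With("component", "cluster")
	clusterLogger.Debug("joined cluster", "peers", 3)
}
```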
.golangci.yml

@ -1,9 +1,3 @@
-run:
-  skip-files:
-    # Skip autogenerated files.
-    - ^.*\.(pb|y)\.go$
-  timeout: 5m
-
output:
  sort-results: true

@ -17,6 +11,7 @@ linters:
    - misspell
    - revive
    - testifylint
+    - sloglint

issues:
  max-issues-per-linter: 0
@ -25,6 +20,10 @@ issues:
    - path: _test.go
      linters:
        - errcheck
+  exclude-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  timeout: 5m

linters-settings:
  depguard:
@ -48,8 +47,6 @@ linters-settings:
      - (net/http.ResponseWriter).Write
      # No need to check for errors on server's shutdown.
      - (*net/http.Server).Shutdown
-      # Never check for logger errors.
-      - (github.com/go-kit/log.Logger).Log
      # Never check for rollback errors as Rollback() is called when a previous error was detected.
      - (github.com/prometheus/prometheus/storage.Appender).Rollback
  godot:
api/api.go (11 changed lines)
|
@ -16,13 +16,14 @@ package api
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/prometheus/common/route"
|
||||
|
||||
apiv2 "github.com/prometheus/alertmanager/api/v2"
|
||||
|
@ -70,7 +71,7 @@ type Options struct {
|
|||
// the concurrency limit.
|
||||
Concurrency int
|
||||
// Logger is used for logging, if nil, no logging will happen.
|
||||
Logger log.Logger
|
||||
Logger *slog.Logger
|
||||
// Registry is used to register Prometheus metrics. If nil, no metrics
|
||||
// registration will happen.
|
||||
Registry prometheus.Registerer
|
||||
|
@ -107,7 +108,7 @@ func New(opts Options) (*API, error) {
|
|||
}
|
||||
l := opts.Logger
|
||||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
l = promslog.NewNopLogger()
|
||||
}
|
||||
concurrency := opts.Concurrency
|
||||
if concurrency < 1 {
|
||||
|
@ -124,7 +125,7 @@ func New(opts Options) (*API, error) {
|
|||
opts.GroupMutedFunc,
|
||||
opts.Silences,
|
||||
opts.Peer,
|
||||
log.With(l, "version", "v2"),
|
||||
l.With("version", "v2"),
|
||||
opts.Registry,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -153,7 +154,7 @@ func New(opts Options) (*API, error) {
|
|||
}
|
||||
|
||||
return &API{
|
||||
deprecationRouter: NewV1DeprecationRouter(log.With(l, "version", "v1")),
|
||||
deprecationRouter: NewV1DeprecationRouter(l.With("version", "v1")),
|
||||
v2: v2,
|
||||
requestsInFlight: requestsInFlight,
|
||||
concurrencyLimitExceeded: concurrencyLimitExceeded,
|
||||
|
|
|
@ -13,20 +13,19 @@ package api
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/prometheus/common/route"
|
||||
)
|
||||
|
||||
// V1DeprecationRouter is the router to signal v1 users that the API v1 is now removed.
|
||||
type V1DeprecationRouter struct {
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewV1DeprecationRouter returns a new V1DeprecationRouter.
|
||||
func NewV1DeprecationRouter(l log.Logger) *V1DeprecationRouter {
|
||||
func NewV1DeprecationRouter(l *slog.Logger) *V1DeprecationRouter {
|
||||
return &V1DeprecationRouter{
|
||||
logger: l,
|
||||
}
|
||||
|
@ -47,7 +46,7 @@ func (dr *V1DeprecationRouter) Register(r *route.Router) {
|
|||
}
|
||||
|
||||
func (dr *V1DeprecationRouter) deprecationHandler(w http.ResponseWriter, req *http.Request) {
|
||||
level.Warn(dr.logger).Log("msg", "v1 API received a request on a removed endpoint", "path", req.URL.Path, "method", req.Method)
|
||||
dr.logger.Warn("v1 API received a request on a removed endpoint", "path", req.URL.Path, "method", req.Method)
|
||||
|
||||
resp := struct {
|
||||
Status string `json:"status"`
|
||||
|
@ -61,6 +60,6 @@ func (dr *V1DeprecationRouter) deprecationHandler(w http.ResponseWriter, req *ht
|
|||
w.WriteHeader(410)
|
||||
|
||||
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
||||
level.Error(dr.logger).Log("msg", "failed to write response", "err", err)
|
||||
dr.logger.Error("failed to write response", "err", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,14 +16,13 @@ package v2
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/loads"
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
|
@ -71,7 +70,7 @@ type API struct {
|
|||
route *dispatch.Route
|
||||
setAlertStatus setAlertStatusFn
|
||||
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
m *metrics.Alerts
|
||||
|
||||
Handler http.Handler
|
||||
|
@ -92,7 +91,7 @@ func NewAPI(
|
|||
gmf groupMutedFunc,
|
||||
silences *silence.Silences,
|
||||
peer cluster.ClusterPeer,
|
||||
l log.Logger,
|
||||
l *slog.Logger,
|
||||
r prometheus.Registerer,
|
||||
) (*API, error) {
|
||||
api := API{
|
||||
|
@ -154,8 +153,8 @@ func setResponseHeaders(h http.Handler) http.Handler {
|
|||
})
|
||||
}
|
||||
|
||||
func (api *API) requestLogger(req *http.Request) log.Logger {
|
||||
return log.With(api.logger, "path", req.URL.Path, "method", req.Method)
|
||||
func (api *API) requestLogger(req *http.Request) *slog.Logger {
|
||||
return api.logger.With("path", req.URL.Path, "method", req.Method)
|
||||
}
|
||||
|
||||
// Update sets the API struct members that may change between reloads of alertmanager.
|
||||
|
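The `requestLogger` change above shows slog's child-logger idiom: request attributes are attached once with `With` and inherited by every later call. A standalone sketch (hypothetical example, using `httptest` to fabricate a request):

```go
package main

import (
	"log/slog"
	"net/http"
	"net/http/httptest"
	"os"
)

// requestLogger derives a child logger carrying request metadata, mirroring the
// api.requestLogger change above.
func requestLogger(base *slog.Logger, req *http.Request) *slog.Logger {
	return base.With("path", req.URL.Path, "method", req.Method)
}

func main() {
	base := slog.New(slog.NewTextHandler(os.Stdout, nil))
	req := httptest.NewRequest(http.MethodGet, "/api/v2/alerts", nil)

	logger := requestLogger(base, req)
	logger.Info("handling request") // the path and method attributes come along automatically
}
```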
@ -249,14 +248,14 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re
|
|||
|
||||
matchers, err := parseFilter(params.Filter)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err)
|
||||
logger.Debug("Failed to parse matchers", "err", err)
|
||||
return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
if params.Receiver != nil {
|
||||
receiverFilter, err = regexp.Compile("^(?:" + *params.Receiver + ")$")
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", "Failed to compile receiver regex", "err", err)
|
||||
logger.Debug("Failed to compile receiver regex", "err", err)
|
||||
return alert_ops.
|
||||
NewGetAlertsBadRequest().
|
||||
WithPayload(
|
||||
|
@ -301,7 +300,7 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re
|
|||
api.mtx.RUnlock()
|
||||
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to get alerts", "err", err)
|
||||
logger.Error("Failed to get alerts", "err", err)
|
||||
return alert_ops.NewGetAlertsInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
|
@ -361,12 +360,12 @@ func (api *API) postAlertsHandler(params alert_ops.PostAlertsParams) middleware.
|
|||
validAlerts = append(validAlerts, a)
|
||||
}
|
||||
if err := api.alerts.Put(validAlerts...); err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to create alerts", "err", err)
|
||||
logger.Error("Failed to create alerts", "err", err)
|
||||
return alert_ops.NewPostAlertsInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
if validationErrs.Len() > 0 {
|
||||
level.Error(logger).Log("msg", "Failed to validate alerts", "err", validationErrs.Error())
|
||||
logger.Error("Failed to validate alerts", "err", validationErrs.Error())
|
||||
return alert_ops.NewPostAlertsBadRequest().WithPayload(validationErrs.Error())
|
||||
}
|
||||
|
||||
|
@ -378,7 +377,7 @@ func (api *API) getAlertGroupsHandler(params alertgroup_ops.GetAlertGroupsParams
|
|||
|
||||
matchers, err := parseFilter(params.Filter)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err)
|
||||
logger.Debug("Failed to parse matchers", "err", err)
|
||||
return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
|
@ -386,7 +385,7 @@ func (api *API) getAlertGroupsHandler(params alertgroup_ops.GetAlertGroupsParams
|
|||
if params.Receiver != nil {
|
||||
receiverFilter, err = regexp.Compile("^(?:" + *params.Receiver + ")$")
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to compile receiver regex", "err", err)
|
||||
logger.Error("Failed to compile receiver regex", "err", err)
|
||||
return alertgroup_ops.
|
||||
NewGetAlertGroupsBadRequest().
|
||||
WithPayload(
|
||||
|
@ -518,13 +517,13 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew
|
|||
|
||||
matchers, err := parseFilter(params.Filter)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", "Failed to parse matchers", "err", err)
|
||||
logger.Debug("Failed to parse matchers", "err", err)
|
||||
return silence_ops.NewGetSilencesBadRequest().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
psils, _, err := api.silences.Query()
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to get silences", "err", err)
|
||||
logger.Error("Failed to get silences", "err", err)
|
||||
return silence_ops.NewGetSilencesInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
|
@ -535,7 +534,7 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew
|
|||
}
|
||||
silence, err := GettableSilenceFromProto(ps)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to unmarshal silence from proto", "err", err)
|
||||
logger.Error("Failed to unmarshal silence from proto", "err", err)
|
||||
return silence_ops.NewGetSilencesInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
sils = append(sils, &silence)
|
||||
|
@ -614,18 +613,18 @@ func (api *API) getSilenceHandler(params silence_ops.GetSilenceParams) middlewar
|
|||
|
||||
sils, _, err := api.silences.Query(silence.QIDs(params.SilenceID.String()))
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to get silence by id", "err", err, "id", params.SilenceID.String())
|
||||
logger.Error("Failed to get silence by id", "err", err, "id", params.SilenceID.String())
|
||||
return silence_ops.NewGetSilenceInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
if len(sils) == 0 {
|
||||
level.Error(logger).Log("msg", "Failed to find silence", "err", err, "id", params.SilenceID.String())
|
||||
logger.Error("Failed to find silence", "err", err, "id", params.SilenceID.String())
|
||||
return silence_ops.NewGetSilenceNotFound()
|
||||
}
|
||||
|
||||
sil, err := GettableSilenceFromProto(sils[0])
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to convert unmarshal from proto", "err", err)
|
||||
logger.Error("Failed to convert unmarshal from proto", "err", err)
|
||||
return silence_ops.NewGetSilenceInternalServerError().WithPayload(err.Error())
|
||||
}
|
||||
|
||||
|
@ -637,7 +636,7 @@ func (api *API) deleteSilenceHandler(params silence_ops.DeleteSilenceParams) mid
|
|||
|
||||
sid := params.SilenceID.String()
|
||||
if err := api.silences.Expire(sid); err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to expire silence", "err", err)
|
||||
logger.Error("Failed to expire silence", "err", err)
|
||||
if errors.Is(err, silence.ErrNotFound) {
|
||||
return silence_ops.NewDeleteSilenceNotFound()
|
||||
}
|
||||
|
@ -651,7 +650,7 @@ func (api *API) postSilencesHandler(params silence_ops.PostSilencesParams) middl
|
|||
|
||||
sil, err := PostableSilenceToProto(params.Silence)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to marshal silence to proto", "err", err)
|
||||
logger.Error("Failed to marshal silence to proto", "err", err)
|
||||
return silence_ops.NewPostSilencesBadRequest().WithPayload(
|
||||
fmt.Sprintf("failed to convert API silence to internal silence: %v", err.Error()),
|
||||
)
|
||||
|
@ -659,18 +658,18 @@ func (api *API) postSilencesHandler(params silence_ops.PostSilencesParams) middl
|
|||
|
||||
if sil.StartsAt.After(sil.EndsAt) || sil.StartsAt.Equal(sil.EndsAt) {
|
||||
msg := "Failed to create silence: start time must be before end time"
|
||||
level.Error(logger).Log("msg", msg, "starts_at", sil.StartsAt, "ends_at", sil.EndsAt)
|
||||
logger.Error(msg, "starts_at", sil.StartsAt, "ends_at", sil.EndsAt)
|
||||
return silence_ops.NewPostSilencesBadRequest().WithPayload(msg)
|
||||
}
|
||||
|
||||
if sil.EndsAt.Before(time.Now()) {
|
||||
msg := "Failed to create silence: end time can't be in the past"
|
||||
level.Error(logger).Log("msg", msg, "ends_at", sil.EndsAt)
|
||||
logger.Error(msg, "ends_at", sil.EndsAt)
|
||||
return silence_ops.NewPostSilencesBadRequest().WithPayload(msg)
|
||||
}
|
||||
|
||||
if err = api.silences.Set(sil); err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to create silence", "err", err)
|
||||
logger.Error("Failed to create silence", "err", err)
|
||||
if errors.Is(err, silence.ErrNotFound) {
|
||||
return silence_ops.NewPostSilencesNotFound().WithPayload(err.Error())
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
open_api_models "github.com/prometheus/alertmanager/api/v2/models"
|
||||
|
@ -39,8 +40,6 @@ import (
|
|||
"github.com/prometheus/alertmanager/silence"
|
||||
"github.com/prometheus/alertmanager/silence/silencepb"
|
||||
"github.com/prometheus/alertmanager/types"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
)
|
||||
|
||||
// If api.peers == nil, Alertmanager cluster feature is disabled. Make sure to
|
||||
|
@ -192,7 +191,7 @@ func TestDeleteSilenceHandler(t *testing.T) {
|
|||
api := API{
|
||||
uptime: time.Now(),
|
||||
silences: silences,
|
||||
logger: log.NewNopLogger(),
|
||||
logger: promslog.NewNopLogger(),
|
||||
}
|
||||
|
||||
r, err := http.NewRequest("DELETE", "/api/v2/silence/${tc.sid}", nil)
|
||||
|
@ -274,7 +273,7 @@ func TestPostSilencesHandler(t *testing.T) {
|
|||
api := API{
|
||||
uptime: time.Now(),
|
||||
silences: silences,
|
||||
logger: log.NewNopLogger(),
|
||||
logger: promslog.NewNopLogger(),
|
||||
}
|
||||
|
||||
sil := createSilence(t, tc.sid, "silenceCreator", tc.start, tc.end)
|
||||
|
@ -293,7 +292,7 @@ func TestPostSilencesHandlerMissingIdCreatesSilence(t *testing.T) {
|
|||
api := API{
|
||||
uptime: time.Now(),
|
||||
silences: silences,
|
||||
logger: log.NewNopLogger(),
|
||||
logger: promslog.NewNopLogger(),
|
||||
}
|
||||
|
||||
// Create a new silence. It should be assigned a random UUID.
|
||||
|
@ -557,7 +556,7 @@ receivers:
|
|||
cfg, _ := config.Load(in)
|
||||
api := API{
|
||||
uptime: time.Now(),
|
||||
logger: log.NewNopLogger(),
|
||||
logger: promslog.NewNopLogger(),
|
||||
alertmanagerConfig: cfg,
|
||||
}
|
||||
|
||||
|
|
cli/root.go (10 changed lines)
|
@ -22,11 +22,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
clientruntime "github.com/go-openapi/runtime/client"
|
||||
"github.com/go-openapi/strfmt"
|
||||
promconfig "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/prometheus/common/version"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
|
@ -51,12 +50,11 @@ var (
|
|||
)
|
||||
|
||||
func initMatchersCompat(_ *kingpin.ParseContext) error {
|
||||
logger := log.NewLogfmtLogger(os.Stdout)
|
||||
promslogConfig := &promslog.Config{Writer: os.Stdout}
|
||||
if verbose {
|
||||
logger = level.NewFilter(logger, level.AllowDebug())
|
||||
} else {
|
||||
logger = level.NewFilter(logger, level.AllowInfo())
|
||||
_ = promslogConfig.Level.Set("debug")
|
||||
}
|
||||
logger := promslog.New(promslogConfig)
|
||||
featureConfig, err := featurecontrol.NewFlags(logger, featureFlags)
|
||||
if err != nil {
|
||||
kingpin.Fatalf("error parsing the feature flag list: %v\n", err)
|
||||
|
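For the CLI, the logfmt logger plus go-kit `level.NewFilter` is replaced by a `promslog.Config` handed to `promslog.New`, which returns a ready `*slog.Logger`. A minimal sketch of that construction (level/verbose handling simplified relative to the real flag wiring):

```go
package main

import (
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Only Writer is set here; promslog applies its defaults for level and
	// format, the same construction used by initMatchersCompat above.
	cfg := &promslog.Config{Writer: os.Stdout}
	logger := promslog.New(cfg)

	logger.Info("matchers compat initialized", "verbose", false)
}
```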
|
|
@ -14,11 +14,10 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -35,7 +34,7 @@ type Channel struct {
|
|||
sendOversize func(*memberlist.Node, []byte) error
|
||||
|
||||
msgc chan []byte
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
|
||||
oversizeGossipMessageFailureTotal prometheus.Counter
|
||||
oversizeGossipMessageDroppedTotal prometheus.Counter
|
||||
|
@ -50,7 +49,7 @@ func NewChannel(
|
|||
send func([]byte),
|
||||
peers func() []*memberlist.Node,
|
||||
sendOversize func(*memberlist.Node, []byte) error,
|
||||
logger log.Logger,
|
||||
logger *slog.Logger,
|
||||
stopc chan struct{},
|
||||
reg prometheus.Registerer,
|
||||
) *Channel {
|
||||
|
@ -113,7 +112,7 @@ func (c *Channel) handleOverSizedMessages(stopc chan struct{}) {
|
|||
c.oversizeGossipMessageSentTotal.Inc()
|
||||
start := time.Now()
|
||||
if err := c.sendOversize(n, b); err != nil {
|
||||
level.Debug(c.logger).Log("msg", "failed to send reliable", "key", c.key, "node", n, "err", err)
|
||||
c.logger.Debug("failed to send reliable", "key", c.key, "node", n, "err", err)
|
||||
c.oversizeGossipMessageFailureTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
@ -139,7 +138,7 @@ func (c *Channel) Broadcast(b []byte) {
|
|||
select {
|
||||
case c.msgc <- b:
|
||||
default:
|
||||
level.Debug(c.logger).Log("msg", "oversized gossip channel full")
|
||||
c.logger.Debug("oversized gossip channel full")
|
||||
c.oversizeGossipMessageDroppedTotal.Inc()
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -20,9 +20,9 @@ import (
|
|||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/promslog"
|
||||
)
|
||||
|
||||
func TestNormalMessagesGossiped(t *testing.T) {
|
||||
|
@ -82,7 +82,7 @@ func newChannel(
|
|||
send,
|
||||
peers,
|
||||
sendOversize,
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
make(chan struct{}),
|
||||
prometheus.NewRegistry(),
|
||||
)
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sort"
|
||||
|
@ -25,8 +26,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/oklog/ulid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -82,7 +81,7 @@ type Peer struct {
|
|||
peerUpdateCounter prometheus.Counter
|
||||
peerJoinCounter prometheus.Counter
|
||||
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// peer is an internal type used for bookkeeping. It holds the state of peers
|
||||
|
@ -129,7 +128,7 @@ const (
|
|||
)
|
||||
|
||||
func Create(
|
||||
l log.Logger,
|
||||
l *slog.Logger,
|
||||
reg prometheus.Registerer,
|
||||
bindAddr string,
|
||||
advertiseAddr string,
|
||||
|
@ -171,19 +170,19 @@ func Create(
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve peers: %w", err)
|
||||
}
|
||||
level.Debug(l).Log("msg", "resolved peers to following addresses", "peers", strings.Join(resolvedPeers, ","))
|
||||
l.Debug("resolved peers to following addresses", "peers", strings.Join(resolvedPeers, ","))
|
||||
|
||||
// Initial validation of user-specified advertise address.
|
||||
addr, err := calculateAdvertiseAddress(bindHost, advertiseHost, allowInsecureAdvertise)
|
||||
if err != nil {
|
||||
level.Warn(l).Log("err", "couldn't deduce an advertise address: "+err.Error())
|
||||
l.Warn("couldn't deduce an advertise address: " + err.Error())
|
||||
} else if hasNonlocal(resolvedPeers) && isUnroutable(addr.String()) {
|
||||
level.Warn(l).Log("err", "this node advertises itself on an unroutable address", "addr", addr.String())
|
||||
level.Warn(l).Log("err", "this node will be unreachable in the cluster")
|
||||
level.Warn(l).Log("err", "provide --cluster.advertise-address as a routable IP address or hostname")
|
||||
l.Warn("this node advertises itself on an unroutable address", "addr", addr.String())
|
||||
l.Warn("this node will be unreachable in the cluster")
|
||||
l.Warn("provide --cluster.advertise-address as a routable IP address or hostname")
|
||||
} else if isAny(bindAddr) && advertiseHost == "" {
|
||||
// memberlist doesn't advertise properly when the bind address is empty or unspecified.
|
||||
level.Info(l).Log("msg", "setting advertise address explicitly", "addr", addr.String(), "port", bindPort)
|
||||
l.Info("setting advertise address explicitly", "addr", addr.String(), "port", bindPort)
|
||||
advertiseHost = addr.String()
|
||||
advertisePort = bindPort
|
||||
}
|
||||
|
@ -225,7 +224,7 @@ func Create(
|
|||
cfg.TCPTimeout = tcpTimeout
|
||||
cfg.ProbeTimeout = probeTimeout
|
||||
cfg.ProbeInterval = probeInterval
|
||||
cfg.LogOutput = &logWriter{l: l}
|
||||
cfg.Logger = slog.NewLogLogger(l.Handler(), slog.LevelDebug)
|
||||
cfg.GossipNodes = retransmit
|
||||
cfg.UDPBufferSize = MaxGossipPacketSize
|
||||
cfg.Label = label
|
||||
|
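Instead of the custom `logWriter` io.Writer adapter (removed a few hunks below), memberlist is now given a stdlib `*log.Logger` built from the slog handler via `slog.NewLogLogger`, so its output flows through the same handler at debug level. A small sketch assuming the `hashicorp/memberlist` API as imported above:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/hashicorp/memberlist"
)

func main() {
	l := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	cfg := memberlist.DefaultLANConfig()
	// memberlist logs through a *log.Logger; slog.NewLogLogger bridges it to the
	// slog handler and tags everything memberlist writes as debug.
	cfg.Logger = slog.NewLogLogger(l.Handler(), slog.LevelDebug)

	ml, err := memberlist.Create(cfg)
	if err != nil {
		l.Error("failed to create memberlist", "err", err)
		return
	}
	defer func() { _ = ml.Shutdown() }()

	l.Info("memberlist started", "members", ml.NumMembers())
}
```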
@ -239,7 +238,7 @@ func Create(
|
|||
}
|
||||
|
||||
if tlsTransportConfig != nil {
|
||||
level.Info(l).Log("msg", "using TLS for gossip")
|
||||
l.Info("using TLS for gossip")
|
||||
cfg.Transport, err = NewTLSTransport(context.Background(), l, reg, cfg.BindAddr, cfg.BindPort, tlsTransportConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tls transport: %w", err)
|
||||
|
@ -260,12 +259,12 @@ func (p *Peer) Join(
|
|||
) error {
|
||||
n, err := p.mlist.Join(p.resolvedPeers)
|
||||
if err != nil {
|
||||
level.Warn(p.logger).Log("msg", "failed to join cluster", "err", err)
|
||||
p.logger.Warn("failed to join cluster", "err", err)
|
||||
if reconnectInterval != 0 {
|
||||
level.Info(p.logger).Log("msg", fmt.Sprintf("will retry joining cluster every %v", reconnectInterval.String()))
|
||||
p.logger.Info(fmt.Sprintf("will retry joining cluster every %v", reconnectInterval.String()))
|
||||
}
|
||||
} else {
|
||||
level.Debug(p.logger).Log("msg", "joined cluster", "peers", n)
|
||||
p.logger.Debug("joined cluster", "peers", n)
|
||||
}
|
||||
|
||||
if reconnectInterval != 0 {
|
||||
|
@ -333,14 +332,6 @@ func (p *Peer) setInitialFailed(peers []string, myAddr string) {
|
|||
}
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
l log.Logger
|
||||
}
|
||||
|
||||
func (l *logWriter) Write(b []byte) (int, error) {
|
||||
return len(b), level.Debug(l.l).Log("memberlist", string(b))
|
||||
}
|
||||
|
||||
func (p *Peer) register(reg prometheus.Registerer, name string) {
|
||||
peerInfo := prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
|
@ -420,7 +411,7 @@ func (p *Peer) removeFailedPeers(timeout time.Duration) {
|
|||
if pr.leaveTime.Add(timeout).After(now) {
|
||||
keep = append(keep, pr)
|
||||
} else {
|
||||
level.Debug(p.logger).Log("msg", "failed peer has timed out", "peer", pr.Node, "addr", pr.Address())
|
||||
p.logger.Debug("failed peer has timed out", "peer", pr.Node, "addr", pr.Address())
|
||||
delete(p.peers, pr.Name)
|
||||
}
|
||||
}
|
||||
|
@ -433,27 +424,27 @@ func (p *Peer) reconnect() {
|
|||
failedPeers := p.failedPeers
|
||||
p.peerLock.RUnlock()
|
||||
|
||||
logger := log.With(p.logger, "msg", "reconnect")
|
||||
logger := p.logger.With("msg", "reconnect")
|
||||
for _, pr := range failedPeers {
|
||||
// No need to do book keeping on failedPeers here. If a
|
||||
// reconnect is successful, they will be announced in
|
||||
// peerJoin().
|
||||
if _, err := p.mlist.Join([]string{pr.Address()}); err != nil {
|
||||
p.failedReconnectionsCounter.Inc()
|
||||
level.Debug(logger).Log("result", "failure", "peer", pr.Node, "addr", pr.Address(), "err", err)
|
||||
logger.Debug("failure", "peer", pr.Node, "addr", pr.Address(), "err", err)
|
||||
} else {
|
||||
p.reconnectionsCounter.Inc()
|
||||
level.Debug(logger).Log("result", "success", "peer", pr.Node, "addr", pr.Address())
|
||||
logger.Debug("success", "peer", pr.Node, "addr", pr.Address())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) refresh() {
|
||||
logger := log.With(p.logger, "msg", "refresh")
|
||||
logger := p.logger.With("msg", "refresh")
|
||||
|
||||
resolvedPeers, err := resolvePeers(context.Background(), p.knownPeers, p.advertiseAddr, &net.Resolver{}, false)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("peers", p.knownPeers, "err", err)
|
||||
logger.Debug(fmt.Sprintf("%v", p.knownPeers), "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -470,10 +461,10 @@ func (p *Peer) refresh() {
|
|||
if !isPeerFound {
|
||||
if _, err := p.mlist.Join([]string{peer}); err != nil {
|
||||
p.failedRefreshCounter.Inc()
|
||||
level.Warn(logger).Log("result", "failure", "addr", peer, "err", err)
|
||||
logger.Warn("failure", "addr", peer, "err", err)
|
||||
} else {
|
||||
p.refreshCounter.Inc()
|
||||
level.Debug(logger).Log("result", "success", "addr", peer)
|
||||
logger.Debug("success", "addr", peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -502,7 +493,7 @@ func (p *Peer) peerJoin(n *memberlist.Node) {
|
|||
p.peerJoinCounter.Inc()
|
||||
|
||||
if oldStatus == StatusFailed {
|
||||
level.Debug(p.logger).Log("msg", "peer rejoined", "peer", pr.Node)
|
||||
p.logger.Debug("peer rejoined", "peer", pr.Node)
|
||||
p.failedPeers = removeOldPeer(p.failedPeers, pr.Address())
|
||||
}
|
||||
}
|
||||
|
@ -524,7 +515,7 @@ func (p *Peer) peerLeave(n *memberlist.Node) {
|
|||
p.peers[n.Address()] = pr
|
||||
|
||||
p.peerLeaveCounter.Inc()
|
||||
level.Debug(p.logger).Log("msg", "peer left", "peer", pr.Node)
|
||||
p.logger.Debug("peer left", "peer", pr.Node)
|
||||
}
|
||||
|
||||
func (p *Peer) peerUpdate(n *memberlist.Node) {
|
||||
|
@ -542,7 +533,7 @@ func (p *Peer) peerUpdate(n *memberlist.Node) {
|
|||
p.peers[n.Address()] = pr
|
||||
|
||||
p.peerUpdateCounter.Inc()
|
||||
level.Debug(p.logger).Log("msg", "peer updated", "peer", pr.Node)
|
||||
p.logger.Debug("peer updated", "peer", pr.Node)
|
||||
}
|
||||
|
||||
// AddState adds a new state that will be gossiped. It returns a channel to which
|
||||
|
@ -574,7 +565,7 @@ func (p *Peer) AddState(key string, s State, reg prometheus.Registerer) ClusterC
|
|||
// Leave the cluster, waiting up to timeout.
|
||||
func (p *Peer) Leave(timeout time.Duration) error {
|
||||
close(p.stopc)
|
||||
level.Debug(p.logger).Log("msg", "leaving cluster")
|
||||
p.logger.Debug("leaving cluster")
|
||||
return p.mlist.Leave(timeout)
|
||||
}
|
||||
|
||||
|
@ -680,7 +671,7 @@ func (p *Peer) Position() int {
|
|||
// This is especially important for those that do not have persistent storage.
|
||||
func (p *Peer) Settle(ctx context.Context, interval time.Duration) {
|
||||
const NumOkayRequired = 3
|
||||
level.Info(p.logger).Log("msg", "Waiting for gossip to settle...", "interval", interval)
|
||||
p.logger.Info("Waiting for gossip to settle...", "interval", interval)
|
||||
start := time.Now()
|
||||
nPeers := 0
|
||||
nOkay := 0
|
||||
|
@ -689,7 +680,7 @@ func (p *Peer) Settle(ctx context.Context, interval time.Duration) {
|
|||
select {
|
||||
case <-ctx.Done():
|
||||
elapsed := time.Since(start)
|
||||
level.Info(p.logger).Log("msg", "gossip not settled but continuing anyway", "polls", totalPolls, "elapsed", elapsed)
|
||||
p.logger.Info("gossip not settled but continuing anyway", "polls", totalPolls, "elapsed", elapsed)
|
||||
close(p.readyc)
|
||||
return
|
||||
case <-time.After(interval):
|
||||
|
@ -697,15 +688,15 @@ func (p *Peer) Settle(ctx context.Context, interval time.Duration) {
|
|||
elapsed := time.Since(start)
|
||||
n := len(p.Peers())
|
||||
if nOkay >= NumOkayRequired {
|
||||
level.Info(p.logger).Log("msg", "gossip settled; proceeding", "elapsed", elapsed)
|
||||
p.logger.Info("gossip settled; proceeding", "elapsed", elapsed)
|
||||
break
|
||||
}
|
||||
if n == nPeers {
|
||||
nOkay++
|
||||
level.Debug(p.logger).Log("msg", "gossip looks settled", "elapsed", elapsed)
|
||||
p.logger.Debug("gossip looks settled", "elapsed", elapsed)
|
||||
} else {
|
||||
nOkay = 0
|
||||
level.Info(p.logger).Log("msg", "gossip not settled", "polls", totalPolls, "before", nPeers, "now", n, "elapsed", elapsed)
|
||||
p.logger.Info("gossip not settled", "polls", totalPolls, "before", nPeers, "now", n, "elapsed", elapsed)
|
||||
}
|
||||
nPeers = n
|
||||
totalPolls++
|
||||
|
|
|
@ -18,11 +18,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/hashicorp/go-sockaddr"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/promslog"
|
||||
)
|
||||
|
||||
func TestClusterJoinAndReconnect(t *testing.T) {
|
||||
|
@ -39,7 +39,7 @@ func TestClusterJoinAndReconnect(t *testing.T) {
|
|||
}
|
||||
|
||||
func testJoinLeave(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
p, err := Create(
|
||||
logger,
|
||||
prometheus.NewRegistry(),
|
||||
|
@ -110,7 +110,7 @@ func testJoinLeave(t *testing.T) {
|
|||
}
|
||||
|
||||
func testReconnect(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
p, err := Create(
|
||||
logger,
|
||||
prometheus.NewRegistry(),
|
||||
|
@ -177,7 +177,7 @@ func testReconnect(t *testing.T) {
|
|||
}
|
||||
|
||||
func testRemoveFailedPeers(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
p, err := Create(
|
||||
logger,
|
||||
prometheus.NewRegistry(),
|
||||
|
@ -227,7 +227,7 @@ func testRemoveFailedPeers(t *testing.T) {
|
|||
}
|
||||
|
||||
func testInitiallyFailingPeers(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
myAddr := "1.2.3.4:5000"
|
||||
peerAddrs := []string{myAddr, "2.3.4.5:5000", "3.4.5.6:5000", "foo.example.com:5000"}
|
||||
p, err := Create(
|
||||
|
@ -275,7 +275,7 @@ func testInitiallyFailingPeers(t *testing.T) {
|
|||
}
|
||||
|
||||
func testTLSConnection(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
tlsTransportConfig1, err := GetTLSTransportConfig("./testdata/tls_config_node1.yml")
|
||||
require.NoError(t, err)
|
||||
p1, err := Create(
|
||||
|
|
|
@ -14,10 +14,9 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -37,7 +36,7 @@ const (
|
|||
type delegate struct {
|
||||
*Peer
|
||||
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
bcast *memberlist.TransmitLimitedQueue
|
||||
|
||||
messagesReceived *prometheus.CounterVec
|
||||
|
@ -49,7 +48,7 @@ type delegate struct {
|
|||
nodePingDuration *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
func newDelegate(l log.Logger, reg prometheus.Registerer, p *Peer, retransmit int) *delegate {
|
||||
func newDelegate(l *slog.Logger, reg prometheus.Registerer, p *Peer, retransmit int) *delegate {
|
||||
bcast := &memberlist.TransmitLimitedQueue{
|
||||
NumNodes: p.ClusterSize,
|
||||
RetransmitMult: retransmit,
|
||||
|
@ -157,7 +156,7 @@ func (d *delegate) NotifyMsg(b []byte) {
|
|||
|
||||
var p clusterpb.Part
|
||||
if err := proto.Unmarshal(b, &p); err != nil {
|
||||
level.Warn(d.logger).Log("msg", "decode broadcast", "err", err)
|
||||
d.logger.Warn("decode broadcast", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -169,7 +168,7 @@ func (d *delegate) NotifyMsg(b []byte) {
|
|||
return
|
||||
}
|
||||
if err := s.Merge(p.Data); err != nil {
|
||||
level.Warn(d.logger).Log("msg", "merge broadcast", "err", err, "key", p.Key)
|
||||
d.logger.Warn("merge broadcast", "err", err, "key", p.Key)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -195,14 +194,14 @@ func (d *delegate) LocalState(_ bool) []byte {
|
|||
for key, s := range d.states {
|
||||
b, err := s.MarshalBinary()
|
||||
if err != nil {
|
||||
level.Warn(d.logger).Log("msg", "encode local state", "err", err, "key", key)
|
||||
d.logger.Warn("encode local state", "err", err, "key", key)
|
||||
return nil
|
||||
}
|
||||
all.Parts = append(all.Parts, clusterpb.Part{Key: key, Data: b})
|
||||
}
|
||||
b, err := proto.Marshal(all)
|
||||
if err != nil {
|
||||
level.Warn(d.logger).Log("msg", "encode local state", "err", err)
|
||||
d.logger.Warn("encode local state", "err", err)
|
||||
return nil
|
||||
}
|
||||
d.messagesSent.WithLabelValues(fullState).Inc()
|
||||
|
@ -216,7 +215,7 @@ func (d *delegate) MergeRemoteState(buf []byte, _ bool) {
|
|||
|
||||
var fs clusterpb.FullState
|
||||
if err := proto.Unmarshal(buf, &fs); err != nil {
|
||||
level.Warn(d.logger).Log("msg", "merge remote state", "err", err)
|
||||
d.logger.Warn("merge remote state", "err", err)
|
||||
return
|
||||
}
|
||||
d.mtx.RLock()
|
||||
|
@ -224,11 +223,11 @@ func (d *delegate) MergeRemoteState(buf []byte, _ bool) {
|
|||
for _, p := range fs.Parts {
|
||||
s, ok := d.states[p.Key]
|
||||
if !ok {
|
||||
level.Warn(d.logger).Log("received", "unknown state key", "len", len(buf), "key", p.Key)
|
||||
d.logger.Warn("unknown state key", "len", len(buf), "key", p.Key)
|
||||
continue
|
||||
}
|
||||
if err := s.Merge(p.Data); err != nil {
|
||||
level.Warn(d.logger).Log("msg", "merge remote state", "err", err, "key", p.Key)
|
||||
d.logger.Warn("merge remote state", "err", err, "key", p.Key)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -236,19 +235,19 @@ func (d *delegate) MergeRemoteState(buf []byte, _ bool) {
|
|||
|
||||
// NotifyJoin is called if a peer joins the cluster.
|
||||
func (d *delegate) NotifyJoin(n *memberlist.Node) {
|
||||
level.Debug(d.logger).Log("received", "NotifyJoin", "node", n.Name, "addr", n.Address())
|
||||
d.logger.Debug("NotifyJoin", "node", n.Name, "addr", n.Address())
|
||||
d.Peer.peerJoin(n)
|
||||
}
|
||||
|
||||
// NotifyLeave is called if a peer leaves the cluster.
|
||||
func (d *delegate) NotifyLeave(n *memberlist.Node) {
|
||||
level.Debug(d.logger).Log("received", "NotifyLeave", "node", n.Name, "addr", n.Address())
|
||||
d.logger.Debug("NotifyLeave", "node", n.Name, "addr", n.Address())
|
||||
d.Peer.peerLeave(n)
|
||||
}
|
||||
|
||||
// NotifyUpdate is called if a cluster peer gets updated.
|
||||
func (d *delegate) NotifyUpdate(n *memberlist.Node) {
|
||||
level.Debug(d.logger).Log("received", "NotifyUpdate", "node", n.Name, "addr", n.Address())
|
||||
d.logger.Debug("NotifyUpdate", "node", n.Name, "addr", n.Address())
|
||||
d.Peer.peerUpdate(n)
|
||||
}
|
||||
|
||||
|
@ -278,7 +277,7 @@ func (d *delegate) handleQueueDepth() {
|
|||
case <-time.After(15 * time.Minute):
|
||||
n := d.bcast.NumQueued()
|
||||
if n > maxQueueSize {
|
||||
level.Warn(d.logger).Log("msg", "dropping messages because too many are queued", "current", n, "limit", maxQueueSize)
|
||||
d.logger.Warn("dropping messages because too many are queued", "current", n, "limit", maxQueueSize)
|
||||
d.bcast.Prune(maxQueueSize)
|
||||
d.messagesPruned.Add(float64(n - maxQueueSize))
|
||||
}
|
||||
|
|
|
@ -22,12 +22,11 @@ import (
|
|||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/hashicorp/go-sockaddr"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -46,7 +45,7 @@ const (
|
|||
type TLSTransport struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
bindAddr string
|
||||
bindPort int
|
||||
done chan struct{}
|
||||
|
@ -71,7 +70,7 @@ type TLSTransport struct {
|
|||
// a free port automatically.
|
||||
func NewTLSTransport(
|
||||
ctx context.Context,
|
||||
logger log.Logger,
|
||||
logger *slog.Logger,
|
||||
reg prometheus.Registerer,
|
||||
bindAddr string,
|
||||
bindPort int,
|
||||
|
@ -188,7 +187,7 @@ func (t *TLSTransport) StreamCh() <-chan net.Conn {
|
|||
// Shutdown is called when memberlist is shutting down; this gives the
|
||||
// TLS Transport a chance to clean up the listener and other goroutines.
|
||||
func (t *TLSTransport) Shutdown() error {
|
||||
level.Debug(t.logger).Log("msg", "shutting down tls transport")
|
||||
t.logger.Debug("shutting down tls transport")
|
||||
t.cancel()
|
||||
err := t.listener.Close()
|
||||
t.connPool.shutdown()
|
||||
|
@ -255,7 +254,7 @@ func (t *TLSTransport) listen() {
|
|||
return
|
||||
}
|
||||
t.readErrs.Inc()
|
||||
level.Debug(t.logger).Log("msg", "error accepting connection", "err", err)
|
||||
t.logger.Debug("error accepting connection", "err", err)
|
||||
|
||||
} else {
|
||||
go t.handle(conn)
|
||||
|
@ -268,7 +267,7 @@ func (t *TLSTransport) handle(conn net.Conn) {
|
|||
for {
|
||||
packet, err := rcvTLSConn(conn).read()
|
||||
if err != nil {
|
||||
level.Debug(t.logger).Log("msg", "error reading from connection", "err", err)
|
||||
t.logger.Debug("error reading from connection", "err", err)
|
||||
t.readErrs.Inc()
|
||||
return
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ package cluster
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
context2 "context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -23,11 +24,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var logger = log.NewNopLogger()
|
||||
var logger = promslog.NewNopLogger()
|
||||
|
||||
func freeport() int {
|
||||
lis, _ := net.Listen(network, "127.0.0.1:0")
|
||||
|
@ -42,7 +43,7 @@ func newTLSTransport(file, address string, port int) (*TLSTransport, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return NewTLSTransport(context2.Background(), log.NewNopLogger(), nil, address, port, cfg)
|
||||
return NewTLSTransport(context2.Background(), promslog.NewNopLogger(), nil, address, port, cfg)
|
||||
}
|
||||
|
||||
func TestNewTLSTransport(t *testing.T) {
|
||||
|
@ -229,25 +230,21 @@ func TestDialTimeout(t *testing.T) {
|
|||
require.Equal(t, sent, buf)
|
||||
}
|
||||
|
||||
type logWr struct {
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
func (l *logWr) Write(p []byte) (n int, err error) {
|
||||
l.bytes = append(l.bytes, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func TestShutdown(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
promslogConfig := &promslog.Config{Writer: &buf}
|
||||
logger := promslog.New(promslogConfig)
|
||||
// Set logger to debug, otherwise it won't catch some logging from `Shutdown()` method.
|
||||
_ = promslogConfig.Level.Set("debug")
|
||||
|
||||
tlsConf1 := loadTLSTransportConfig(t, "testdata/tls_config_node1.yml")
|
||||
l := &logWr{}
|
||||
t1, _ := NewTLSTransport(context2.Background(), log.NewLogfmtLogger(l), nil, "127.0.0.1", 0, tlsConf1)
|
||||
t1, _ := NewTLSTransport(context2.Background(), logger, nil, "127.0.0.1", 0, tlsConf1)
|
||||
// Sleeping to make sure listeners have started and can subsequently be shut down gracefully.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
err := t1.Shutdown()
|
||||
require.NoError(t, err)
|
||||
require.NotContains(t, string(l.bytes), "use of closed network connection")
|
||||
require.Contains(t, string(l.bytes), "shutting down tls transport")
|
||||
require.NotContains(t, buf.String(), "use of closed network connection")
|
||||
require.Contains(t, buf.String(), "shutting down tls transport")
|
||||
}
|
||||
|
||||
func loadTLSTransportConfig(tb testing.TB, filename string) *TLSTransportConfig {
|
||||
|
|
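The rewritten `TestShutdown` drops the hand-rolled `logWr` writer and captures log output in a `bytes.Buffer` sitting behind the handler. The same idea with stdlib slog only (the real test builds its logger via `promslog.Config{Writer: &buf}`); this would live in a `_test.go` file:

```go
package main

import (
	"bytes"
	"log/slog"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLogCapture(t *testing.T) {
	var buf bytes.Buffer
	// Debug level, otherwise the debug-level message emitted by Shutdown() would
	// be filtered out before reaching the buffer.
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug}))

	logger.Debug("shutting down tls transport")

	require.Contains(t, buf.String(), "shutting down tls transport")
	require.NotContains(t, buf.String(), "use of closed network connection")
}
```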
|
@ -17,6 +17,7 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
@ -31,14 +32,12 @@ import (
|
|||
|
||||
"github.com/KimMachineGun/automemlimit/memlimit"
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promlog"
|
||||
promlogflag "github.com/prometheus/common/promlog/flag"
|
||||
"github.com/prometheus/common/promslog"
|
||||
promslogflag "github.com/prometheus/common/promslog/flag"
|
||||
"github.com/prometheus/common/route"
|
||||
"github.com/prometheus/common/version"
|
||||
"github.com/prometheus/exporter-toolkit/web"
|
||||
|
@ -107,7 +106,7 @@ var (
|
|||
Name: "alertmanager_inhibition_rules",
|
||||
Help: "Number of configured inhibition rules.",
|
||||
})
|
||||
promlogConfig = promlog.Config{}
|
||||
promslogConfig = promslog.Config{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -180,28 +179,28 @@ func run() int {
|
|||
featureFlags = kingpin.Flag("enable-feature", fmt.Sprintf("Experimental features to enable. The flag can be repeated to enable multiple features. Valid options: %s", strings.Join(featurecontrol.AllowedFlags, ", "))).Default("").String()
|
||||
)
|
||||
|
||||
promlogflag.AddFlags(kingpin.CommandLine, &promlogConfig)
|
||||
promslogflag.AddFlags(kingpin.CommandLine, &promslogConfig)
|
||||
kingpin.CommandLine.UsageWriter(os.Stdout)
|
||||
|
||||
kingpin.Version(version.Print("alertmanager"))
|
||||
kingpin.CommandLine.GetFlag("help").Short('h')
|
||||
kingpin.Parse()
|
||||
|
||||
logger := promlog.New(&promlogConfig)
|
||||
logger := promslog.New(&promslogConfig)
|
||||
|
||||
level.Info(logger).Log("msg", "Starting Alertmanager", "version", version.Info())
|
||||
level.Info(logger).Log("build_context", version.BuildContext())
|
||||
logger.Info("Starting Alertmanager", "version", version.Info())
|
||||
logger.Info("Build context", "build_context", version.BuildContext())
|
||||
|
||||
ff, err := featurecontrol.NewFlags(logger, *featureFlags)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "error parsing the feature flag list", "err", err)
|
||||
logger.Error("error parsing the feature flag list", "err", err)
|
||||
return 1
|
||||
}
|
||||
compat.InitFromFlags(logger, ff)
|
||||
|
||||
if ff.EnableAutoGOMEMLIMIT() {
|
||||
if *memlimitRatio <= 0.0 || *memlimitRatio > 1.0 {
|
||||
level.Error(logger).Log("msg", "--auto-gomemlimit.ratio must be greater than 0 and less than or equal to 1.")
|
||||
logger.Error("--auto-gomemlimit.ratio must be greater than 0 and less than or equal to 1.")
|
||||
return 1
|
||||
}
|
||||
|
||||
|
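`promlog`/`promlogflag` are swapped for `promslog`/`promslogflag`: the flag helper registers the log level and format flags against the config that `promslog.New` later consumes. A trimmed-down, illustrative sketch of the startup sequence:

```go
package main

import (
	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus/common/promslog"
	promslogflag "github.com/prometheus/common/promslog/flag"
	"github.com/prometheus/common/version"
)

var promslogConfig = promslog.Config{}

func main() {
	// The flag helper wires the logging flags (level, format) into promslogConfig,
	// replacing the old promlogflag.AddFlags call.
	promslogflag.AddFlags(kingpin.CommandLine, &promslogConfig)
	kingpin.Version(version.Print("alertmanager"))
	kingpin.Parse()

	logger := promslog.New(&promslogConfig)
	logger.Info("Starting Alertmanager", "version", version.Info())
	logger.Info("Build context", "build_context", version.BuildContext())
}
```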
@ -214,34 +213,34 @@ func run() int {
|
|||
),
|
||||
),
|
||||
); err != nil {
|
||||
level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
|
||||
logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if ff.EnableAutoGOMAXPROCS() {
|
||||
l := func(format string, a ...interface{}) {
|
||||
level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...))
|
||||
logger.Info("automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...))
|
||||
}
|
||||
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
|
||||
level.Warn(logger).Log("msg", "Failed to set GOMAXPROCS automatically", "err", err)
|
||||
logger.Warn("Failed to set GOMAXPROCS automatically", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = os.MkdirAll(*dataDir, 0o777)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Unable to create data directory", "err", err)
|
||||
logger.Error("Unable to create data directory", "err", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
tlsTransportConfig, err := cluster.GetTLSTransportConfig(*tlsConfigFile)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "unable to initialize TLS transport configuration for gossip mesh", "err", err)
|
||||
logger.Error("unable to initialize TLS transport configuration for gossip mesh", "err", err)
|
||||
return 1
|
||||
}
|
||||
var peer *cluster.Peer
|
||||
if *clusterBindAddr != "" {
|
||||
peer, err = cluster.Create(
|
||||
log.With(logger, "component", "cluster"),
|
||||
logger.With("component", "cluster"),
|
||||
prometheus.DefaultRegisterer,
|
||||
*clusterBindAddr,
|
||||
*clusterAdvertiseAddr,
|
||||
|
@ -257,7 +256,7 @@ func run() int {
|
|||
*label,
|
||||
)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "unable to initialize gossip mesh", "err", err)
|
||||
logger.Error("unable to initialize gossip mesh", "err", err)
|
||||
return 1
|
||||
}
|
||||
clusterEnabled.Set(1)
|
||||
|
@ -269,13 +268,13 @@ func run() int {
|
|||
notificationLogOpts := nflog.Options{
|
||||
SnapshotFile: filepath.Join(*dataDir, "nflog"),
|
||||
Retention: *retention,
|
||||
Logger: log.With(logger, "component", "nflog"),
|
||||
Logger: logger.With("component", "nflog"),
|
||||
Metrics: prometheus.DefaultRegisterer,
|
||||
}
|
||||
|
||||
notificationLog, err := nflog.New(notificationLogOpts)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", err)
|
||||
logger.Error("error creating notification log", "err", err)
|
||||
return 1
|
||||
}
|
||||
if peer != nil {
|
||||
|
@ -298,13 +297,13 @@ func run() int {
|
|||
MaxSilences: func() int { return *maxSilences },
|
||||
MaxSilenceSizeBytes: func() int { return *maxSilenceSizeBytes },
|
||||
},
|
||||
Logger: log.With(logger, "component", "silences"),
|
||||
Logger: logger.With("component", "silences"),
|
||||
Metrics: prometheus.DefaultRegisterer,
|
||||
}
|
||||
|
||||
silences, err := silence.New(silenceOpts)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", err)
|
||||
logger.Error("error creating silence", "err", err)
|
||||
return 1
|
||||
}
|
||||
if peer != nil {
|
||||
|
@ -331,13 +330,13 @@ func run() int {
|
|||
*peerReconnectTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err)
|
||||
logger.Warn("unable to join gossip mesh", "err", err)
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *settleTimeout)
|
||||
defer func() {
|
||||
cancel()
|
||||
if err := peer.Leave(10 * time.Second); err != nil {
|
||||
level.Warn(logger).Log("msg", "unable to leave gossip mesh", "err", err)
|
||||
logger.Warn("unable to leave gossip mesh", "err", err)
|
||||
}
|
||||
}()
|
||||
go peer.Settle(ctx, *gossipInterval*10)
|
||||
|
@ -345,7 +344,7 @@ func run() int {
|
|||
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, *alertGCInterval, nil, logger, prometheus.DefaultRegisterer)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", err)
|
||||
logger.Error("error creating memory provider", "err", err)
|
||||
return 1
|
||||
}
|
||||
defer alerts.Close()
|
||||
|
@ -375,21 +374,21 @@ func run() int {
|
|||
Peer: clusterPeer,
|
||||
Timeout: *httpTimeout,
|
||||
Concurrency: *getConcurrency,
|
||||
Logger: log.With(logger, "component", "api"),
|
||||
Logger: logger.With("component", "api"),
|
||||
Registry: prometheus.DefaultRegisterer,
|
||||
GroupFunc: groupFn,
|
||||
})
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", fmt.Errorf("failed to create API: %w", err))
|
||||
logger.Error("failed to create API", "err", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
amURL, err := extURL(logger, os.Hostname, (*webConfig.WebListenAddresses)[0], *externalURL)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "failed to determine external URL", "err", err)
|
||||
logger.Error("failed to determine external URL", "err", err)
|
||||
return 1
|
||||
}
|
||||
level.Debug(logger).Log("externalURL", amURL.String())
|
||||
logger.Debug("external url", "externalUrl", amURL.String())
|
||||
|
||||
waitFunc := func() time.Duration { return 0 }
|
||||
if peer != nil {
|
||||
|
@ -409,7 +408,7 @@ func run() int {
|
|||
|
||||
dispMetrics := dispatch.NewDispatcherMetrics(false, prometheus.DefaultRegisterer)
|
||||
pipelineBuilder := notify.NewPipelineBuilder(prometheus.DefaultRegisterer, ff)
|
||||
configLogger := log.With(logger, "component", "configuration")
|
||||
configLogger := logger.With("component", "configuration")
|
||||
configCoordinator := config.NewCoordinator(
|
||||
*configFile,
|
||||
prometheus.DefaultRegisterer,
|
||||
|
@ -435,7 +434,7 @@ func run() int {
|
|||
for _, rcv := range conf.Receivers {
|
||||
if _, found := activeReceivers[rcv.Name]; !found {
|
||||
// No need to build a receiver if no route is using it.
|
||||
level.Info(configLogger).Log("msg", "skipping creation of receiver not referenced by any route", "receiver", rcv.Name)
|
||||
configLogger.Info("skipping creation of receiver not referenced by any route", "receiver", rcv.Name)
|
||||
continue
|
||||
}
|
||||
integrations, err := receiver.BuildReceiverIntegrations(rcv, tmpl, logger)
|
||||
|
@ -496,8 +495,7 @@ func run() int {
|
|||
disp = dispatch.NewDispatcher(alerts, routes, pipeline, marker, timeoutFunc, nil, logger, dispMetrics)
|
||||
routes.Walk(func(r *dispatch.Route) {
|
||||
if r.RouteOpts.RepeatInterval > *retention {
|
||||
level.Warn(configLogger).Log(
|
||||
"msg",
|
||||
configLogger.Warn(
|
||||
"repeat_interval is greater than the data retention period. It can lead to notifications being repeated more often than expected.",
|
||||
"repeat_interval",
|
||||
r.RouteOpts.RepeatInterval,
|
||||
|
@ -509,8 +507,7 @@ func run() int {
|
|||
}
|
||||
|
||||
if r.RouteOpts.RepeatInterval < r.RouteOpts.GroupInterval {
|
||||
level.Warn(configLogger).Log(
|
||||
"msg",
|
||||
configLogger.Warn(
|
||||
"repeat_interval is less than group_interval. Notifications will not repeat until the next group_interval.",
|
||||
"repeat_interval",
|
||||
r.RouteOpts.RepeatInterval,
|
||||
|
@ -537,7 +534,7 @@ func run() int {
|
|||
*routePrefix = amURL.Path
|
||||
}
|
||||
*routePrefix = "/" + strings.Trim(*routePrefix, "/")
|
||||
level.Debug(logger).Log("routePrefix", *routePrefix)
|
||||
logger.Debug("route prefix", "routePrefix", *routePrefix)
|
||||
|
||||
router := route.New().WithInstrumentation(instrumentHandler)
|
||||
if *routePrefix != "/" {
|
||||
|
@ -559,12 +556,12 @@ func run() int {
|
|||
|
||||
go func() {
|
||||
if err := web.ListenAndServe(srv, webConfig, logger); !errors.Is(err, http.ErrServerClosed) {
|
||||
level.Error(logger).Log("msg", "Listen error", "err", err)
|
||||
logger.Error("Listen error", "err", err)
|
||||
close(srvc)
|
||||
}
|
||||
defer func() {
|
||||
if err := srv.Close(); err != nil {
|
||||
level.Error(logger).Log("msg", "Error on closing the server", "err", err)
|
||||
logger.Error("Error on closing the server", "err", err)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
@ -584,7 +581,7 @@ func run() int {
|
|||
case errc := <-webReload:
|
||||
errc <- configCoordinator.Reload()
|
||||
case <-term:
|
||||
level.Info(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
|
||||
logger.Info("Received SIGTERM, exiting gracefully...")
|
||||
return 0
|
||||
case <-srvc:
|
||||
return 1
|
||||
|
@ -600,7 +597,7 @@ func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration {
|
|||
}
|
||||
}
|
||||
|
||||
func extURL(logger log.Logger, hostnamef func() (string, error), listen, external string) (*url.URL, error) {
|
||||
func extURL(logger *slog.Logger, hostnamef func() (string, error), listen, external string) (*url.URL, error) {
|
||||
if external == "" {
|
||||
hostname, err := hostnamef()
|
||||
if err != nil {
|
||||
|
@ -611,7 +608,7 @@ func extURL(logger log.Logger, hostnamef func() (string, error), listen, externa
|
|||
return nil, err
|
||||
}
|
||||
if port == "" {
|
||||
level.Warn(logger).Log("msg", "no port found for listen address", "address", listen)
|
||||
logger.Warn("no port found for listen address", "address", listen)
|
||||
}
|
||||
|
||||
external = fmt.Sprintf("http://%s:%s/", hostname, port)
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -86,7 +86,7 @@ func TestExternalURL(t *testing.T) {
|
|||
}
|
||||
}
|
||||
t.Run(fmt.Sprintf("external=%q,listen=%q", tc.external, tc.listen), func(t *testing.T) {
|
||||
u, err := extURL(log.NewNopLogger(), tc.hostnameResolver, tc.listen, tc.external)
|
||||
u, err := extURL(promslog.NewNopLogger(), tc.hostnameResolver, tc.listen, tc.external)
|
||||
if tc.err {
|
||||
require.Error(t, err)
|
||||
return
|
||||
|
|
|
@ -16,10 +16,9 @@ package config
|
|||
import (
|
||||
"crypto/md5"
|
||||
"encoding/binary"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
|
@ -27,7 +26,7 @@ import (
|
|||
// single configuration.
|
||||
type Coordinator struct {
|
||||
configFilePath string
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
|
||||
// Protects config and subscribers
|
||||
mutex sync.Mutex
|
||||
|
@ -42,7 +41,7 @@ type Coordinator struct {
|
|||
// NewCoordinator returns a new coordinator with the given configuration file
|
||||
// path. It does not yet load the configuration from file. This is done in
|
||||
// `Reload()`.
|
||||
func NewCoordinator(configFilePath string, r prometheus.Registerer, l log.Logger) *Coordinator {
|
||||
func NewCoordinator(configFilePath string, r prometheus.Registerer, l *slog.Logger) *Coordinator {
|
||||
c := &Coordinator{
|
||||
configFilePath: configFilePath,
|
||||
logger: l,
|
||||
|
@ -110,27 +109,27 @@ func (c *Coordinator) Reload() error {
|
|||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
level.Info(c.logger).Log(
|
||||
"msg", "Loading configuration file",
|
||||
c.logger.Info(
|
||||
"Loading configuration file",
|
||||
"file", c.configFilePath,
|
||||
)
|
||||
if err := c.loadFromFile(); err != nil {
|
||||
level.Error(c.logger).Log(
|
||||
"msg", "Loading configuration file failed",
|
||||
c.logger.Error(
|
||||
"Loading configuration file failed",
|
||||
"file", c.configFilePath,
|
||||
"err", err,
|
||||
)
|
||||
c.configSuccessMetric.Set(0)
|
||||
return err
|
||||
}
|
||||
level.Info(c.logger).Log(
|
||||
"msg", "Completed loading of configuration file",
|
||||
c.logger.Info(
|
||||
"Completed loading of configuration file",
|
||||
"file", c.configFilePath,
|
||||
)
|
||||
|
||||
if err := c.notifySubscribers(); err != nil {
|
||||
c.logger.Log(
|
||||
"msg", "one or more config change subscribers failed to apply new config",
|
||||
c.logger.Error(
|
||||
"one or more config change subscribers failed to apply new config",
|
||||
"file", c.configFilePath,
|
||||
"err", err,
|
||||
)
|
||||
|
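One behavioral detail visible in this hunk: go-kit/log allowed unleveled `c.logger.Log(...)` calls, while slog requires picking a level per call, so the subscriber-failure message is now logged explicitly at error level. A tiny illustration (file name and error value are placeholders):

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	err := errors.New("subscriber rejected config")

	// go-kit/log permitted unleveled calls such as logger.Log("msg", ...); slog
	// has no equivalent, so each converted call picks an explicit level.
	logger.Error("one or more config change subscribers failed to apply new config",
		"file", "testdata/conf.good.yml",
		"err", err,
	)
}
```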
|
|
@ -17,8 +17,8 @@ import (
|
|||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/promslog"
|
||||
)
|
||||
|
||||
type fakeRegisterer struct {
|
||||
|
@ -39,7 +39,7 @@ func (r *fakeRegisterer) Unregister(prometheus.Collector) bool {
|
|||
|
||||
func TestCoordinatorRegistersMetrics(t *testing.T) {
|
||||
fr := fakeRegisterer{}
|
||||
NewCoordinator("testdata/conf.good.yml", &fr, log.NewNopLogger())
|
||||
NewCoordinator("testdata/conf.good.yml", &fr, promslog.NewNopLogger())
|
||||
|
||||
if len(fr.registeredCollectors) == 0 {
|
||||
t.Error("expected NewCoordinator to register metrics on the given registerer")
|
||||
|
@ -48,7 +48,7 @@ func TestCoordinatorRegistersMetrics(t *testing.T) {
|
|||
|
||||
func TestCoordinatorNotifiesSubscribers(t *testing.T) {
|
||||
callBackCalled := false
|
||||
c := NewCoordinator("testdata/conf.good.yml", prometheus.NewRegistry(), log.NewNopLogger())
|
||||
c := NewCoordinator("testdata/conf.good.yml", prometheus.NewRegistry(), promslog.NewNopLogger())
|
||||
c.Subscribe(func(*Config) error {
|
||||
callBackCalled = true
|
||||
return nil
|
||||
|
@ -66,7 +66,7 @@ func TestCoordinatorNotifiesSubscribers(t *testing.T) {
|
|||
|
||||
func TestCoordinatorFailReloadWhenSubscriberFails(t *testing.T) {
|
||||
errMessage := "something happened"
|
||||
c := NewCoordinator("testdata/conf.good.yml", prometheus.NewRegistry(), log.NewNopLogger())
|
||||
c := NewCoordinator("testdata/conf.good.yml", prometheus.NewRegistry(), promslog.NewNopLogger())
|
||||
|
||||
c.Subscribe(func(*Config) error {
|
||||
return errors.New(errMessage)
|
||||
|
|
|
@ -14,9 +14,10 @@
package receiver

import (
	"github.com/go-kit/log"
	"log/slog"

	commoncfg "github.com/prometheus/common/config"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/alertmanager/config"
	"github.com/prometheus/alertmanager/notify"

@ -42,12 +43,16 @@ import (

// BuildReceiverIntegrations builds a list of integration notifiers off of a
// receiver config.
func BuildReceiverIntegrations(nc config.Receiver, tmpl *template.Template, logger log.Logger, httpOpts ...commoncfg.HTTPClientOption) ([]notify.Integration, error) {
func BuildReceiverIntegrations(nc config.Receiver, tmpl *template.Template, logger *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) ([]notify.Integration, error) {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}

	var (
		errs         types.MultiError
		integrations []notify.Integration
		add          = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) {
		add          = func(name string, i int, rs notify.ResolvedSender, f func(l *slog.Logger) (notify.Notifier, error)) {
			n, err := f(log.With(logger, "integration", name))
			n, err := f(logger.With("integration", name))
			if err != nil {
				errs.Add(err)
				return
@ -57,52 +62,52 @@ func BuildReceiverIntegrations(nc config.Receiver, tmpl *template.Template, logg
|
|||
)
|
||||
|
||||
for i, c := range nc.WebhookConfigs {
|
||||
add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l, httpOpts...) })
|
||||
add("webhook", i, c, func(l *slog.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.EmailConfigs {
|
||||
add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil })
|
||||
add("email", i, c, func(l *slog.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil })
|
||||
}
|
||||
for i, c := range nc.PagerdutyConfigs {
|
||||
add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l, httpOpts...) })
|
||||
add("pagerduty", i, c, func(l *slog.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.OpsGenieConfigs {
|
||||
add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l, httpOpts...) })
|
||||
add("opsgenie", i, c, func(l *slog.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.WechatConfigs {
|
||||
add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l, httpOpts...) })
|
||||
add("wechat", i, c, func(l *slog.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.SlackConfigs {
|
||||
add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l, httpOpts...) })
|
||||
add("slack", i, c, func(l *slog.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.VictorOpsConfigs {
|
||||
add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l, httpOpts...) })
|
||||
add("victorops", i, c, func(l *slog.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.PushoverConfigs {
|
||||
add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l, httpOpts...) })
|
||||
add("pushover", i, c, func(l *slog.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.SNSConfigs {
|
||||
add("sns", i, c, func(l log.Logger) (notify.Notifier, error) { return sns.New(c, tmpl, l, httpOpts...) })
|
||||
add("sns", i, c, func(l *slog.Logger) (notify.Notifier, error) { return sns.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.TelegramConfigs {
|
||||
add("telegram", i, c, func(l log.Logger) (notify.Notifier, error) { return telegram.New(c, tmpl, l, httpOpts...) })
|
||||
add("telegram", i, c, func(l *slog.Logger) (notify.Notifier, error) { return telegram.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.DiscordConfigs {
|
||||
add("discord", i, c, func(l log.Logger) (notify.Notifier, error) { return discord.New(c, tmpl, l, httpOpts...) })
|
||||
add("discord", i, c, func(l *slog.Logger) (notify.Notifier, error) { return discord.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.WebexConfigs {
|
||||
add("webex", i, c, func(l log.Logger) (notify.Notifier, error) { return webex.New(c, tmpl, l, httpOpts...) })
|
||||
add("webex", i, c, func(l *slog.Logger) (notify.Notifier, error) { return webex.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.MSTeamsConfigs {
|
||||
add("msteams", i, c, func(l log.Logger) (notify.Notifier, error) { return msteams.New(c, tmpl, l, httpOpts...) })
|
||||
add("msteams", i, c, func(l *slog.Logger) (notify.Notifier, error) { return msteams.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.MSTeamsV2Configs {
|
||||
add("msteamsv2", i, c, func(l log.Logger) (notify.Notifier, error) { return msteamsv2.New(c, tmpl, l, httpOpts...) })
|
||||
add("msteamsv2", i, c, func(l *slog.Logger) (notify.Notifier, error) { return msteamsv2.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.JiraConfigs {
|
||||
add("jira", i, c, func(l log.Logger) (notify.Notifier, error) { return jira.New(c, tmpl, l, httpOpts...) })
|
||||
add("jira", i, c, func(l *slog.Logger) (notify.Notifier, error) { return jira.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
for i, c := range nc.RocketchatConfigs {
|
||||
add("rocketchat", i, c, func(l log.Logger) (notify.Notifier, error) { return rocketchat.New(c, tmpl, l, httpOpts...) })
|
||||
add("rocketchat", i, c, func(l *slog.Logger) (notify.Notifier, error) { return rocketchat.New(c, tmpl, l, httpOpts...) })
|
||||
}
|
||||
|
||||
if errs.Len() > 0 {
|
||||
|
|
|
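BuildReceiverIntegrations now guards against a nil logger (one of the `if logger == nil { ... }` additions called out in the commit message) and derives per-integration child loggers with slog's With. A small sketch of that defensive pattern; the helper below is illustrative and not part of the Alertmanager API:

package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// buildAll is a hypothetical helper showing the guard-then-derive pattern.
func buildAll(logger *slog.Logger, names []string) {
	if logger == nil {
		// Fall back to a no-op logger instead of risking a nil dereference.
		logger = promslog.NewNopLogger()
	}
	for _, name := range names {
		// slog's method form replaces go-kit's log.With(logger, "integration", name).
		l := logger.With("integration", name)
		l.Debug("building integration notifier")
	}
}

func main() {
	buildAll(nil, []string{"webhook", "email", "pagerduty"})
}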
@ -17,12 +17,11 @@ import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"sort"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"

@ -92,7 +91,7 @@ type Dispatcher struct {
	ctx    context.Context
	cancel func()

	logger log.Logger
	logger *slog.Logger
}

// Limits describes limits used by Dispatcher.

@ -111,7 +110,7 @@ func NewDispatcher(
	mk types.GroupMarker,
	to func(time.Duration) time.Duration,
	lim Limits,
	l log.Logger,
	l *slog.Logger,
	m *DispatcherMetrics,
) *Dispatcher {
	if lim == nil {

@ -124,7 +123,7 @@ func NewDispatcher(
		route:   r,
		marker:  mk,
		timeout: to,
		logger:  log.With(l, "component", "dispatcher"),
		logger:  l.With("component", "dispatcher"),
		metrics: m,
		limits:  lim,
	}
@ -158,16 +157,16 @@ func (d *Dispatcher) run(it provider.AlertIterator) {
			if !ok {
				// Iterator exhausted for some reason.
				if err := it.Err(); err != nil {
					level.Error(d.logger).Log("msg", "Error on alert update", "err", err)
					d.logger.Error("Error on alert update", "err", err)
				}
				return
			}

			level.Debug(d.logger).Log("msg", "Received alert", "alert", alert)
			d.logger.Debug("Received alert", "alert", alert)

			// Log errors but keep trying.
			if err := it.Err(); err != nil {
				level.Error(d.logger).Log("msg", "Error on alert update", "err", err)
				d.logger.Error("Error on alert update", "err", err)
				continue
			}

@ -334,7 +333,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *Route) {
	// If the group does not exist, create it. But check the limit first.
	if limit := d.limits.MaxNumberOfAggregationGroups(); limit > 0 && d.aggrGroupsNum >= limit {
		d.metrics.aggrGroupLimitReached.Inc()
		level.Error(d.logger).Log("msg", "Too many aggregation groups, cannot create new group for alert", "groups", d.aggrGroupsNum, "limit", limit, "alert", alert.Name())
		d.logger.Error("Too many aggregation groups, cannot create new group for alert", "groups", d.aggrGroupsNum, "limit", limit, "alert", alert.Name())
		return
	}

@ -351,14 +350,15 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *Route) {
	go ag.run(func(ctx context.Context, alerts ...*types.Alert) bool {
		_, _, err := d.stage.Exec(ctx, d.logger, alerts...)
		if err != nil {
			lvl := level.Error(d.logger)
			logger := d.logger.With("num_alerts", len(alerts), "err", err)
			if errors.Is(ctx.Err(), context.Canceled) {
				// It is expected for the context to be canceled on
				// configuration reload or shutdown. In this case, the
				// message should only be logged at the debug level.
				lvl = level.Debug(d.logger)
				logger.Debug("Notify for alerts failed")
			} else {
				logger.Error("Notify for alerts failed")
			}
			lvl.Log("msg", "Notify for alerts failed", "num_alerts", len(alerts), "err", err)
		}
		return err == nil
	})
@ -381,7 +381,7 @@ func getGroupLabels(alert *types.Alert, route *Route) model.LabelSet {
|
|||
type aggrGroup struct {
|
||||
labels model.LabelSet
|
||||
opts *RouteOpts
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
routeID string
|
||||
routeKey string
|
||||
|
||||
|
@ -397,7 +397,7 @@ type aggrGroup struct {
|
|||
}
|
||||
|
||||
// newAggrGroup returns a new aggregation group.
|
||||
func newAggrGroup(ctx context.Context, labels model.LabelSet, r *Route, to func(time.Duration) time.Duration, logger log.Logger) *aggrGroup {
|
||||
func newAggrGroup(ctx context.Context, labels model.LabelSet, r *Route, to func(time.Duration) time.Duration, logger *slog.Logger) *aggrGroup {
|
||||
if to == nil {
|
||||
to = func(d time.Duration) time.Duration { return d }
|
||||
}
|
||||
|
@ -412,7 +412,7 @@ func newAggrGroup(ctx context.Context, labels model.LabelSet, r *Route, to func(
|
|||
}
|
||||
ag.ctx, ag.cancel = context.WithCancel(ctx)
|
||||
|
||||
ag.logger = log.With(logger, "aggrGroup", ag)
|
||||
ag.logger = logger.With("aggrGroup", ag)
|
||||
|
||||
// Set an initial one-time wait before flushing
|
||||
// the first batch of notifications.
|
||||
|
@ -487,7 +487,7 @@ func (ag *aggrGroup) stop() {
|
|||
// insert inserts the alert into the aggregation group.
|
||||
func (ag *aggrGroup) insert(alert *types.Alert) {
|
||||
if err := ag.alerts.Set(alert); err != nil {
|
||||
level.Error(ag.logger).Log("msg", "error on set alert", "err", err)
|
||||
ag.logger.Error("error on set alert", "err", err)
|
||||
}
|
||||
|
||||
// Immediately trigger a flush if the wait duration for this
|
||||
|
@ -527,7 +527,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
|
|||
}
|
||||
sort.Stable(alertsSlice)
|
||||
|
||||
level.Debug(ag.logger).Log("msg", "flushing", "alerts", fmt.Sprintf("%v", alertsSlice))
|
||||
ag.logger.Debug("flushing", "alerts", fmt.Sprintf("%v", alertsSlice))
|
||||
|
||||
if notify(alertsSlice...) {
|
||||
// Delete all resolved alerts as we just sent a notification for them,
|
||||
|
@ -535,7 +535,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
|
|||
// that each resolved alert has not fired again during the flush as then
|
||||
// we would delete an active alert thinking it was resolved.
|
||||
if err := ag.alerts.DeleteIfNotModified(resolvedSlice); err != nil {
|
||||
level.Error(ag.logger).Log("msg", "error on delete alerts", "err", err)
|
||||
ag.logger.Error("error on delete alerts", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
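The dispatcher hunk above replaces the go-kit pattern of swapping a level wrapper (`lvl := level.Error(...)`, later reassigned to `level.Debug(...)`) with a derived logger and an explicit branch per level. A runnable sketch of that shape, with names chosen for illustration:

package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

// logNotifyResult mirrors the dispatcher's pattern: attach shared key/value
// pairs once with With, then pick the level per branch.
func logNotifyResult(ctx context.Context, logger *slog.Logger, numAlerts int, err error) {
	if err == nil {
		return
	}
	l := logger.With("num_alerts", numAlerts, "err", err)
	if errors.Is(ctx.Err(), context.Canceled) {
		// Expected during reload or shutdown, so keep it at debug.
		l.Debug("Notify for alerts failed")
		return
	}
	l.Error("Notify for alerts failed")
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	logNotifyResult(ctx, logger, 3, errors.New("upstream timeout"))
}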
@ -16,16 +16,17 @@ package dispatch
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -138,7 +139,7 @@ func TestAggrGroup(t *testing.T) {
|
|||
}
|
||||
|
||||
// Test regular situation where we wait for group_wait to send out alerts.
|
||||
ag := newAggrGroup(context.Background(), lset, route, nil, log.NewNopLogger())
|
||||
ag := newAggrGroup(context.Background(), lset, route, nil, promslog.NewNopLogger())
|
||||
go ag.run(ntfy)
|
||||
|
||||
ag.insert(a1)
|
||||
|
@ -192,7 +193,7 @@ func TestAggrGroup(t *testing.T) {
|
|||
// immediate flushing.
|
||||
// Finally, set all alerts to be resolved. After successful notify the aggregation group
|
||||
// should empty itself.
|
||||
ag = newAggrGroup(context.Background(), lset, route, nil, log.NewNopLogger())
|
||||
ag = newAggrGroup(context.Background(), lset, route, nil, promslog.NewNopLogger())
|
||||
go ag.run(ntfy)
|
||||
|
||||
ag.insert(a1)
|
||||
|
@ -387,7 +388,7 @@ route:
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
route := NewRoute(conf.Route, nil)
|
||||
marker := types.NewMarker(prometheus.NewRegistry())
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Hour, nil, logger, nil)
|
||||
|
@ -537,7 +538,7 @@ route:
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
route := NewRoute(conf.Route, nil)
|
||||
marker := types.NewMarker(prometheus.NewRegistry())
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Hour, nil, logger, nil)
|
||||
|
@ -621,7 +622,7 @@ func (r *recordStage) Alerts() []*types.Alert {
|
|||
return alerts
|
||||
}
|
||||
|
||||
func (r *recordStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (r *recordStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
gk, ok := notify.GroupKey(ctx)
|
||||
|
@ -659,7 +660,7 @@ func newAlert(labels model.LabelSet) *types.Alert {
|
|||
}
|
||||
|
||||
func TestDispatcherRace(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
marker := types.NewMarker(prometheus.NewRegistry())
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Hour, nil, logger, nil)
|
||||
if err != nil {
|
||||
|
@ -676,7 +677,7 @@ func TestDispatcherRace(t *testing.T) {
|
|||
func TestDispatcherRaceOnFirstAlertNotDeliveredWhenGroupWaitIsZero(t *testing.T) {
|
||||
const numAlerts = 5000
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
marker := types.NewMarker(prometheus.NewRegistry())
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Hour, nil, logger, nil)
|
||||
if err != nil {
|
||||
|
@ -732,7 +733,7 @@ func TestDispatcher_DoMaintenance(t *testing.T) {
|
|||
r := prometheus.NewRegistry()
|
||||
marker := types.NewMarker(r)
|
||||
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Minute, nil, log.NewNopLogger(), nil)
|
||||
alerts, err := mem.NewAlerts(context.Background(), marker, time.Minute, nil, promslog.NewNopLogger(), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -748,13 +749,13 @@ func TestDispatcher_DoMaintenance(t *testing.T) {
|
|||
recorder := &recordStage{alerts: make(map[string]map[model.Fingerprint]*types.Alert)}
|
||||
|
||||
ctx := context.Background()
|
||||
dispatcher := NewDispatcher(alerts, route, recorder, marker, timeout, nil, log.NewNopLogger(), NewDispatcherMetrics(false, r))
|
||||
dispatcher := NewDispatcher(alerts, route, recorder, marker, timeout, nil, promslog.NewNopLogger(), NewDispatcherMetrics(false, r))
|
||||
aggrGroups := make(map[*Route]map[model.Fingerprint]*aggrGroup)
|
||||
aggrGroups[route] = make(map[model.Fingerprint]*aggrGroup)
|
||||
|
||||
// Insert an aggregation group with no alerts.
|
||||
labels := model.LabelSet{"alertname": "1"}
|
||||
aggrGroup1 := newAggrGroup(ctx, labels, route, timeout, log.NewNopLogger())
|
||||
aggrGroup1 := newAggrGroup(ctx, labels, route, timeout, promslog.NewNopLogger())
|
||||
aggrGroups[route][aggrGroup1.fingerprint()] = aggrGroup1
|
||||
dispatcher.aggrGroupsPerRoute = aggrGroups
|
||||
// Must run otherwise doMaintenance blocks on aggrGroup1.stop().
|
||||
|
|
|
@ -16,10 +16,8 @@ package featurecontrol
import (
	"errors"
	"fmt"
	"log/slog"
	"strings"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

const (

@ -47,7 +45,7 @@ type Flagger interface {
}

type Flags struct {
	logger                       log.Logger
	logger                       *slog.Logger
	enableReceiverNamesInMetrics bool
	classicMode                  bool
	utf8StrictMode               bool

@ -107,7 +105,7 @@ func enableAutoGOMAXPROCS() flagOption {
	}
}

func NewFlags(logger log.Logger, features string) (Flagger, error) {
func NewFlags(logger *slog.Logger, features string) (Flagger, error) {
	fc := &Flags{logger: logger}
	opts := []flagOption{}

@ -119,19 +117,19 @@ func NewFlags(logger log.Logger, features string) (Flagger, error) {
		switch feature {
		case FeatureReceiverNameInMetrics:
			opts = append(opts, enableReceiverNameInMetrics())
			level.Warn(logger).Log("msg", "Experimental receiver name in metrics enabled")
			logger.Warn("Experimental receiver name in metrics enabled")
		case FeatureClassicMode:
			opts = append(opts, enableClassicMode())
			level.Warn(logger).Log("msg", "Classic mode enabled")
			logger.Warn("Classic mode enabled")
		case FeatureUTF8StrictMode:
			opts = append(opts, enableUTF8StrictMode())
			level.Warn(logger).Log("msg", "UTF-8 strict mode enabled")
			logger.Warn("UTF-8 strict mode enabled")
		case FeatureAutoGOMEMLIMIT:
			opts = append(opts, enableAutoGOMEMLIMIT())
			level.Warn(logger).Log("msg", "Automatically set GOMEMLIMIT to match the Linux container or system memory limit.")
			logger.Warn("Automatically set GOMEMLIMIT to match the Linux container or system memory limit.")
		case FeatureAutoGOMAXPROCS:
			opts = append(opts, enableAutoGOMAXPROCS())
			level.Warn(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
			logger.Warn("Automatically set GOMAXPROCS to match Linux container CPU quota")
		default:
			return nil, fmt.Errorf("Unknown option '%s' for --enable-feature", feature)
		}
|
|
|
@ -18,7 +18,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -46,7 +46,7 @@ func TestFlags(t *testing.T) {
|
|||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fc, err := NewFlags(log.NewNopLogger(), tt.featureFlags)
|
||||
fc, err := NewFlags(promslog.NewNopLogger(), tt.featureFlags)
|
||||
if tt.err != nil {
|
||||
require.EqualError(t, err, tt.err.Error())
|
||||
} else {
|
||||
|
|
go.mod
|
@ -11,7 +11,6 @@ require (
|
|||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/coder/quartz v0.1.2
|
||||
github.com/emersion/go-smtp v0.21.3
|
||||
github.com/go-kit/log v0.2.1
|
||||
github.com/go-openapi/analysis v0.23.0
|
||||
github.com/go-openapi/errors v0.22.0
|
||||
github.com/go-openapi/loads v0.22.0
|
||||
|
@ -30,11 +29,11 @@ require (
|
|||
github.com/matttproud/golang_protobuf_extensions v1.0.4
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/prometheus/common v0.60.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/common v0.60.1
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/common/sigv4 v0.1.0
|
||||
github.com/prometheus/exporter-toolkit v0.11.0
|
||||
github.com/prometheus/exporter-toolkit v0.13.1
|
||||
github.com/rs/cors v1.11.1
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
|
||||
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
|
||||
|
@ -61,7 +60,6 @@ require (
|
|||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
|
@ -81,6 +79,8 @@ require (
|
|||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mdlayher/socket v0.4.1 // indirect
|
||||
github.com/mdlayher/vsock v1.2.1 // indirect
|
||||
github.com/miekg/dns v1.1.41 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
|
|
go.sum
|
@ -153,13 +153,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
|
|||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
|
@ -387,6 +383,10 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
|
|||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
|
||||
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
|
||||
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
|
||||
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
|
@ -439,8 +439,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
|
||||
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -452,14 +452,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
|
||||
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
|
||||
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
|
||||
github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
|
||||
github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
|
||||
github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
|
|
|
@ -15,11 +15,10 @@ package inhibit
|
|||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/oklog/run"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -37,14 +36,14 @@ type Inhibitor struct {
|
|||
alerts provider.Alerts
|
||||
rules []*InhibitRule
|
||||
marker types.AlertMarker
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
|
||||
mtx sync.RWMutex
|
||||
cancel func()
|
||||
}
|
||||
|
||||
// NewInhibitor returns a new Inhibitor.
|
||||
func NewInhibitor(ap provider.Alerts, rs []config.InhibitRule, mk types.AlertMarker, logger log.Logger) *Inhibitor {
|
||||
func NewInhibitor(ap provider.Alerts, rs []config.InhibitRule, mk types.AlertMarker, logger *slog.Logger) *Inhibitor {
|
||||
ih := &Inhibitor{
|
||||
alerts: ap,
|
||||
marker: mk,
|
||||
|
@ -67,14 +66,14 @@ func (ih *Inhibitor) run(ctx context.Context) {
|
|||
return
|
||||
case a := <-it.Next():
|
||||
if err := it.Err(); err != nil {
|
||||
level.Error(ih.logger).Log("msg", "Error iterating alerts", "err", err)
|
||||
ih.logger.Error("Error iterating alerts", "err", err)
|
||||
continue
|
||||
}
|
||||
// Update the inhibition rules' cache.
|
||||
for _, r := range ih.rules {
|
||||
if r.SourceMatchers.Matches(a.Labels) {
|
||||
if err := r.scache.Set(a); err != nil {
|
||||
level.Error(ih.logger).Log("msg", "error on set alert", "err", err)
|
||||
ih.logger.Error("error on set alert", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -106,7 +105,7 @@ func (ih *Inhibitor) Run() {
|
|||
})
|
||||
|
||||
if err := g.Run(); err != nil {
|
||||
level.Warn(ih.logger).Log("msg", "error running inhibitor", "err", err)
|
||||
ih.logger.Warn("error running inhibitor", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
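The inhibitor keeps its oklog/run group; only the error logging at the end changes to the slog method form. A compact sketch of running a group and logging its outcome with slog (the goroutine body is a placeholder, not the inhibitor's real work):

package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/oklog/run"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	ctx, cancel := context.WithCancel(context.Background())
	var g run.Group
	g.Add(func() error {
		// Placeholder worker: block until the context is canceled.
		<-ctx.Done()
		return ctx.Err()
	}, func(error) {
		cancel()
	})
	// Cancel immediately so the example terminates.
	cancel()

	if err := g.Run(); err != nil {
		// Before: level.Warn(logger).Log("msg", "error running inhibitor", "err", err)
		logger.Warn("error running group", "err", err)
	}
}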
@ -20,9 +20,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -184,7 +184,7 @@ func lastRuleMatchesBenchmark(b *testing.B, n int) benchmarkOptions {
|
|||
func benchmarkMutes(b *testing.B, opts benchmarkOptions) {
|
||||
r := prometheus.NewRegistry()
|
||||
m := types.NewMarker(r)
|
||||
s, err := mem.NewAlerts(context.TODO(), m, time.Minute, nil, log.NewNopLogger(), r)
|
||||
s, err := mem.NewAlerts(context.TODO(), m, time.Minute, nil, promslog.NewNopLogger(), r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ func benchmarkMutes(b *testing.B, opts benchmarkOptions) {
|
|||
}
|
||||
}
|
||||
|
||||
ih := NewInhibitor(s, rules, m, log.NewNopLogger())
|
||||
ih := NewInhibitor(s, rules, m, promslog.NewNopLogger())
|
||||
defer ih.Stop()
|
||||
go ih.Run()
|
||||
|
||||
|
|
|
@ -17,9 +17,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
|
@ -28,7 +28,7 @@ import (
|
|||
"github.com/prometheus/alertmanager/types"
|
||||
)
|
||||
|
||||
var nopLogger = log.NewNopLogger()
|
||||
var nopLogger = promslog.NewNopLogger()
|
||||
|
||||
func TestInhibitRuleHasEqual(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
|
|
@ -15,13 +15,13 @@ package compat
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
"github.com/prometheus/alertmanager/featurecontrol"
|
||||
"github.com/prometheus/alertmanager/matcher/parse"
|
||||
|
@ -29,9 +29,9 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
isValidLabelName = isValidClassicLabelName(log.NewNopLogger())
|
||||
parseMatcher = ClassicMatcherParser(log.NewNopLogger())
|
||||
parseMatchers = ClassicMatchersParser(log.NewNopLogger())
|
||||
isValidLabelName = isValidClassicLabelName(promslog.NewNopLogger())
|
||||
parseMatcher = ClassicMatcherParser(promslog.NewNopLogger())
|
||||
parseMatchers = ClassicMatchersParser(promslog.NewNopLogger())
|
||||
)
|
||||
|
||||
// IsValidLabelName returns true if the string is a valid label name.
|
||||
|
@ -56,7 +56,7 @@ func Matchers(input, origin string) (labels.Matchers, error) {
|
|||
}
|
||||
|
||||
// InitFromFlags initializes the compat package from the flagger.
|
||||
func InitFromFlags(l log.Logger, f featurecontrol.Flagger) {
|
||||
func InitFromFlags(l *slog.Logger, f featurecontrol.Flagger) {
|
||||
if f.ClassicMode() {
|
||||
isValidLabelName = isValidClassicLabelName(l)
|
||||
parseMatcher = ClassicMatcherParser(l)
|
||||
|
@ -74,27 +74,27 @@ func InitFromFlags(l log.Logger, f featurecontrol.Flagger) {
|
|||
|
||||
// ClassicMatcherParser uses the pkg/labels parser to parse the matcher in
|
||||
// the input string.
|
||||
func ClassicMatcherParser(l log.Logger) ParseMatcher {
|
||||
func ClassicMatcherParser(l *slog.Logger) ParseMatcher {
|
||||
return func(input, origin string) (matcher *labels.Matcher, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with classic matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with classic matchers parser", "input", input, "origin", origin)
|
||||
return labels.ParseMatcher(input)
|
||||
}
|
||||
}
|
||||
|
||||
// ClassicMatchersParser uses the pkg/labels parser to parse zero or more
|
||||
// matchers in the input string. It returns an error if the input is invalid.
|
||||
func ClassicMatchersParser(l log.Logger) ParseMatchers {
|
||||
func ClassicMatchersParser(l *slog.Logger) ParseMatchers {
|
||||
return func(input, origin string) (matchers labels.Matchers, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with classic matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with classic matchers parser", "input", input, "origin", origin)
|
||||
return labels.ParseMatchers(input)
|
||||
}
|
||||
}
|
||||
|
||||
// UTF8MatcherParser uses the new matcher/parse parser to parse the matcher
|
||||
// in the input string. If this fails it does not revert to the pkg/labels parser.
|
||||
func UTF8MatcherParser(l log.Logger) ParseMatcher {
|
||||
func UTF8MatcherParser(l *slog.Logger) ParseMatcher {
|
||||
return func(input, origin string) (matcher *labels.Matcher, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with UTF-8 matchers parser", "input", input, "origin", origin)
|
||||
if strings.HasPrefix(input, "{") || strings.HasSuffix(input, "}") {
|
||||
return nil, fmt.Errorf("unexpected open or close brace: %s", input)
|
||||
}
|
||||
|
@ -105,9 +105,9 @@ func UTF8MatcherParser(l log.Logger) ParseMatcher {
|
|||
// UTF8MatchersParser uses the new matcher/parse parser to parse zero or more
|
||||
// matchers in the input string. If this fails it does not revert to the
|
||||
// pkg/labels parser.
|
||||
func UTF8MatchersParser(l log.Logger) ParseMatchers {
|
||||
func UTF8MatchersParser(l *slog.Logger) ParseMatchers {
|
||||
return func(input, origin string) (matchers labels.Matchers, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with UTF-8 matchers parser", "input", input, "origin", origin)
|
||||
return parse.Matchers(input)
|
||||
}
|
||||
}
|
||||
|
@ -115,9 +115,9 @@ func UTF8MatchersParser(l log.Logger) ParseMatchers {
|
|||
// FallbackMatcherParser uses the new matcher/parse parser to parse zero or more
|
||||
// matchers in the string. If this fails it reverts to the pkg/labels parser and
|
||||
// emits a warning log line.
|
||||
func FallbackMatcherParser(l log.Logger) ParseMatcher {
|
||||
func FallbackMatcherParser(l *slog.Logger) ParseMatcher {
|
||||
return func(input, origin string) (matcher *labels.Matcher, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin)
|
||||
if strings.HasPrefix(input, "{") || strings.HasSuffix(input, "}") {
|
||||
return nil, fmt.Errorf("unexpected open or close brace: %s", input)
|
||||
}
|
||||
|
@ -133,13 +133,13 @@ func FallbackMatcherParser(l log.Logger) ParseMatcher {
|
|||
// The input is valid in the pkg/labels parser, but not the matcher/parse
|
||||
// parser. This means the input is not forwards compatible.
|
||||
suggestion := cMatcher.String()
|
||||
level.Warn(l).Log("msg", "Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion)
|
||||
l.Warn("Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion)
|
||||
return cMatcher, nil
|
||||
}
|
||||
// If the input is valid in both parsers, but produces different results,
|
||||
// then there is disagreement.
|
||||
if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatcher, cMatcher) {
|
||||
level.Warn(l).Log("msg", "Matchers input has disagreement", "input", input, "origin", origin)
|
||||
l.Warn("Matchers input has disagreement", "input", input, "origin", origin)
|
||||
return cMatcher, nil
|
||||
}
|
||||
return nMatcher, nil
|
||||
|
@ -149,9 +149,9 @@ func FallbackMatcherParser(l log.Logger) ParseMatcher {
|
|||
// FallbackMatchersParser uses the new matcher/parse parser to parse the
|
||||
// matcher in the input string. If this fails it falls back to the pkg/labels
|
||||
// parser and emits a warning log line.
|
||||
func FallbackMatchersParser(l log.Logger) ParseMatchers {
|
||||
func FallbackMatchersParser(l *slog.Logger) ParseMatchers {
|
||||
return func(input, origin string) (matchers labels.Matchers, err error) {
|
||||
level.Debug(l).Log("msg", "Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin)
|
||||
l.Debug("Parsing with UTF-8 matchers parser, with fallback to classic matchers parser", "input", input, "origin", origin)
|
||||
// Parse the input in both parsers to look for disagreement and incompatible
|
||||
// inputs.
|
||||
nMatchers, nErr := parse.Matchers(input)
|
||||
|
@ -173,14 +173,14 @@ func FallbackMatchersParser(l log.Logger) ParseMatchers {
|
|||
suggestion := sb.String()
|
||||
// The input is valid in the pkg/labels parser, but not the
|
||||
// new matcher/parse parser.
|
||||
level.Warn(l).Log("msg", "Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion)
|
||||
l.Warn("Alertmanager is moving to a new parser for labels and matchers, and this input is incompatible. Alertmanager has instead parsed the input using the classic matchers parser as a fallback. To make this input compatible with the UTF-8 matchers parser please make sure all regular expressions and values are double-quoted. If you are still seeing this message please open an issue.", "input", input, "origin", origin, "err", nErr, "suggestion", suggestion)
|
||||
return cMatchers, nil
|
||||
}
|
||||
// If the input is valid in both parsers, but produces different results,
|
||||
// then there is disagreement. We need to compare to labels.Matchers(cMatchers)
|
||||
// as cMatchers is a []*labels.Matcher not labels.Matchers.
|
||||
if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatchers, labels.Matchers(cMatchers)) {
|
||||
level.Warn(l).Log("msg", "Matchers input has disagreement", "input", input, "origin", origin)
|
||||
l.Warn("Matchers input has disagreement", "input", input, "origin", origin)
|
||||
return cMatchers, nil
|
||||
}
|
||||
return nMatchers, nil
|
||||
|
@ -188,14 +188,14 @@ func FallbackMatchersParser(l log.Logger) ParseMatchers {
|
|||
}
|
||||
|
||||
// isValidClassicLabelName returns true if the string is a valid classic label name.
|
||||
func isValidClassicLabelName(_ log.Logger) func(model.LabelName) bool {
|
||||
func isValidClassicLabelName(_ *slog.Logger) func(model.LabelName) bool {
|
||||
return func(name model.LabelName) bool {
|
||||
return name.IsValid()
|
||||
}
|
||||
}
|
||||
|
||||
// isValidUTF8LabelName returns true if the string is a valid UTF-8 label name.
|
||||
func isValidUTF8LabelName(_ log.Logger) func(model.LabelName) bool {
|
||||
func isValidUTF8LabelName(_ *slog.Logger) func(model.LabelName) bool {
|
||||
return func(name model.LabelName) bool {
|
||||
if len(name) == 0 {
|
||||
return false
|
||||
|
|
|
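The compat package's fallback parsers keep the same structure after the conversion: parse with the new parser, fall back to the classic one, and log a warning through the injected *slog.Logger. A simplified sketch of that fallback-with-logging shape; the parse functions here are stand-ins, not the real matcher parsers:

package main

import (
	"errors"
	"log/slog"
	"os"
	"strconv"
)

// parseStrict and parseLenient stand in for the UTF-8 and classic parsers.
func parseStrict(input string) (int, error)  { return strconv.Atoi(input) }
func parseLenient(input string) (int, error) { return len(input), nil }

// parseWithFallback mirrors the FallbackMatcherParser shape: try the new
// parser, fall back to the old one, and warn so operators can fix the input.
func parseWithFallback(l *slog.Logger, input, origin string) (int, error) {
	l.Debug("Parsing with strict parser, with fallback", "input", input, "origin", origin)
	v, err := parseStrict(input)
	if err == nil {
		return v, nil
	}
	fallback, ferr := parseLenient(input)
	if ferr != nil {
		return 0, errors.Join(err, ferr)
	}
	l.Warn("Input only parsed by the fallback parser; please update it", "input", input, "origin", origin, "err", err)
	return fallback, nil
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	_, _ = parseWithFallback(logger, "not-a-number", "config")
}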
@ -16,8 +16,8 @@ package compat
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/pkg/labels"
|
||||
|
@ -56,7 +56,7 @@ func TestFallbackMatcherParser(t *testing.T) {
|
|||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
f := FallbackMatcherParser(log.NewNopLogger())
|
||||
f := FallbackMatcherParser(promslog.NewNopLogger())
|
||||
matcher, err := f(test.input, "test")
|
||||
if test.err != "" {
|
||||
require.EqualError(t, err, test.err)
|
||||
|
@ -112,7 +112,7 @@ func TestFallbackMatchersParser(t *testing.T) {
|
|||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
f := FallbackMatchersParser(log.NewNopLogger())
|
||||
f := FallbackMatchersParser(promslog.NewNopLogger())
|
||||
matchers, err := f(test.input, "test")
|
||||
if test.err != "" {
|
||||
require.EqualError(t, err, test.err)
|
||||
|
@ -158,7 +158,7 @@ func TestIsValidClassicLabelName(t *testing.T) {
|
|||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
fn := isValidClassicLabelName(log.NewNopLogger())
|
||||
fn := isValidClassicLabelName(promslog.NewNopLogger())
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
require.Equal(t, test.expected, fn(test.input))
|
||||
})
|
||||
|
@ -193,7 +193,7 @@ func TestIsValidUTF8LabelName(t *testing.T) {
|
|||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
fn := isValidUTF8LabelName(log.NewNopLogger())
|
||||
fn := isValidUTF8LabelName(promslog.NewNopLogger())
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
require.Equal(t, test.expected, fn(test.input))
|
||||
})
|
||||
|
|
|
@ -22,16 +22,16 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coder/quartz"
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
"github.com/prometheus/alertmanager/cluster"
|
||||
pb "github.com/prometheus/alertmanager/nflog/nflogpb"
|
||||
|
@ -78,7 +78,7 @@ func QGroupKey(gk string) QueryParam {
|
|||
type Log struct {
|
||||
clock quartz.Clock
|
||||
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
metrics *metrics
|
||||
retention time.Duration
|
||||
|
||||
|
@ -239,7 +239,7 @@ type Options struct {
|
|||
|
||||
Retention time.Duration
|
||||
|
||||
Logger log.Logger
|
||||
Logger *slog.Logger
|
||||
Metrics prometheus.Registerer
|
||||
}
|
||||
|
||||
|
@ -261,7 +261,7 @@ func New(o Options) (*Log, error) {
|
|||
l := &Log{
|
||||
clock: quartz.NewReal(),
|
||||
retention: o.Retention,
|
||||
logger: log.NewNopLogger(),
|
||||
logger: promslog.NewNopLogger(),
|
||||
st: state{},
|
||||
broadcast: func([]byte) {},
|
||||
metrics: newMetrics(o.Metrics),
|
||||
|
@ -276,7 +276,7 @@ func New(o Options) (*Log, error) {
|
|||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
level.Debug(l.logger).Log("msg", "notification log snapshot file doesn't exist", "err", err)
|
||||
l.logger.Debug("notification log snapshot file doesn't exist", "err", err)
|
||||
} else {
|
||||
o.SnapshotReader = r
|
||||
defer r.Close()
|
||||
|
@ -302,7 +302,7 @@ func (l *Log) now() time.Time {
|
|||
// If not nil, the last argument is an override for what to do as part of the maintenance - for advanced usage.
|
||||
func (l *Log) Maintenance(interval time.Duration, snapf string, stopc <-chan struct{}, override MaintenanceFunc) {
|
||||
if interval == 0 || stopc == nil {
|
||||
level.Error(l.logger).Log("msg", "interval or stop signal are missing - not running maintenance")
|
||||
l.logger.Error("interval or stop signal are missing - not running maintenance")
|
||||
return
|
||||
}
|
||||
t := l.clock.NewTicker(interval)
|
||||
|
@ -335,14 +335,14 @@ func (l *Log) Maintenance(interval time.Duration, snapf string, stopc <-chan str
|
|||
runMaintenance := func(do func() (int64, error)) error {
|
||||
l.metrics.maintenanceTotal.Inc()
|
||||
start := l.now().UTC()
|
||||
level.Debug(l.logger).Log("msg", "Running maintenance")
|
||||
l.logger.Debug("Running maintenance")
|
||||
size, err := do()
|
||||
l.metrics.snapshotSize.Set(float64(size))
|
||||
if err != nil {
|
||||
l.metrics.maintenanceErrorsTotal.Inc()
|
||||
return err
|
||||
}
|
||||
level.Debug(l.logger).Log("msg", "Maintenance done", "duration", l.now().Sub(start), "size", size)
|
||||
l.logger.Debug("Maintenance done", "duration", l.now().Sub(start), "size", size)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -353,7 +353,7 @@ Loop:
|
|||
break Loop
|
||||
case <-t.C:
|
||||
if err := runMaintenance(doMaintenance); err != nil {
|
||||
level.Error(l.logger).Log("msg", "Running maintenance failed", "err", err)
|
||||
l.logger.Error("Running maintenance failed", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -363,7 +363,7 @@ Loop:
|
|||
return
|
||||
}
|
||||
if err := runMaintenance(doMaintenance); err != nil {
|
||||
level.Error(l.logger).Log("msg", "Creating shutdown snapshot failed", "err", err)
|
||||
l.logger.Error("Creating shutdown snapshot failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -534,7 +534,7 @@ func (l *Log) Merge(b []byte) error {
|
|||
// all nodes already.
|
||||
l.broadcast(b)
|
||||
l.metrics.propagatedMessagesTotal.Inc()
|
||||
level.Debug(l.logger).Log("msg", "gossiping new entry", "entry", e)
|
||||
l.logger.Debug("gossiping new entry", "entry", e)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
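The notification log's maintenance loop keeps its ticker-driven shape; the snapshot bookkeeping now logs through *slog.Logger. A reduced sketch of that loop using a plain time.Ticker (the real code uses a quartz clock and Prometheus metrics, omitted here):

package main

import (
	"log/slog"
	"os"
	"time"
)

// maintenance runs do() every interval until stopc is closed, logging the
// duration and outcome in the slog style used by the converted nflog code.
func maintenance(logger *slog.Logger, interval time.Duration, stopc <-chan struct{}, do func() (int64, error)) {
	if interval == 0 || stopc == nil {
		logger.Error("interval or stop signal are missing - not running maintenance")
		return
	}
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-stopc:
			return
		case <-t.C:
			start := time.Now().UTC()
			logger.Debug("Running maintenance")
			size, err := do()
			if err != nil {
				logger.Error("Running maintenance failed", "err", err)
				continue
			}
			logger.Debug("Maintenance done", "duration", time.Since(start), "size", size)
		}
	}
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	stopc := make(chan struct{})
	go maintenance(logger, 10*time.Millisecond, stopc, func() (int64, error) { return 42, nil })
	time.Sleep(25 * time.Millisecond)
	close(stopc)
	time.Sleep(5 * time.Millisecond) // let the goroutine observe the close before exiting
}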
@ -18,13 +18,12 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
netUrl "net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -53,14 +52,14 @@ const (
|
|||
type Notifier struct {
|
||||
conf *config.DiscordConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
webhookURL *config.SecretURL
|
||||
}
|
||||
|
||||
// New returns a new Discord notifier.
|
||||
func New(c *config.DiscordConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.DiscordConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "discord", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -96,7 +95,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return false, err
|
||||
}
|
||||
|
||||
level.Debug(n.logger).Log("incident", key)
|
||||
n.logger.Debug("extracted group key", "key", key)
|
||||
|
||||
alerts := types.Alerts(as...)
|
||||
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
|
||||
|
@ -110,14 +109,14 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return false, err
|
||||
}
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated title", "key", key, "max_runes", maxTitleLenRunes)
|
||||
n.logger.Warn("Truncated title", "key", key, "max_runes", maxTitleLenRunes)
|
||||
}
|
||||
description, truncated := notify.TruncateInRunes(tmpl(n.conf.Message), maxDescriptionLenRunes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated message", "key", key, "max_runes", maxDescriptionLenRunes)
|
||||
n.logger.Warn("Truncated message", "key", key, "max_runes", maxDescriptionLenRunes)
|
||||
}
|
||||
|
||||
content, truncated := notify.TruncateInRunes(tmpl(n.conf.Content), maxContentLenRunes)
|
||||
|
@ -125,7 +124,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return false, err
|
||||
}
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated message", "key", key, "max_runes", maxContentLenRunes)
|
||||
n.logger.Warn("Truncated message", "key", key, "max_runes", maxContentLenRunes)
|
||||
}
|
||||
|
||||
color := colorGrey
|
||||
|
@ -161,7 +160,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
if _, err := netUrl.Parse(n.conf.AvatarURL); err == nil {
|
||||
w.AvatarURL = n.conf.AvatarURL
|
||||
} else {
|
||||
level.Warn(n.logger).Log("msg", "Bad avatar url", "key", key)
|
||||
n.logger.Warn("Bad avatar url", "key", key)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
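The Discord notifier's truncation warnings follow the same slog pattern: truncate, then Warn with the group key and the limit as attributes. A self-contained sketch with a local rune-truncation helper; notify.TruncateInRunes itself is not reproduced here:

package main

import (
	"log/slog"
	"os"
)

// truncateInRunes is a local stand-in for the notify package helper: it cuts
// s to at most n runes and reports whether anything was dropped.
func truncateInRunes(s string, n int) (string, bool) {
	r := []rune(s)
	if len(r) <= n {
		return s, false
	}
	return string(r[:n]), true
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	const maxTitleLenRunes = 10 // illustrative limit
	key := "example-group-key"

	title, truncated := truncateInRunes("A rather long alert title", maxTitleLenRunes)
	if truncated {
		// Before: level.Warn(logger).Log("msg", "Truncated title", "key", key, "max_runes", maxTitleLenRunes)
		logger.Warn("Truncated title", "key", key, "max_runes", maxTitleLenRunes)
	}
	_ = title
}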
@ -25,9 +25,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -46,7 +46,7 @@ func TestDiscordRetry(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -102,7 +102,7 @@ func TestDiscordTemplating(t *testing.T) {
|
|||
t.Run(tc.title, func(t *testing.T) {
|
||||
tc.cfg.WebhookURL = &config.SecretURL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -141,7 +141,7 @@ func TestDiscordRedactedURL(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -163,7 +163,7 @@ func TestDiscordReadingURLFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -206,7 +206,7 @@ func TestDiscord_Notify(t *testing.T) {
|
|||
}
|
||||
|
||||
// Create a new Discord notifier
|
||||
notifier, err := New(cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
notifier, err := New(cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a context and alerts
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
|
@ -32,8 +33,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -46,12 +45,12 @@ import (
|
|||
type Email struct {
|
||||
conf *config.EmailConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
hostname string
|
||||
}
|
||||
|
||||
// New returns a new Email notifier.
|
||||
func New(c *config.EmailConfig, t *template.Template, l log.Logger) *Email {
|
||||
func New(c *config.EmailConfig, t *template.Template, l *slog.Logger) *Email {
|
||||
if _, ok := c.Headers["Subject"]; !ok {
|
||||
c.Headers["Subject"] = config.DefaultEmailSubject
|
||||
}
|
||||
|
@ -76,7 +75,7 @@ func (n *Email) auth(mechs string) (smtp.Auth, error) {
|
|||
|
||||
// If no username is set, keep going without authentication.
|
||||
if n.conf.AuthUsername == "" {
|
||||
level.Debug(n.logger).Log("msg", "smtp_auth_username is not configured. Attempting to send email without authenticating")
|
||||
n.logger.Debug("smtp_auth_username is not configured. Attempting to send email without authenticating")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
@ -162,7 +161,7 @@ func (n *Email) Notify(ctx context.Context, as ...*types.Alert) (bool, error) {
|
|||
defer func() {
|
||||
// Try to clean up after ourselves but don't log anything if something has failed.
|
||||
if err := c.Quit(); success && err != nil {
|
||||
level.Warn(n.logger).Log("msg", "failed to close SMTP connection", "err", err)
|
||||
n.logger.Warn("failed to close SMTP connection", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
|
@ -42,9 +42,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/emersion/go-smtp"
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
// nolint:depguard // require cannot be called outside the main goroutine: https://pkg.go.dev/testing#T.FailNow
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -181,7 +181,7 @@ func notifyEmailWithContext(ctx context.Context, cfg *config.EmailConfig, server
|
|||
return nil, false, err
|
||||
}
|
||||
|
||||
email := New(cfg, tmpl, log.NewNopLogger())
|
||||
email := New(cfg, tmpl, promslog.NewNopLogger())
|
||||
|
||||
retry, err := email.Notify(ctx, firingAlert)
|
||||
if err != nil {
|
||||
|
@ -627,7 +627,7 @@ func TestEmailNotifyWithAuthentication(t *testing.T) {
|
|||
|
||||
func TestEmailConfigNoAuthMechs(t *testing.T) {
|
||||
email := &Email{
|
||||
conf: &config.EmailConfig{AuthUsername: "test"}, tmpl: &template.Template{}, logger: log.NewNopLogger(),
|
||||
conf: &config.EmailConfig{AuthUsername: "test"}, tmpl: &template.Template{}, logger: promslog.NewNopLogger(),
|
||||
}
|
||||
_, err := email.auth("")
|
||||
require.Error(t, err)
|
||||
|
@ -637,7 +637,7 @@ func TestEmailConfigNoAuthMechs(t *testing.T) {
|
|||
func TestEmailConfigMissingAuthParam(t *testing.T) {
|
||||
conf := &config.EmailConfig{AuthUsername: "test"}
|
||||
email := &Email{
|
||||
conf: conf, tmpl: &template.Template{}, logger: log.NewNopLogger(),
|
||||
conf: conf, tmpl: &template.Template{}, logger: promslog.NewNopLogger(),
|
||||
}
|
||||
_, err := email.auth("CRAM-MD5")
|
||||
require.Error(t, err)
|
||||
|
@ -658,7 +658,7 @@ func TestEmailConfigMissingAuthParam(t *testing.T) {
|
|||
|
||||
func TestEmailNoUsernameStillOk(t *testing.T) {
|
||||
email := &Email{
|
||||
conf: &config.EmailConfig{}, tmpl: &template.Template{}, logger: log.NewNopLogger(),
|
||||
conf: &config.EmailConfig{}, tmpl: &template.Template{}, logger: promslog.NewNopLogger(),
|
||||
}
|
||||
a, err := email.auth("CRAM-MD5")
|
||||
require.NoError(t, err)
|
||||
|
@ -722,7 +722,7 @@ func TestEmailRejected(t *testing.T) {
|
|||
tmpl, firingAlert, err := prepare(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
e := New(cfg, tmpl, log.NewNopLogger())
|
||||
e := New(cfg, tmpl, promslog.NewNopLogger())
|
||||
|
||||
// Send the alert to mock SMTP server.
|
||||
retry, err := e.Notify(context.Background(), firingAlert)
|
@ -19,13 +19,12 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"sort"
"strings"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
commoncfg "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/trivago/tgo/tcontainer"
@ -45,12 +44,12 @@ const (
type Notifier struct {
conf *config.JiraConfig
tmpl *template.Template
logger log.Logger
logger *slog.Logger
client *http.Client
retrier *notify.Retrier
}

func New(c *config.JiraConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
func New(c *config.JiraConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "jira", httpOpts...)
if err != nil {
return nil, err
@ -72,7 +71,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
return false, err
}

logger := log.With(n.logger, "group_key", key.String())
logger := n.logger.With("group_key", key.String())

var (
alerts = types.Alerts(as...)
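The Jira notifier also shows the second recurring pattern: go-kit's `log.With(logger, k, v)` helper becomes the `With` method on the logger itself, and the derived logger is then threaded through the helper functions in the hunks below. A hedged sketch of that pattern, with a made-up group key and truncation limit purely for illustration:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	base := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Before: logger := log.With(n.logger, "group_key", key.String())
	// After:  logger := n.logger.With("group_key", key.String())
	// Every record emitted through the child logger carries group_key.
	logger := base.With("group_key", "{}:{alertname=\"HighLatency\"}") // illustrative value

	logger.Debug("create new issue")
	logger.Warn("Truncated summary", "max_runes", 500) // 500 is a placeholder limit
}
```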
@ -99,12 +98,12 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return false, nil
|
||||
}
|
||||
|
||||
level.Debug(logger).Log("msg", "create new issue")
|
||||
logger.Debug("create new issue")
|
||||
} else {
|
||||
path = "issue/" + existingIssue.Key
|
||||
method = http.MethodPut
|
||||
|
||||
level.Debug(logger).Log("msg", "updating existing issue", "issue_key", existingIssue.Key)
|
||||
logger.Debug("updating existing issue", "issue_key", existingIssue.Key)
|
||||
}
|
||||
|
||||
requestBody, err := n.prepareIssueRequestBody(ctx, logger, key.Hash(), tmplTextFunc)
|
||||
|
@ -120,7 +119,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return n.transitionIssue(ctx, logger, existingIssue, alerts.HasFiring())
|
||||
}
|
||||
|
||||
func (n *Notifier) prepareIssueRequestBody(ctx context.Context, logger log.Logger, groupID string, tmplTextFunc templateFunc) (issue, error) {
|
||||
func (n *Notifier) prepareIssueRequestBody(ctx context.Context, logger *slog.Logger, groupID string, tmplTextFunc templateFunc) (issue, error) {
|
||||
summary, err := tmplTextFunc(n.conf.Summary)
|
||||
if err != nil {
|
||||
return issue{}, fmt.Errorf("summary template: %w", err)
|
||||
|
@ -135,7 +134,7 @@ func (n *Notifier) prepareIssueRequestBody(ctx context.Context, logger log.Logge
|
|||
|
||||
summary, truncated := notify.TruncateInRunes(summary, maxSummaryLenRunes)
|
||||
if truncated {
|
||||
level.Warn(logger).Log("msg", "Truncated summary", "max_runes", maxSummaryLenRunes)
|
||||
logger.Warn("Truncated summary", "max_runes", maxSummaryLenRunes)
|
||||
}
|
||||
|
||||
requestBody := issue{Fields: &issueFields{
|
||||
|
@ -153,7 +152,7 @@ func (n *Notifier) prepareIssueRequestBody(ctx context.Context, logger log.Logge
|
|||
|
||||
issueDescriptionString, truncated = notify.TruncateInRunes(issueDescriptionString, maxDescriptionLenRunes)
|
||||
if truncated {
|
||||
level.Warn(logger).Log("msg", "Truncated description", "max_runes", maxDescriptionLenRunes)
|
||||
logger.Warn("Truncated description", "max_runes", maxDescriptionLenRunes)
|
||||
}
|
||||
|
||||
requestBody.Fields.Description = issueDescriptionString
|
||||
|
@ -187,7 +186,7 @@ func (n *Notifier) prepareIssueRequestBody(ctx context.Context, logger log.Logge
|
|||
return requestBody, nil
|
||||
}
|
||||
|
||||
func (n *Notifier) searchExistingIssue(ctx context.Context, logger log.Logger, groupID string, firing bool) (*issue, bool, error) {
|
||||
func (n *Notifier) searchExistingIssue(ctx context.Context, logger *slog.Logger, groupID string, firing bool) (*issue, bool, error) {
|
||||
jql := strings.Builder{}
|
||||
|
||||
if n.conf.WontFixResolution != "" {
|
||||
|
@ -216,7 +215,7 @@ func (n *Notifier) searchExistingIssue(ctx context.Context, logger log.Logger, g
|
|||
Expand: []string{},
|
||||
}
|
||||
|
||||
level.Debug(logger).Log("msg", "search for recent issues", "jql", requestBody.JQL)
|
||||
logger.Debug("search for recent issues", "jql", requestBody.JQL)
|
||||
|
||||
responseBody, shouldRetry, err := n.doAPIRequest(ctx, http.MethodPost, "search", requestBody)
|
||||
if err != nil {
|
||||
|
@ -230,12 +229,12 @@ func (n *Notifier) searchExistingIssue(ctx context.Context, logger log.Logger, g
|
|||
}
|
||||
|
||||
if issueSearchResult.Total == 0 {
|
||||
level.Debug(logger).Log("msg", "found no existing issue")
|
||||
logger.Debug("found no existing issue")
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
if issueSearchResult.Total > 1 {
|
||||
level.Warn(logger).Log("msg", "more than one issue matched, selecting the most recently resolved", "selected_issue", issueSearchResult.Issues[0].Key)
|
||||
logger.Warn("more than one issue matched, selecting the most recently resolved", "selected_issue", issueSearchResult.Issues[0].Key)
|
||||
}
|
||||
|
||||
return &issueSearchResult.Issues[0], false, nil
|
||||
|
@ -264,7 +263,7 @@ func (n *Notifier) getIssueTransitionByName(ctx context.Context, issueKey, trans
|
|||
return "", false, fmt.Errorf("can't find transition %s for issue %s", transitionName, issueKey)
|
||||
}
|
||||
|
||||
func (n *Notifier) transitionIssue(ctx context.Context, logger log.Logger, i *issue, firing bool) (bool, error) {
|
||||
func (n *Notifier) transitionIssue(ctx context.Context, logger *slog.Logger, i *issue, firing bool) (bool, error) {
|
||||
if i == nil || i.Key == "" || i.Fields == nil || i.Fields.Status == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
@ -297,7 +296,7 @@ func (n *Notifier) transitionIssue(ctx context.Context, logger log.Logger, i *is
|
|||
|
||||
path := fmt.Sprintf("issue/%s/transitions", i.Key)
|
||||
|
||||
level.Debug(logger).Log("msg", "transitions jira issue", "issue_key", i.Key, "transition", transition)
|
||||
logger.Debug("transitions jira issue", "issue_key", i.Key, "transition", transition)
|
||||
_, shouldRetry, err = n.doAPIRequest(ctx, http.MethodPost, path, requestBody)
|
||||
|
||||
return shouldRetry, err
|
||||
|
|
|
@ -27,10 +27,9 @@ import (
|
|||
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
"github.com/prometheus/alertmanager/notify"
|
||||
"github.com/prometheus/alertmanager/notify/test"
|
||||
|
@ -51,7 +50,7 @@ func TestJiraRetry(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -126,7 +125,7 @@ func TestJiraTemplating(t *testing.T) {
|
|||
t.Run(tc.title, func(t *testing.T) {
|
||||
tc.cfg.APIURL = &config.URL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -576,7 +575,7 @@ func TestJiraNotify(t *testing.T) {
|
|||
tc.cfg.APIURL = &config.URL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
|
||||
notifier, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
notifier, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
|
|
@ -19,12 +19,11 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -43,7 +42,7 @@ const (
|
|||
type Notifier struct {
|
||||
conf *config.MSTeamsConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
webhookURL *config.SecretURL
|
||||
|
@ -61,7 +60,7 @@ type teamsMessage struct {
|
|||
}
|
||||
|
||||
// New returns a new notifier that uses the Microsoft Teams Webhook API.
|
||||
func New(c *config.MSTeamsConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.MSTeamsConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "msteams", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -86,7 +85,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
return false, err
}

level.Debug(n.logger).Log("incident", key)
n.logger.Debug("extracted group key", "key", key)

data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
tmpl := notify.TmplText(n.tmpl, data, &err)
@ -25,9 +25,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -46,7 +46,7 @@ func TestMSTeamsRetry(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -112,7 +112,7 @@ func TestMSTeamsTemplating(t *testing.T) {
|
|||
t.Run(tc.title, func(t *testing.T) {
|
||||
tc.cfg.WebhookURL = &config.SecretURL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -163,7 +163,7 @@ func TestNotifier_Notify_WithReason(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -205,7 +205,7 @@ func TestMSTeamsRedactedURL(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -227,7 +227,7 @@ func TestMSTeamsReadingURLFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
@ -19,12 +19,11 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -43,7 +42,7 @@ const (
|
|||
type Notifier struct {
|
||||
conf *config.MSTeamsV2Config
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
webhookURL *config.SecretURL
|
||||
|
@ -80,7 +79,7 @@ type teamsMessage struct {
|
|||
}
|
||||
|
||||
// New returns a new notifier that uses the Microsoft Teams Power Platform connector.
|
||||
func New(c *config.MSTeamsV2Config, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.MSTeamsV2Config, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "msteamsv2", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -105,7 +104,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
return false, err
|
||||
}
|
||||
|
||||
level.Debug(n.logger).Log("incident", key)
|
||||
n.logger.Debug("extracted group key", "key", key)
|
||||
|
||||
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
|
||||
tmpl := notify.TmplText(n.tmpl, data, &err)
|
||||
|
|
|
@ -25,9 +25,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -46,7 +46,7 @@ func TestMSTeamsV2Retry(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -79,7 +79,7 @@ func TestNotifier_Notify_WithReason(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -156,7 +156,7 @@ func TestMSTeamsV2Templating(t *testing.T) {
|
|||
t.Run(tc.title, func(t *testing.T) {
|
||||
tc.cfg.WebhookURL = &config.SecretURL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -195,7 +195,7 @@ func TestMSTeamsV2RedactedURL(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -217,7 +217,7 @@ func TestMSTeamsV2ReadingURLFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
@ -17,14 +17,13 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"sort"
"sync"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/cespare/xxhash/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"

@ -242,14 +241,14 @@ func RouteID(ctx context.Context) (string, bool) {

// A Stage processes alerts under the constraints of the given context.
type Stage interface {
Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error)
Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error)
}

// StageFunc wraps a function to represent a Stage.
type StageFunc func(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error)
type StageFunc func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error)

// Exec implements Stage interface.
func (f StageFunc) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
func (f StageFunc) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
return f(ctx, l, alerts...)
}
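With the interface updated, every `Stage` implementation in the notification pipeline now receives a `*slog.Logger`. A self-contained sketch of what a stage looks like against the new signature; the `Alert` type here is a local stand-in for `types.Alert` so the example compiles on its own:

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"
)

// Alert stands in for github.com/prometheus/alertmanager/types.Alert.
type Alert struct{ Name string }

// Stage mirrors the updated interface: the logger parameter is *slog.Logger.
type Stage interface {
	Exec(ctx context.Context, l *slog.Logger, alerts ...*Alert) (context.Context, []*Alert, error)
}

// countStage is a toy stage that just logs how many alerts flow through it.
type countStage struct{}

func (countStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*Alert) (context.Context, []*Alert, error) {
	l.Debug("stage executed", "alerts", len(alerts))
	return ctx, alerts, nil
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	var s Stage = countStage{}
	_, out, err := s.Exec(context.Background(), logger, &Alert{Name: "HighLatency"})
	fmt.Println(len(out), err)
}
```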
@ -452,7 +451,7 @@ func createReceiverStage(
|
|||
type RoutingStage map[string]Stage
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (rs RoutingStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (rs RoutingStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
receiver, ok := ReceiverName(ctx)
|
||||
if !ok {
|
||||
return ctx, nil, errors.New("receiver missing")
|
||||
|
@ -470,7 +469,7 @@ func (rs RoutingStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.
|
|||
type MultiStage []Stage
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (ms MultiStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (ms MultiStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
var err error
|
||||
for _, s := range ms {
|
||||
if len(alerts) == 0 {
|
||||
|
@ -490,7 +489,7 @@ type FanoutStage []Stage
|
|||
|
||||
// Exec attempts to execute all stages concurrently and discards the results.
|
||||
// It returns its input alerts and a types.MultiError if one or more stages fail.
|
||||
func (fs FanoutStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (fs FanoutStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
me types.MultiError
|
||||
|
@ -523,7 +522,7 @@ func NewGossipSettleStage(p Peer) *GossipSettleStage {
|
|||
return &GossipSettleStage{peer: p}
|
||||
}
|
||||
|
||||
func (n *GossipSettleStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (n *GossipSettleStage) Exec(ctx context.Context, _ *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
if n.peer != nil {
|
||||
if err := n.peer.WaitReady(ctx); err != nil {
|
||||
return ctx, nil, err
|
||||
|
@ -551,7 +550,7 @@ func NewMuteStage(m types.Muter, metrics *Metrics) *MuteStage {
|
|||
}
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (n *MuteStage) Exec(ctx context.Context, logger log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (n *MuteStage) Exec(ctx context.Context, logger *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
var (
|
||||
filtered []*types.Alert
|
||||
muted []*types.Alert
|
||||
|
@ -577,7 +576,7 @@ func (n *MuteStage) Exec(ctx context.Context, logger log.Logger, alerts ...*type
|
|||
default:
|
||||
}
|
||||
n.metrics.numNotificationSuppressedTotal.WithLabelValues(reason).Add(float64(len(muted)))
|
||||
level.Debug(logger).Log("msg", "Notifications will not be sent for muted alerts", "alerts", fmt.Sprintf("%v", muted), "reason", reason)
|
||||
logger.Debug("Notifications will not be sent for muted alerts", "alerts", fmt.Sprintf("%v", muted), "reason", reason)
|
||||
}
|
||||
|
||||
return ctx, filtered, nil
|
||||
|
@ -597,7 +596,7 @@ func NewWaitStage(wait func() time.Duration) *WaitStage {
|
|||
}
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (ws *WaitStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (ws *WaitStage) Exec(ctx context.Context, _ *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
select {
|
||||
case <-time.After(ws.wait()):
|
||||
case <-ctx.Done():
|
||||
|
@ -698,7 +697,7 @@ func (n *DedupStage) needsUpdate(entry *nflogpb.Entry, firing, resolved map[uint
|
|||
}
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (n *DedupStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (n *DedupStage) Exec(ctx context.Context, _ *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
gkey, ok := GroupKey(ctx)
|
||||
if !ok {
|
||||
return ctx, nil, errors.New("group key missing")
|
||||
|
@ -774,7 +773,7 @@ func NewRetryStage(i Integration, groupName string, metrics *Metrics) *RetryStag
|
|||
}
|
||||
}
|
||||
|
||||
func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (r RetryStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
r.metrics.numNotifications.WithLabelValues(r.labelValues...).Inc()
|
||||
ctx, alerts, err := r.exec(ctx, l, alerts...)
|
||||
|
||||
|
@ -789,7 +788,7 @@ func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Ale
|
|||
return ctx, alerts, err
|
||||
}
|
||||
|
||||
func (r RetryStage) exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (r RetryStage) exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
var sent []*types.Alert
|
||||
|
||||
// If we shouldn't send notifications for resolved alerts, but there are only
|
||||
|
@ -823,9 +822,9 @@ func (r RetryStage) exec(ctx context.Context, l log.Logger, alerts ...*types.Ale
iErr error
)

l = log.With(l, "receiver", r.groupName, "integration", r.integration.String())
l = l.With("receiver", r.groupName, "integration", r.integration.String())
if groupKey, ok := GroupKey(ctx); ok {
l = log.With(l, "aggrGroup", groupKey)
l = l.With("aggrGroup", groupKey)
}

for {
@ -864,19 +863,21 @@ func (r RetryStage) exec(ctx context.Context, l log.Logger, alerts ...*types.Ale
if ctx.Err() == nil {
if iErr == nil || err.Error() != iErr.Error() {
// Log the error if the context isn't done and the error isn't the same as before.
level.Warn(l).Log("msg", "Notify attempt failed, will retry later", "attempts", i, "err", err)
l.Warn("Notify attempt failed, will retry later", "attempts", i, "err", err)
}
// Save this error to be able to return the last seen error by an
// integration upon context timeout.
iErr = err
}
} else {
lvl := level.Info(l)
l := l.With("attempts", i, "duration", dur)
if i <= 1 {
lvl = level.Debug(log.With(l, "alerts", fmt.Sprintf("%v", alerts)))
l = l.With("alerts", fmt.Sprintf("%v", alerts))
l.Debug("Notify success")
} else {
l.Info("Notify success")
}

lvl.Log("msg", "Notify success", "attempts", i, "duration", dur)
return ctx, alerts, nil
}
case <-ctx.Done():
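The success path in `RetryStage.exec` is one of the few places where the conversion is not line-for-line: instead of selecting a go-kit `lvl` value up front and emitting one `Log` call, the slog version attaches the shared attributes with `With` and then chooses `Debug` or `Info` at the call site. A standalone sketch of that shape, using dummy values for the attempt count, duration, and alert list:

```go
package main

import (
	"fmt"
	"log/slog"
	"os"
	"time"
)

func main() {
	l := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	attempts := 1                     // dummy data for the example
	dur := 120 * time.Millisecond     // dummy data for the example
	alerts := []string{"HighLatency"} // stand-in for []*types.Alert

	// Shared attributes go onto a child logger once...
	l = l.With("attempts", attempts, "duration", dur)

	// ...and the level is picked per call instead of via level.Debug/level.Info wrappers.
	if attempts <= 1 {
		l = l.With("alerts", fmt.Sprintf("%v", alerts))
		l.Debug("Notify success")
	} else {
		l.Info("Notify success")
	}
}
```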
@ -912,7 +913,7 @@ func NewSetNotifiesStage(l NotificationLog, recv *nflogpb.Receiver) *SetNotifies
|
|||
}
|
||||
|
||||
// Exec implements the Stage interface.
|
||||
func (n SetNotifiesStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (n SetNotifiesStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
gkey, ok := GroupKey(ctx)
|
||||
if !ok {
|
||||
return ctx, nil, errors.New("group key missing")
|
||||
|
@ -951,7 +952,7 @@ func NewTimeMuteStage(muter types.TimeMuter, marker types.GroupMarker, metrics *
|
|||
|
||||
// Exec implements the stage interface for TimeMuteStage.
|
||||
// TimeMuteStage is responsible for muting alerts whose route is not in an active time.
|
||||
func (tms TimeMuteStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (tms TimeMuteStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
routeID, ok := RouteID(ctx)
|
||||
if !ok {
|
||||
return ctx, nil, errors.New("route ID missing")
|
||||
|
@ -986,7 +987,7 @@ func (tms TimeMuteStage) Exec(ctx context.Context, l log.Logger, alerts ...*type
|
|||
// If the current time is inside a mute time, all alerts are removed from the pipeline.
|
||||
if muted {
|
||||
tms.metrics.numNotificationSuppressedTotal.WithLabelValues(SuppressedReasonMuteTimeInterval).Add(float64(len(alerts)))
|
||||
level.Debug(l).Log("msg", "Notifications not sent, route is within mute time", "alerts", len(alerts))
|
||||
l.Debug("Notifications not sent, route is within mute time", "alerts", len(alerts))
|
||||
return ctx, nil, nil
|
||||
}
|
||||
|
||||
|
@ -1001,7 +1002,7 @@ func NewTimeActiveStage(muter types.TimeMuter, marker types.GroupMarker, metrics
|
|||
|
||||
// Exec implements the stage interface for TimeActiveStage.
|
||||
// TimeActiveStage is responsible for muting alerts whose route is not in an active time.
|
||||
func (tas TimeActiveStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
func (tas TimeActiveStage) Exec(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
routeID, ok := RouteID(ctx)
|
||||
if !ok {
|
||||
return ctx, nil, errors.New("route ID missing")
|
||||
|
@ -1044,7 +1045,7 @@ func (tas TimeActiveStage) Exec(ctx context.Context, l log.Logger, alerts ...*ty
|
|||
// If the current time is not inside an active time, all alerts are removed from the pipeline
|
||||
if !active {
|
||||
tas.metrics.numNotificationSuppressedTotal.WithLabelValues(SuppressedReasonActiveTimeInterval).Add(float64(len(alerts)))
|
||||
level.Debug(l).Log("msg", "Notifications not sent, route is not within active time", "alerts", len(alerts))
|
||||
l.Debug("Notifications not sent, route is not within active time", "alerts", len(alerts))
|
||||
return ctx, nil, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -18,16 +18,17 @@ import (
"errors"
"fmt"
"io"
"log/slog"
"reflect"
"sort"
"strings"
"testing"
"time"

"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"

"github.com/prometheus/alertmanager/featurecontrol"
@ -53,7 +54,7 @@ func (f notifierFunc) Notify(ctx context.Context, alerts ...*types.Alert) (bool,

type failStage struct{}

func (s failStage) Exec(ctx context.Context, l log.Logger, as ...*types.Alert) (context.Context, []*types.Alert, error) {
func (s failStage) Exec(ctx context.Context, l *slog.Logger, as ...*types.Alert) (context.Context, []*types.Alert, error) {
	return ctx, nil, fmt.Errorf("some error")
}
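On the test side the only change is which no-op logger gets constructed: `promslog.NewNopLogger()` from `github.com/prometheus/common/promslog` returns a `*slog.Logger` that discards its output, filling the role go-kit's `log.NewNopLogger()` used to. A hedged sketch of a test using it; the constructor under test is hypothetical and only stands in for the various notifier `New(...)` functions:

```go
package example

import (
	"log/slog"
	"testing"

	"github.com/prometheus/common/promslog"
)

// buildThing is a hypothetical constructor standing in for the notifier
// New(...) functions that now take a *slog.Logger.
func buildThing(l *slog.Logger) string {
	l.Info("constructed") // silently dropped by the nop logger
	return "ok"
}

func TestBuildThing(t *testing.T) {
	// promslog.NewNopLogger() keeps tests quiet without any go-kit dependency.
	if got := buildThing(promslog.NewNopLogger()); got != "ok" {
		t.Fatalf("unexpected result: %s", got)
	}
}
```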
@ -232,12 +233,12 @@ func TestDedupStage(t *testing.T) {
|
|||
|
||||
ctx := context.Background()
|
||||
|
||||
_, _, err := s.Exec(ctx, log.NewNopLogger())
|
||||
_, _, err := s.Exec(ctx, promslog.NewNopLogger())
|
||||
require.EqualError(t, err, "group key missing")
|
||||
|
||||
ctx = WithGroupKey(ctx, "1")
|
||||
|
||||
_, _, err = s.Exec(ctx, log.NewNopLogger())
|
||||
_, _, err = s.Exec(ctx, promslog.NewNopLogger())
|
||||
require.EqualError(t, err, "repeat interval missing")
|
||||
|
||||
ctx = WithRepeatInterval(ctx, time.Hour)
|
||||
|
@ -248,14 +249,14 @@ func TestDedupStage(t *testing.T) {
|
|||
s.nflog = &testNflog{
|
||||
qerr: errors.New("bad things"),
|
||||
}
|
||||
ctx, _, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
ctx, _, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.EqualError(t, err, "bad things")
|
||||
|
||||
// ... but skip ErrNotFound.
|
||||
s.nflog = &testNflog{
|
||||
qerr: nflog.ErrNotFound,
|
||||
}
|
||||
ctx, res, err := s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
ctx, res, err := s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err, "unexpected error on not found log entry")
|
||||
require.Equal(t, alerts, res, "input alerts differ from result alerts")
|
||||
|
||||
|
@ -266,7 +267,7 @@ func TestDedupStage(t *testing.T) {
|
|||
{FiringAlerts: []uint64{1, 2, 3}},
|
||||
},
|
||||
}
|
||||
ctx, _, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
ctx, _, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.Contains(t, err.Error(), "result size")
|
||||
|
||||
// Must return no error and no alerts no need to update.
|
||||
|
@ -280,7 +281,7 @@ func TestDedupStage(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
ctx, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
ctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, res, "unexpected alerts returned")
|
||||
|
||||
|
@ -295,7 +296,7 @@ func TestDedupStage(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
_, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res, "unexpected alerts returned")
|
||||
}
|
||||
|
@ -308,7 +309,7 @@ func TestMultiStage(t *testing.T) {
|
|||
)
|
||||
|
||||
stage := MultiStage{
|
||||
StageFunc(func(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
if !reflect.DeepEqual(alerts, alerts1) {
|
||||
t.Fatal("Input not equal to input of MultiStage")
|
||||
}
|
||||
|
@ -316,7 +317,7 @@ func TestMultiStage(t *testing.T) {
|
|||
ctx = context.WithValue(ctx, "key", "value")
|
||||
return ctx, alerts2, nil
|
||||
}),
|
||||
StageFunc(func(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
if !reflect.DeepEqual(alerts, alerts2) {
|
||||
t.Fatal("Input not equal to output of previous stage")
|
||||
}
|
||||
|
@ -328,7 +329,7 @@ func TestMultiStage(t *testing.T) {
|
|||
}),
|
||||
}
|
||||
|
||||
_, alerts, err := stage.Exec(context.Background(), log.NewNopLogger(), alerts1...)
|
||||
_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), alerts1...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -345,7 +346,7 @@ func TestMultiStageFailure(t *testing.T) {
|
|||
stage = MultiStage{s1}
|
||||
)
|
||||
|
||||
_, _, err := stage.Exec(ctx, log.NewNopLogger(), nil)
|
||||
_, _, err := stage.Exec(ctx, promslog.NewNopLogger(), nil)
|
||||
if err.Error() != "some error" {
|
||||
t.Fatal("Errors were not propagated correctly by MultiStage")
|
||||
}
|
||||
|
@ -358,7 +359,7 @@ func TestRoutingStage(t *testing.T) {
|
|||
)
|
||||
|
||||
stage := RoutingStage{
|
||||
"name": StageFunc(func(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
"name": StageFunc(func(ctx context.Context, l *slog.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) {
|
||||
if !reflect.DeepEqual(alerts, alerts1) {
|
||||
t.Fatal("Input not equal to input of RoutingStage")
|
||||
}
|
||||
|
@ -369,7 +370,7 @@ func TestRoutingStage(t *testing.T) {
|
|||
|
||||
ctx := WithReceiverName(context.Background(), "name")
|
||||
|
||||
_, alerts, err := stage.Exec(ctx, log.NewNopLogger(), alerts1...)
|
||||
_, alerts, err := stage.Exec(ctx, promslog.NewNopLogger(), alerts1...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -407,7 +408,7 @@ func TestRetryStageWithError(t *testing.T) {
|
|||
ctx = WithFiringAlerts(ctx, []uint64{0})
|
||||
|
||||
// Notify with a recoverable error should retry and succeed.
|
||||
resctx, res, err := r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.Equal(t, alerts, sent)
|
||||
|
@ -417,7 +418,7 @@ func TestRetryStageWithError(t *testing.T) {
|
|||
sent = sent[:0]
|
||||
fail = true
|
||||
retry = false
|
||||
resctx, _, err = r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, _, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.Error(t, err)
|
||||
require.NotNil(t, resctx)
|
||||
}
|
||||
|
@ -460,7 +461,7 @@ func TestRetryStageWithErrorCode(t *testing.T) {
|
|||
ctx = WithFiringAlerts(ctx, []uint64{0})
|
||||
|
||||
// Notify with a non-recoverable error.
|
||||
resctx, _, err := r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, _, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
counter := r.metrics.numTotalFailedNotifications
|
||||
|
||||
require.Equal(t, testData.expectedCount, int(prom_testutil.ToFloat64(counter.WithLabelValues(r.integration.Name(), testData.reasonlabel))))
|
||||
|
@ -494,7 +495,7 @@ func TestRetryStageWithContextCanceled(t *testing.T) {
|
|||
ctx = WithFiringAlerts(ctx, []uint64{0})
|
||||
|
||||
// Notify with a non-recoverable error.
|
||||
resctx, _, err := r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, _, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
counter := r.metrics.numTotalFailedNotifications
|
||||
|
||||
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues(r.integration.Name(), ContextCanceledReason.String()))))
|
||||
|
@ -529,14 +530,14 @@ func TestRetryStageNoResolved(t *testing.T) {
|
|||
|
||||
ctx := context.Background()
|
||||
|
||||
resctx, res, err := r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.EqualError(t, err, "firing alerts missing")
|
||||
require.Nil(t, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
||||
ctx = WithFiringAlerts(ctx, []uint64{0})
|
||||
|
||||
resctx, res, err = r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.Equal(t, []*types.Alert{alerts[1]}, sent)
|
||||
|
@ -547,7 +548,7 @@ func TestRetryStageNoResolved(t *testing.T) {
|
|||
ctx = WithFiringAlerts(ctx, []uint64{})
|
||||
alerts[1].Alert.EndsAt = time.Now().Add(-time.Hour)
|
||||
|
||||
resctx, res, err = r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.Equal(t, []*types.Alert{}, sent)
|
||||
|
@ -581,7 +582,7 @@ func TestRetryStageSendResolved(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
ctx = WithFiringAlerts(ctx, []uint64{0})
|
||||
|
||||
resctx, res, err := r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err := r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.Equal(t, alerts, sent)
|
||||
|
@ -592,7 +593,7 @@ func TestRetryStageSendResolved(t *testing.T) {
|
|||
ctx = WithFiringAlerts(ctx, []uint64{})
|
||||
alerts[1].Alert.EndsAt = time.Now().Add(-time.Hour)
|
||||
|
||||
resctx, res, err = r.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = r.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.Equal(t, alerts, sent)
|
||||
|
@ -608,21 +609,21 @@ func TestSetNotifiesStage(t *testing.T) {
|
|||
alerts := []*types.Alert{{}, {}, {}}
|
||||
ctx := context.Background()
|
||||
|
||||
resctx, res, err := s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err := s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.EqualError(t, err, "group key missing")
|
||||
require.Nil(t, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
||||
ctx = WithGroupKey(ctx, "1")
|
||||
|
||||
resctx, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.EqualError(t, err, "firing alerts missing")
|
||||
require.Nil(t, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
||||
ctx = WithFiringAlerts(ctx, []uint64{0, 1, 2})
|
||||
|
||||
resctx, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.EqualError(t, err, "resolved alerts missing")
|
||||
require.Nil(t, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
@ -638,7 +639,7 @@ func TestSetNotifiesStage(t *testing.T) {
|
|||
require.Equal(t, 2*time.Hour, expiry)
|
||||
return nil
|
||||
}
|
||||
resctx, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
@ -654,7 +655,7 @@ func TestSetNotifiesStage(t *testing.T) {
|
|||
require.Equal(t, 2*time.Hour, expiry)
|
||||
return nil
|
||||
}
|
||||
resctx, res, err = s.Exec(ctx, log.NewNopLogger(), alerts...)
|
||||
resctx, res, err = s.Exec(ctx, promslog.NewNopLogger(), alerts...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, alerts, res)
|
||||
require.NotNil(t, resctx)
|
||||
|
@ -694,7 +695,7 @@ func TestMuteStage(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
_, alerts, err := stage.Exec(context.Background(), log.NewNopLogger(), inAlerts...)
|
||||
_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -728,7 +729,7 @@ func TestMuteStageWithSilences(t *testing.T) {
|
|||
|
||||
reg := prometheus.NewRegistry()
|
||||
marker := types.NewMarker(reg)
|
||||
silencer := silence.NewSilencer(silences, marker, log.NewNopLogger())
|
||||
silencer := silence.NewSilencer(silences, marker, promslog.NewNopLogger())
|
||||
metrics := NewMetrics(reg, featurecontrol.NoopFlags{})
|
||||
stage := NewMuteStage(silencer, metrics)
|
||||
|
||||
|
@ -760,7 +761,7 @@ func TestMuteStageWithSilences(t *testing.T) {
|
|||
// number. This is expected to get unsilenced by the stage.
|
||||
marker.SetActiveOrSilenced(inAlerts[1].Fingerprint(), 0, []string{"123"}, nil)
|
||||
|
||||
_, alerts, err := stage.Exec(context.Background(), log.NewNopLogger(), inAlerts...)
|
||||
_, alerts, err := stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -779,7 +780,7 @@ func TestMuteStageWithSilences(t *testing.T) {
|
|||
}
|
||||
|
||||
// Do it again to exercise the version tracking of silences.
|
||||
_, alerts, err = stage.Exec(context.Background(), log.NewNopLogger(), inAlerts...)
|
||||
_, alerts, err = stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -803,7 +804,7 @@ func TestMuteStageWithSilences(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, alerts, err = stage.Exec(context.Background(), log.NewNopLogger(), inAlerts...)
|
||||
_, alerts, err = stage.Exec(context.Background(), promslog.NewNopLogger(), inAlerts...)
|
||||
if err != nil {
|
||||
t.Fatalf("Exec failed: %s", err)
|
||||
}
|
||||
|
@ -908,7 +909,7 @@ func TestTimeMuteStage(t *testing.T) {
|
|||
ctx = WithMuteTimeIntervals(ctx, muteTimeIntervalNames)
|
||||
ctx = WithRouteID(ctx, "route1")
|
||||
|
||||
_, active, err := st.Exec(ctx, log.NewNopLogger(), test.alerts...)
|
||||
_, active, err := st.Exec(ctx, promslog.NewNopLogger(), test.alerts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(test.mutedBy) == 0 {
|
||||
|
@ -1026,7 +1027,7 @@ func TestTimeActiveStage(t *testing.T) {
|
|||
ctx = WithMuteTimeIntervals(ctx, nil)
|
||||
ctx = WithRouteID(ctx, "route1")
|
||||
|
||||
_, active, err := st.Exec(ctx, log.NewNopLogger(), test.alerts...)
|
||||
_, active, err := st.Exec(ctx, promslog.NewNopLogger(), test.alerts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(test.mutedBy) == 0 {
|
||||
|
|
|
@ -18,12 +18,11 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -40,13 +39,13 @@ const maxMessageLenRunes = 130
|
|||
type Notifier struct {
|
||||
conf *config.OpsGenieConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
}
|
||||
|
||||
// New returns a new OpsGenie notifier.
|
||||
func New(c *config.OpsGenieConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.OpsGenieConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "opsgenie", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -135,7 +134,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h
|
|||
}
|
||||
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
|
||||
|
||||
level.Debug(n.logger).Log("alert", key)
|
||||
n.logger.Debug("extracted group key", "key", key)
|
||||
|
||||
tmpl := notify.TmplText(n.tmpl, data, &err)
|
||||
|
||||
|
@ -175,7 +174,7 @@ func (n *Notifier) createRequests(ctx context.Context, as ...*types.Alert) ([]*h
|
|||
default:
|
||||
message, truncated := notify.TruncateInRunes(tmpl(n.conf.Message), maxMessageLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated message", "alert", key, "max_runes", maxMessageLenRunes)
|
||||
n.logger.Warn("Truncated message", "alert", key, "max_runes", maxMessageLenRunes)
|
||||
}
|
||||
|
||||
createEndpointURL := n.conf.APIURL.Copy()
|
||||
|
|
|
@ -23,9 +23,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -40,7 +40,7 @@ func TestOpsGenieRetry(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -63,7 +63,7 @@ func TestOpsGenieRedactedURL(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -88,7 +88,7 @@ func TestGettingOpsGegineApikeyFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -100,7 +100,7 @@ func TestOpsGenie(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed to parse URL: %v", err)
|
||||
}
|
||||
logger := log.NewNopLogger()
|
||||
logger := promslog.NewNopLogger()
|
||||
tmpl := test.CreateTmpl(t)
|
||||
|
||||
for _, tc := range []struct {
|
||||
|
@ -287,7 +287,7 @@ func TestOpsGenieWithUpdate(t *testing.T) {
|
|||
APIURL: &config.URL{URL: u},
|
||||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
}
|
||||
notifierWithUpdate, err := New(&opsGenieConfigWithUpdate, tmpl, log.NewNopLogger())
|
||||
notifierWithUpdate, err := New(&opsGenieConfigWithUpdate, tmpl, promslog.NewNopLogger())
|
||||
alert := &types.Alert{
|
||||
Alert: model.Alert{
|
||||
StartsAt: time.Now(),
|
||||
|
|
|
@ -20,13 +20,12 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/units"
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
@ -48,14 +47,14 @@ const (
|
|||
type Notifier struct {
|
||||
conf *config.PagerdutyConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
apiV1 string // for tests.
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
}
|
||||
|
||||
// New returns a new PagerDuty notifier.
|
||||
func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.PagerdutyConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -133,7 +132,7 @@ func (n *Notifier) encodeMessage(msg *pagerDutyMessage) (bytes.Buffer, error) {
|
|||
}
|
||||
|
||||
warningMsg := fmt.Sprintf("Truncated Details because message of size %s exceeds limit %s", units.MetricBytes(buf.Len()).String(), units.MetricBytes(maxEventSize).String())
|
||||
level.Warn(n.logger).Log("msg", warningMsg)
|
||||
n.logger.Warn(warningMsg)
|
||||
|
||||
buf.Reset()
|
||||
if err := json.NewEncoder(&buf).Encode(msg); err != nil {
|
||||
|
@ -157,7 +156,7 @@ func (n *Notifier) notifyV1(
|
|||
|
||||
description, truncated := notify.TruncateInRunes(tmpl(n.conf.Description), maxV1DescriptionLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated description", "key", key, "max_runes", maxV1DescriptionLenRunes)
|
||||
n.logger.Warn("Truncated description", "key", key, "max_runes", maxV1DescriptionLenRunes)
|
||||
}
|
||||
|
||||
serviceKey := string(n.conf.ServiceKey)
|
||||
|
@ -222,7 +221,7 @@ func (n *Notifier) notifyV2(
|
|||
|
||||
summary, truncated := notify.TruncateInRunes(tmpl(n.conf.Description), maxV2SummaryLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated summary", "key", key, "max_runes", maxV2SummaryLenRunes)
|
||||
n.logger.Warn("Truncated summary", "key", key, "max_runes", maxV2SummaryLenRunes)
|
||||
}
|
||||
|
||||
routingKey := string(n.conf.RoutingKey)
|
||||
|
@ -319,7 +318,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
eventType = pagerDutyEventResolve
|
||||
}
|
||||
|
||||
level.Debug(n.logger).Log("incident", key, "eventType", eventType)
|
||||
n.logger.Debug("extracted group key", "key", key, "eventType", eventType)
|
||||
|
||||
details := make(map[string]string, len(n.conf.Details))
|
||||
for k, v := range n.conf.Details {
|
||||
|
|
|
@ -27,9 +27,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -45,7 +45,7 @@ func TestPagerDutyRetryV1(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -63,7 +63,7 @@ func TestPagerDutyRetryV2(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -85,7 +85,7 @@ func TestPagerDutyRedactedURLV1(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
notifier.apiV1 = u.String()
|
||||
|
@ -105,7 +105,7 @@ func TestPagerDutyRedactedURLV2(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -128,7 +128,7 @@ func TestPagerDutyV1ServiceKeyFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
notifier.apiV1 = u.String()
|
||||
|
@ -153,7 +153,7 @@ func TestPagerDutyV2RoutingKeyFromFile(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -251,7 +251,7 @@ func TestPagerDutyTemplating(t *testing.T) {
|
|||
t.Run(tc.title, func(t *testing.T) {
|
||||
tc.cfg.URL = &config.URL{URL: u}
|
||||
tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pd, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
if pd.apiV1 != "" {
|
||||
pd.apiV1 = u.String()
|
||||
|
@ -340,7 +340,7 @@ func TestEventSizeEnforcement(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -363,7 +363,7 @@ func TestEventSizeEnforcement(t *testing.T) {
|
|||
HTTPConfig: &commoncfg.HTTPClientConfig{},
|
||||
},
|
||||
test.CreateTmpl(t),
|
||||
log.NewNopLogger(),
|
||||
promslog.NewNopLogger(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -478,7 +478,7 @@ func TestPagerDutyEmptySrcHref(t *testing.T) {
|
|||
Links: links,
|
||||
}
|
||||
|
||||
pagerDuty, err := New(&pagerDutyConfig, test.CreateTmpl(t), log.NewNopLogger())
|
||||
pagerDuty, err := New(&pagerDutyConfig, test.CreateTmpl(t), promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
|
|
@ -16,14 +16,13 @@ package pushover
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
commoncfg "github.com/prometheus/common/config"
|
||||
|
||||
"github.com/prometheus/alertmanager/config"
|
||||
|
@ -45,14 +44,14 @@ const (
|
|||
type Notifier struct {
|
||||
conf *config.PushoverConfig
|
||||
tmpl *template.Template
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
client *http.Client
|
||||
retrier *notify.Retrier
|
||||
apiURL string // for tests.
|
||||
}
|
||||
|
||||
// New returns a new Pushover notifier.
|
||||
func New(c *config.PushoverConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
func New(c *config.PushoverConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
|
||||
client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pushover", httpOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -75,7 +74,8 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
}
|
||||
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
|
||||
|
||||
level.Debug(n.logger).Log("incident", key)
|
||||
// @tjhop: should this use `group` for the keyval like most other notify implementations?
|
||||
n.logger.Debug("extracted group key", "incident", key)
|
||||
|
||||
var (
|
||||
err error
|
||||
|
@ -113,7 +113,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
|
||||
title, truncated := notify.TruncateInRunes(tmpl(n.conf.Title), maxTitleLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated title", "incident", key, "max_runes", maxTitleLenRunes)
|
||||
n.logger.Warn("Truncated title", "incident", key, "max_runes", maxTitleLenRunes)
|
||||
}
|
||||
parameters.Add("title", title)
|
||||
|
||||
|
@ -126,7 +126,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
|
||||
message, truncated = notify.TruncateInRunes(message, maxMessageLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated message", "incident", key, "max_runes", maxMessageLenRunes)
|
||||
n.logger.Warn("Truncated message", "incident", key, "max_runes", maxMessageLenRunes)
|
||||
}
|
||||
message = strings.TrimSpace(message)
|
||||
if message == "" {
|
||||
|
@ -137,7 +137,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
|
||||
supplementaryURL, truncated := notify.TruncateInRunes(tmpl(n.conf.URL), maxURLLenRunes)
|
||||
if truncated {
|
||||
level.Warn(n.logger).Log("msg", "Truncated URL", "incident", key, "max_runes", maxURLLenRunes)
|
||||
n.logger.Warn("Truncated URL", "incident", key, "max_runes", maxURLLenRunes)
|
||||
}
|
||||
parameters.Add("url", supplementaryURL)
|
||||
parameters.Add("url_title", tmpl(n.conf.URLTitle))
|
||||
|
@ -163,7 +163,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
|
|||
}
|
||||
u.RawQuery = parameters.Encode()
|
||||
// Don't log the URL as it contains secret data (see #1825).
|
||||
level.Debug(n.logger).Log("msg", "Sending message", "incident", key)
|
||||
n.logger.Debug("Sending message", "incident", key)
|
||||
resp, err := notify.PostText(ctx, n.client, u.String(), nil)
|
||||
if err != nil {
|
||||
return true, notify.RedactURL(err)
|
||||
|
|
|

@@ -18,8 +18,8 @@ import (
 "os"
 "testing"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -32,7 +32,7 @@ func TestPushoverRetry(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)
 for statusCode, expected := range test.RetryTests(test.DefaultRetryCodes()) {

@@ -53,7 +53,7 @@ func TestPushoverRedactedURL(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)
 notifier.apiURL = u.String()

@@ -78,7 +78,7 @@ func TestPushoverReadingUserKeyFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 notifier.apiURL = apiURL.String()
 require.NoError(t, err)

@@ -103,7 +103,7 @@ func TestPushoverReadingTokenFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 notifier.apiURL = apiURL.String()
 require.NoError(t, err)

@@ -19,12 +19,11 @@ import (
 "encoding/json"
 "fmt"
 "io"
+ "log/slog"
 "net/http"
 "os"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -38,7 +37,7 @@ const maxTitleLenRunes = 1024
 type Notifier struct {
 conf *config.RocketchatConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier
 token string

@@ -88,7 +87,7 @@ func (t *rocketchatRoundTripper) RoundTrip(req *http.Request) (res *http.Respons
 }

 // New returns a new Rocketchat notification handler.
- func New(c *config.RocketchatConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.RocketchatConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "rocketchat", httpOpts...)
 if err != nil {
 return nil, err

@@ -157,7 +156,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
 if err != nil {
 return false, err
 }
- level.Warn(n.logger).Log("msg", "Truncated title", "key", key, "max_runes", maxTitleLenRunes)
+ n.logger.Warn("Truncated title", "key", key, "max_runes", maxTitleLenRunes)
 }
 att := &Attachment{
 Title: title,

@@ -19,8 +19,8 @@ import (
 "os"
 "testing"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -36,7 +36,7 @@ func TestRocketchatRetry(t *testing.T) {
 TokenID: &secret,
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -60,7 +60,7 @@ func TestGettingRocketchatTokenFromFile(t *testing.T) {
 APIURL: &config.URL{URL: &url.URL{Scheme: "http", Host: "example.com", Path: "/api/v1/"}},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)
 }

@@ -19,12 +19,11 @@ import (
 "encoding/json"
 "fmt"
 "io"
+ "log/slog"
 "net/http"
 "os"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -40,7 +39,7 @@ const maxTitleLenRunes = 1024
 type Notifier struct {
 conf *config.SlackConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier

@@ -48,7 +47,7 @@ type Notifier struct {
 }

 // New returns a new Slack notification handler.
- func New(c *config.SlackConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.SlackConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "slack", httpOpts...)
 if err != nil {
 return nil, err

@@ -112,7 +111,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
 if err != nil {
 return false, err
 }
- level.Warn(n.logger).Log("msg", "Truncated title", "key", key, "max_runes", maxTitleLenRunes)
+ n.logger.Warn("Truncated title", "key", key, "max_runes", maxTitleLenRunes)
 }
 att := &attachment{
 Title: title,

@@ -25,9 +25,9 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -42,7 +42,7 @@ func TestSlackRetry(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -62,7 +62,7 @@ func TestSlackRedactedURL(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -84,7 +84,7 @@ func TestGettingSlackURLFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -106,7 +106,7 @@ func TestTrimmingSlackURLFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -199,7 +199,7 @@ func TestNotifier_Notify_WithReason(t *testing.T) {
 Channel: "channelname",
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -17,6 +17,7 @@ import (
 "context"
 "errors"
 "fmt"
+ "log/slog"
 "net/http"
 "strings"
 "unicode/utf8"

@@ -27,8 +28,6 @@ import (
 "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
 "github.com/aws/aws-sdk-go/aws/session"
 "github.com/aws/aws-sdk-go/service/sns"
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -41,13 +40,13 @@ import (
 type Notifier struct {
 conf *config.SNSConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier
 }

 // New returns a new SNS notification handler.
- func New(c *config.SNSConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.SNSConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "sns", httpOpts...)
 if err != nil {
 return nil, err

@@ -94,7 +93,7 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err
 return true, err
 }

- level.Debug(n.logger).Log("msg", "SNS message successfully published", "message_id", publishOutput.MessageId, "sequence number", publishOutput.SequenceNumber)
+ n.logger.Debug("SNS message successfully published", "message_id", publishOutput.MessageId, "sequence number", publishOutput.SequenceNumber)

 return false, nil
 }

@@ -19,8 +19,8 @@ import (
 "strings"
 "testing"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
+ "github.com/prometheus/common/promslog"
 "github.com/prometheus/common/sigv4"
 "github.com/stretchr/testify/require"

@@ -29,7 +29,7 @@ import (
 "github.com/prometheus/alertmanager/types"
 )

- var logger = log.NewNopLogger()
+ var logger = promslog.NewNopLogger()

 func TestValidateAndTruncateMessage(t *testing.T) {
 sBuff := make([]byte, 257*1024)

@@ -16,12 +16,11 @@ package telegram
 import (
 "context"
 "fmt"
+ "log/slog"
 "net/http"
 "os"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"
 "gopkg.in/telebot.v3"

@@ -38,13 +37,13 @@ const maxMessageLenRunes = 4096
 type Notifier struct {
 conf *config.TelegramConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *telebot.Bot
 retrier *notify.Retrier
 }

 // New returns a new Telegram notification handler.
- func New(conf *config.TelegramConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(conf *config.TelegramConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 httpclient, err := commoncfg.NewClientFromConfig(*conf.HTTPConfig, "telegram", httpOpts...)
 if err != nil {
 return nil, err

@@ -82,7 +81,7 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err

 messageText, truncated := notify.TruncateInRunes(tmpl(n.conf.Message), maxMessageLenRunes)
 if truncated {
- level.Warn(n.logger).Log("msg", "Truncated message", "alert", key, "max_runes", maxMessageLenRunes)
+ n.logger.Warn("Truncated message", "alert", key, "max_runes", maxMessageLenRunes)
 }

 n.client.Token, err = n.getBotToken()

@@ -99,7 +98,7 @@ func (n *Notifier) Notify(ctx context.Context, alert ...*types.Alert) (bool, err
 if err != nil {
 return true, err
 }
- level.Debug(n.logger).Log("msg", "Telegram message successfully published", "message_id", message.ID, "chat_id", message.Chat.ID)
+ n.logger.Debug("Telegram message successfully published", "message_id", message.ID, "chat_id", message.Chat.ID)

 return false, nil
 }

@@ -25,9 +25,9 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"
 "gopkg.in/yaml.v2"

@@ -76,7 +76,7 @@ func TestTelegramRetry(t *testing.T) {
 APIUrl: &fakeURL,
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -142,7 +142,7 @@ func TestTelegramNotify(t *testing.T) {

 tc.cfg.APIUrl = &config.URL{URL: u}

- notifier, err := New(&tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
+ notifier, err := New(&tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
 require.NoError(t, err)

 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

@@ -19,12 +19,11 @@ import (
 "errors"
 "fmt"
 "io"
+ "log/slog"
 "net/http"
 "net/url"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 "github.com/prometheus/common/version"

 "github.com/prometheus/alertmanager/template"

@@ -180,14 +179,14 @@ func (k Key) String() string {
 }

 // GetTemplateData creates the template data from the context and the alerts.
- func GetTemplateData(ctx context.Context, tmpl *template.Template, alerts []*types.Alert, l log.Logger) *template.Data {
+ func GetTemplateData(ctx context.Context, tmpl *template.Template, alerts []*types.Alert, l *slog.Logger) *template.Data {
 recv, ok := ReceiverName(ctx)
 if !ok {
- level.Error(l).Log("msg", "Missing receiver")
+ l.Error("Missing receiver")
 }
 groupLabels, ok := GroupLabels(ctx)
 if !ok {
- level.Error(l).Log("msg", "Missing group labels")
+ l.Error("Missing group labels")
 }
 return tmpl.Data(recv, groupLabels, alerts...)
 }
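The GetTemplateData change above is the hub the notifier conversions hang off: shared helpers now accept a *slog.Logger and log errors directly. A small stand-alone sketch of that calling convention (checkReceiver, its arguments, and the handler setup are hypothetical, written only to mirror the shape of the change):

    package main

    import (
        "log/slog"
        "os"
    )

    // checkReceiver mimics GetTemplateData's error handling: the helper takes a
    // *slog.Logger and calls Error directly instead of level.Error(l).Log("msg", ...).
    func checkReceiver(recv string, ok bool, l *slog.Logger) string {
        if !ok {
            l.Error("Missing receiver")
        }
        return recv
    }

    func main() {
        l := slog.New(slog.NewTextHandler(os.Stderr, nil))
        _ = checkReceiver("", false, l) // emits "Missing receiver" at error level
    }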

@@ -18,12 +18,11 @@ import (
 "context"
 "encoding/json"
 "fmt"
+ "log/slog"
 "net/http"
 "os"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

@@ -40,13 +39,13 @@ const maxMessageLenRunes = 20480
 type Notifier struct {
 conf *config.VictorOpsConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier
 }

 // New returns a new VictorOps notifier.
- func New(c *config.VictorOpsConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.VictorOpsConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "victorops", httpOpts...)
 if err != nil {
 return nil, err

@@ -142,7 +141,7 @@ func (n *Notifier) createVictorOpsPayload(ctx context.Context, as ...*types.Aler

 stateMessage, truncated := notify.TruncateInRunes(stateMessage, maxMessageLenRunes)
 if truncated {
- level.Warn(n.logger).Log("msg", "Truncated state_message", "incident", key, "max_runes", maxMessageLenRunes)
+ n.logger.Warn("Truncated state_message", "incident", key, "max_runes", maxMessageLenRunes)
 }

 msg := map[string]string{

@@ -24,9 +24,9 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -36,7 +36,7 @@ import (
 )

 func TestVictorOpsCustomFields(t *testing.T) {
- logger := log.NewNopLogger()
+ logger := promslog.NewNopLogger()
 tmpl := test.CreateTmpl(t)

 url, err := url.Parse("http://nowhere.com")

@@ -92,7 +92,7 @@ func TestVictorOpsRetry(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)
 for statusCode, expected := range test.RetryTests(test.DefaultRetryCodes()) {

@@ -113,7 +113,7 @@ func TestVictorOpsRedactedURL(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -137,7 +137,7 @@ func TestVictorOpsReadingApiKeyFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -207,7 +207,7 @@ func TestVictorOpsTemplating(t *testing.T) {
 tc.cfg.HTTPConfig = &commoncfg.HTTPClientConfig{}
 tc.cfg.APIURL = &config.URL{URL: u}
 tc.cfg.APIKey = "test"
- vo, err := New(tc.cfg, test.CreateTmpl(t), log.NewNopLogger())
+ vo, err := New(tc.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
 require.NoError(t, err)
 ctx := context.Background()
 ctx = notify.WithGroupKey(ctx, "1")

@@ -17,10 +17,9 @@ import (
 "bytes"
 "context"
 "encoding/json"
+ "log/slog"
 "net/http"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -38,13 +37,13 @@ const (
 type Notifier struct {
 conf *config.WebexConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier
 }

 // New returns a new Webex notifier.
- func New(c *config.WebexConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.WebexConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "webex", httpOpts...)
 if err != nil {
 return nil, err

@@ -73,7 +72,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
 return false, err
 }

- level.Debug(n.logger).Log("incident", key)
+ n.logger.Debug("extracted group key", "key", key)

 data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
 tmpl := notify.TmplText(n.tmpl, data, &err)

@@ -88,7 +87,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)

 message, truncated := notify.TruncateInBytes(message, maxMessageSize)
 if truncated {
- level.Debug(n.logger).Log("msg", "message truncated due to exceeding maximum allowed length by webex", "truncated_message", message)
+ n.logger.Debug("message truncated due to exceeding maximum allowed length by webex", "truncated_message", message)
 }

 w := webhook{

@@ -23,9 +23,9 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -44,7 +44,7 @@ func TestWebexRetry(t *testing.T) {
 APIURL: &config.URL{URL: testWebhookURL},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -123,7 +123,7 @@ func TestWebexTemplating(t *testing.T) {

 tt.cfg.APIURL = &config.URL{URL: u}
 tt.cfg.HTTPConfig = tt.commonCfg
- notifierWebex, err := New(tt.cfg, test.CreateTmpl(t), log.NewNopLogger())
+ notifierWebex, err := New(tt.cfg, test.CreateTmpl(t), promslog.NewNopLogger())
 require.NoError(t, err)

 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

@@ -18,12 +18,11 @@ import (
 "context"
 "encoding/json"
 "fmt"
+ "log/slog"
 "net/http"
 "os"
 "strings"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -36,13 +35,13 @@ import (
 type Notifier struct {
 conf *config.WebhookConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client
 retrier *notify.Retrier
 }

 // New returns a new Webhook.
- func New(conf *config.WebhookConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(conf *config.WebhookConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*conf.HTTPConfig, "webhook", httpOpts...)
 if err != nil {
 return nil, err

@@ -83,9 +82,13 @@ func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, er

 groupKey, err := notify.ExtractGroupKey(ctx)
 if err != nil {
- level.Error(n.logger).Log("err", err)
+ // @tjhop: should we `return false, err` here as we do in most
+ // other Notify() implementations?
+ n.logger.Error("error extracting group key", "err", err)
 }

+ // @tjhop: should we debug log the key here like most other Notify() implementations?

 msg := &Message{
 Version: "4",
 Data: data,

@@ -22,8 +22,8 @@ import (
 "os"
 "testing"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -42,7 +42,7 @@ func TestWebhookRetry(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 if err != nil {
 require.NoError(t, err)

@@ -111,7 +111,7 @@ func TestWebhookRedactedURL(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -133,7 +133,7 @@ func TestWebhookReadingURLFromFile(t *testing.T) {
 HTTPConfig: &commoncfg.HTTPClientConfig{},
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -20,12 +20,11 @@ import (
 "errors"
 "fmt"
 "io"
+ "log/slog"
 "net/http"
 "net/url"
 "time"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 commoncfg "github.com/prometheus/common/config"

 "github.com/prometheus/alertmanager/config"

@@ -38,7 +37,7 @@ import (
 type Notifier struct {
 conf *config.WechatConfig
 tmpl *template.Template
- logger log.Logger
+ logger *slog.Logger
 client *http.Client

 accessToken string

@@ -71,7 +70,7 @@ type weChatResponse struct {
 }

 // New returns a new Wechat notifier.
- func New(c *config.WechatConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
+ func New(c *config.WechatConfig, t *template.Template, l *slog.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) {
 client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "wechat", httpOpts...)
 if err != nil {
 return nil, err

@@ -87,7 +86,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
 return false, err
 }

- level.Debug(n.logger).Log("incident", key)
+ n.logger.Debug("extracted group key", "key", key)
 data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)

 tmpl := notify.TmplText(n.tmpl, data, &err)

@@ -175,7 +174,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
 if err != nil {
 return true, err
 }
- level.Debug(n.logger).Log("response", string(body), "incident", key)
+ n.logger.Debug(string(body), "incident", key)

 var weResp weChatResponse
 if err := json.Unmarshal(body, &weResp); err != nil {

@@ -18,8 +18,8 @@ import (
 "net/http"
 "testing"

- "github.com/go-kit/log"
 commoncfg "github.com/prometheus/common/config"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/config"

@@ -39,7 +39,7 @@ func TestWechatRedactedURLOnInitialAuthentication(t *testing.T) {
 APISecret: config.Secret(secret),
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -61,7 +61,7 @@ func TestWechatRedactedURLOnNotify(t *testing.T) {
 APISecret: config.Secret(secret),
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -84,7 +84,7 @@ func TestWechatMessageTypeSelector(t *testing.T) {
 MessageType: "markdown",
 },
 test.CreateTmpl(t),
- log.NewNopLogger(),
+ promslog.NewNopLogger(),
 )
 require.NoError(t, err)

@@ -15,11 +15,10 @@ package mem

 import (
 "context"
+ "log/slog"
 "sync"
 "time"

- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/common/model"

@@ -44,7 +43,7 @@ type Alerts struct {

 callback AlertStoreCallback

- logger log.Logger
+ logger *slog.Logger
 }

 type AlertStoreCallback interface {

@@ -86,7 +85,7 @@ func (a *Alerts) registerMetrics(r prometheus.Registerer) {
 }

 // NewAlerts returns a new alert provider.
- func NewAlerts(ctx context.Context, m types.AlertMarker, intervalGC time.Duration, alertCallback AlertStoreCallback, l log.Logger, r prometheus.Registerer) (*Alerts, error) {
+ func NewAlerts(ctx context.Context, m types.AlertMarker, intervalGC time.Duration, alertCallback AlertStoreCallback, l *slog.Logger, r prometheus.Registerer) (*Alerts, error) {
 if alertCallback == nil {
 alertCallback = noopCallback{}
 }

@@ -98,7 +97,7 @@ func NewAlerts(ctx context.Context, m types.AlertMarker, intervalGC time.Duratio
 cancel: cancel,
 listeners: map[int]listeningAlerts{},
 next: 0,
- logger: log.With(l, "component", "provider"),
+ logger: l.With("component", "provider"),
 callback: alertCallback,
 }

@@ -239,12 +238,12 @@ func (a *Alerts) Put(alerts ...*types.Alert) error {
 }

 if err := a.callback.PreStore(alert, existing); err != nil {
- level.Error(a.logger).Log("msg", "pre-store callback returned error on set alert", "err", err)
+ a.logger.Error("pre-store callback returned error on set alert", "err", err)
 continue
 }

 if err := a.alerts.Set(alert); err != nil {
- level.Error(a.logger).Log("msg", "error on set alert", "err", err)
+ a.logger.Error("error on set alert", "err", err)
 continue
 }
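Scoped loggers change shape as well: go-kit's package-level log.With(l, "component", "provider") becomes the With method on *slog.Logger, as seen in NewAlerts above. A brief sketch, again with an illustrative handler rather than Alertmanager's real wiring:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        base := slog.New(slog.NewTextHandler(os.Stderr, nil))

        // Before: logger := log.With(base, "component", "provider")
        logger := base.With("component", "provider")

        // Every record emitted through this logger now carries component=provider.
        logger.Error("error on set alert", "err", "example error")
    }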

@@ -23,10 +23,10 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 "github.com/kylelemons/godebug/pretty"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"
 "go.uber.org/atomic"

@@ -87,7 +87,7 @@ func init() {
 // a listener can not unsubscribe as the lock is hold by `alerts.Lock`.
 func TestAlertsSubscribePutStarvation(t *testing.T) {
 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -142,7 +142,7 @@ func TestDeadLock(t *testing.T) {

 marker := types.NewMarker(prometheus.NewRegistry())
 // Run gc every 5 milliseconds to increase the possibility of a deadlock with Subscribe()
- alerts, err := NewAlerts(context.Background(), marker, 5*time.Millisecond, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 5*time.Millisecond, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -195,7 +195,7 @@ func TestDeadLock(t *testing.T) {

 func TestAlertsPut(t *testing.T) {
 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -223,7 +223,7 @@ func TestAlertsSubscribe(t *testing.T) {

 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- alerts, err := NewAlerts(ctx, marker, 30*time.Minute, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(ctx, marker, 30*time.Minute, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -300,7 +300,7 @@ func TestAlertsSubscribe(t *testing.T) {

 func TestAlertsGetPending(t *testing.T) {
 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -343,7 +343,7 @@ func TestAlertsGetPending(t *testing.T) {

 func TestAlertsGC(t *testing.T) {
 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, noopCallback{}, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, noopCallback{}, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -380,7 +380,7 @@ func TestAlertsStoreCallback(t *testing.T) {
 cb := &limitCountCallback{limit: 3}

 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, cb, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, cb, promslog.NewNopLogger(), nil)
 if err != nil {
 t.Fatal(err)
 }

@@ -443,7 +443,7 @@ func TestAlertsStoreCallback(t *testing.T) {

 func TestAlerts_Count(t *testing.T) {
 marker := types.NewMarker(prometheus.NewRegistry())
- alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, nil, log.NewNopLogger(), nil)
+ alerts, err := NewAlerts(context.Background(), marker, 200*time.Millisecond, nil, promslog.NewNopLogger(), nil)
 require.NoError(t, err)

 states := []types.AlertState{types.AlertStateActive, types.AlertStateSuppressed, types.AlertStateUnprocessed}

@@ -565,7 +565,7 @@ func (l *limitCountCallback) PostDelete(_ *types.Alert) {

 func TestAlertsConcurrently(t *testing.T) {
 callback := &limitCountCallback{limit: 100}
- a, err := NewAlerts(context.Background(), types.NewMarker(prometheus.NewRegistry()), time.Millisecond, callback, log.NewNopLogger(), nil)
+ a, err := NewAlerts(context.Background(), types.NewMarker(prometheus.NewRegistry()), time.Millisecond, callback, promslog.NewNopLogger(), nil)
 require.NoError(t, err)

 stopc := make(chan struct{})

@@ -20,6 +20,7 @@ import (
 "errors"
 "fmt"
 "io"
+ "log/slog"
 "math/rand"
 "os"
 "reflect"

@@ -29,12 +30,11 @@ import (
 "time"

 "github.com/coder/quartz"
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
 uuid "github.com/gofrs/uuid"
 "github.com/matttproud/golang_protobuf_extensions/pbutil"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"

 "github.com/prometheus/alertmanager/cluster"
 "github.com/prometheus/alertmanager/matcher/compat"

@@ -97,11 +97,11 @@ func (c matcherCache) add(s *pb.Silence) (labels.Matchers, error) {
 type Silencer struct {
 silences *Silences
 marker types.AlertMarker
- logger log.Logger
+ logger *slog.Logger
 }

 // NewSilencer returns a new Silencer.
- func NewSilencer(s *Silences, m types.AlertMarker, l log.Logger) *Silencer {
+ func NewSilencer(s *Silences, m types.AlertMarker, l *slog.Logger) *Silencer {
 return &Silencer{
 silences: s,
 marker: m,

@@ -148,7 +148,7 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool {
 )
 }
 if err != nil {
- level.Error(s.logger).Log("msg", "Querying silences failed, alerts might not get silenced correctly", "err", err)
+ s.logger.Error("Querying silences failed, alerts might not get silenced correctly", "err", err)
 }
 if len(allSils) == 0 {
 // Easy case, neither active nor pending silences anymore.

@@ -171,8 +171,8 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool {
 // Do nothing, silence has expired in the meantime.
 }
 }
- level.Debug(s.logger).Log(
- "msg", "determined current silences state",
+ s.logger.Debug(
+ "determined current silences state",
 "now", now,
 "total", len(allSils),
 "active", len(activeIDs),

@@ -190,7 +190,7 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool {
 type Silences struct {
 clock quartz.Clock

- logger log.Logger
+ logger *slog.Logger
 metrics *metrics
 retention time.Duration
 limits Limits

@@ -241,7 +241,7 @@ func newSilenceMetricByState(s *Silences, st types.SilenceState) prometheus.Gaug
 func() float64 {
 count, err := s.CountState(st)
 if err != nil {
- level.Error(s.logger).Log("msg", "Counting silences failed", "err", err)
+ s.logger.Error("Counting silences failed", "err", err)
 }
 return float64(count)
 },

@@ -332,7 +332,7 @@ type Options struct {
 Limits Limits

 // A logger used by background processing.
- Logger log.Logger
+ Logger *slog.Logger
 Metrics prometheus.Registerer
 }

@@ -352,7 +352,7 @@ func New(o Options) (*Silences, error) {
 s := &Silences{
 clock: quartz.NewReal(),
 mc: matcherCache{},
- logger: log.NewNopLogger(),
+ logger: promslog.NewNopLogger(),
 retention: o.Retention,
 limits: o.Limits,
 broadcast: func([]byte) {},

@@ -369,7 +369,7 @@ func New(o Options) (*Silences, error) {
 if !os.IsNotExist(err) {
 return nil, err
 }
- level.Debug(s.logger).Log("msg", "silences snapshot file doesn't exist", "err", err)
+ s.logger.Debug("silences snapshot file doesn't exist", "err", err)
 } else {
 o.SnapshotReader = r
 defer r.Close()

@@ -394,7 +394,7 @@ func (s *Silences) nowUTC() time.Time {
 // If not nil, the last argument is an override for what to do as part of the maintenance - for advanced usage.
 func (s *Silences) Maintenance(interval time.Duration, snapf string, stopc <-chan struct{}, override MaintenanceFunc) {
 if interval == 0 || stopc == nil {
- level.Error(s.logger).Log("msg", "interval or stop signal are missing - not running maintenance")
+ s.logger.Error("interval or stop signal are missing - not running maintenance")
 return
 }
 t := s.clock.NewTicker(interval)

@@ -427,7 +427,7 @@ func (s *Silences) Maintenance(interval time.Duration, snapf string, stopc <-cha

 runMaintenance := func(do MaintenanceFunc) error {
 s.metrics.maintenanceTotal.Inc()
- level.Debug(s.logger).Log("msg", "Running maintenance")
+ s.logger.Debug("Running maintenance")
 start := s.nowUTC()
 size, err := do()
 s.metrics.snapshotSize.Set(float64(size))

@@ -435,7 +435,7 @@ func (s *Silences) Maintenance(interval time.Duration, snapf string, stopc <-cha
 s.metrics.maintenanceErrorsTotal.Inc()
 return err
 }
- level.Debug(s.logger).Log("msg", "Maintenance done", "duration", s.clock.Since(start), "size", size)
+ s.logger.Debug("Maintenance done", "duration", s.clock.Since(start), "size", size)
 return nil
 }

@@ -446,7 +446,8 @@ Loop:
 break Loop
 case <-t.C:
 if err := runMaintenance(doMaintenance); err != nil {
- level.Info(s.logger).Log("msg", "Running maintenance failed", "err", err)
+ // @tjhop: this should probably log at error level
+ s.logger.Info("Running maintenance failed", "err", err)
 }
 }
 }

@@ -456,7 +457,8 @@ Loop:
 return
 }
 if err := runMaintenance(doMaintenance); err != nil {
- level.Info(s.logger).Log("msg", "Creating shutdown snapshot failed", "err", err)
+ // @tjhop: this should probably log at error level
+ s.logger.Info("Creating shutdown snapshot failed", "err", err)
 }
 }

@@ -940,7 +942,7 @@ func (s *Silences) Merge(b []byte) error {
 // all nodes already.
 s.broadcast(b)
 s.metrics.propagatedMessagesTotal.Inc()
- level.Debug(s.logger).Log("msg", "Gossiping new silence", "silence", e)
+ s.logger.Debug("Gossiping new silence", "silence", e)
 }
 }
 }
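New() above seeds the Silences struct with promslog.NewNopLogger() so that a caller which never sets Options.Logger still gets a usable logger. The same defensive default can be expressed as a tiny helper; the sketch below is illustrative only, and ensureLogger is a hypothetical name rather than code from this diff:

    package main

    import (
        "log/slog"

        "github.com/prometheus/common/promslog"
    )

    // ensureLogger falls back to a no-op slog logger when the caller passed nil,
    // so later calls such as l.Debug(...) never dereference a nil pointer.
    func ensureLogger(l *slog.Logger) *slog.Logger {
        if l == nil {
            return promslog.NewNopLogger()
        }
        return l
    }

    func main() {
        var l *slog.Logger // nil on purpose
        ensureLogger(l).Debug("Running maintenance")
    }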

@@ -19,9 +19,9 @@ import (
 "time"

 "github.com/coder/quartz"
- "github.com/go-kit/log"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/silence/silencepb"

@@ -74,7 +74,7 @@ func benchmarkMutes(b *testing.B, n int) {
 require.Len(b, silenceIDs, n)

 m := types.NewMarker(prometheus.NewRegistry())
- s := NewSilencer(silences, m, log.NewNopLogger())
+ s := NewSilencer(silences, m, promslog.NewNopLogger())

 b.ResetTimer()
 for i := 0; i < b.N; i++ {

@@ -25,11 +25,11 @@ import (
 "time"

 "github.com/coder/quartz"
- "github.com/go-kit/log"
 "github.com/matttproud/golang_protobuf_extensions/pbutil"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/client_golang/prometheus/testutil"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"
 "go.uber.org/atomic"

@@ -298,7 +298,7 @@ func TestSilences_Maintenance_DefaultMaintenanceFuncDoesntCrash(t *testing.T) {
 f, err := os.CreateTemp("", "snapshot")
 require.NoError(t, err, "creating temp file failed")
 clock := quartz.NewMock(t)
- s := &Silences{st: state{}, logger: log.NewNopLogger(), clock: clock, metrics: newMetrics(nil, nil)}
+ s := &Silences{st: state{}, logger: promslog.NewNopLogger(), clock: clock, metrics: newMetrics(nil, nil)}
 stopc := make(chan struct{})

 done := make(chan struct{})

@@ -319,7 +319,7 @@ func TestSilences_Maintenance_SupportsCustomCallback(t *testing.T) {
 require.NoError(t, err, "creating temp file failed")
 clock := quartz.NewMock(t)
 reg := prometheus.NewRegistry()
- s := &Silences{st: state{}, logger: log.NewNopLogger(), clock: clock}
+ s := &Silences{st: state{}, logger: promslog.NewNopLogger(), clock: clock}
 s.metrics = newMetrics(reg, s)
 stopc := make(chan struct{})

@@ -1378,7 +1378,7 @@ func TestSilencer(t *testing.T) {
 now := ss.nowUTC()

 m := types.NewMarker(prometheus.NewRegistry())
- s := NewSilencer(ss, m, log.NewNopLogger())
+ s := NewSilencer(ss, m, promslog.NewNopLogger())

 require.False(t, s.Mutes(model.LabelSet{"foo": "bar"}), "expected alert not silenced without any silences")

@@ -1618,14 +1618,14 @@ func TestValidateUTF8Matcher(t *testing.T) {
 }

 // Change the mode to UTF-8 mode.
- ff, err := featurecontrol.NewFlags(log.NewNopLogger(), featurecontrol.FeatureUTF8StrictMode)
+ ff, err := featurecontrol.NewFlags(promslog.NewNopLogger(), featurecontrol.FeatureUTF8StrictMode)
 require.NoError(t, err)
- compat.InitFromFlags(log.NewNopLogger(), ff)
+ compat.InitFromFlags(promslog.NewNopLogger(), ff)

 // Restore the mode to classic at the end of the test.
- ff, err = featurecontrol.NewFlags(log.NewNopLogger(), featurecontrol.FeatureClassicMode)
+ ff, err = featurecontrol.NewFlags(promslog.NewNopLogger(), featurecontrol.FeatureClassicMode)
 require.NoError(t, err)
- defer compat.InitFromFlags(log.NewNopLogger(), ff)
+ defer compat.InitFromFlags(promslog.NewNopLogger(), ff)

 for _, c := range cases {
 checkErr(t, c.err, validateMatcher(c.m))

@@ -20,9 +20,9 @@ import (
 "testing"
 "time"

- "github.com/go-kit/log"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
 "github.com/stretchr/testify/require"

 "github.com/prometheus/alertmanager/featurecontrol"

@@ -396,14 +396,14 @@ func TestValidateUTF8Ls(t *testing.T) {
 }}

 // Change the mode to UTF-8 mode.
- ff, err := featurecontrol.NewFlags(log.NewNopLogger(), featurecontrol.FeatureUTF8StrictMode)
+ ff, err := featurecontrol.NewFlags(promslog.NewNopLogger(), featurecontrol.FeatureUTF8StrictMode)
 require.NoError(t, err)
- compat.InitFromFlags(log.NewNopLogger(), ff)
+ compat.InitFromFlags(promslog.NewNopLogger(), ff)

 // Restore the mode to classic at the end of the test.
- ff, err = featurecontrol.NewFlags(log.NewNopLogger(), featurecontrol.FeatureClassicMode)
+ ff, err = featurecontrol.NewFlags(promslog.NewNopLogger(), featurecontrol.FeatureClassicMode)
 require.NoError(t, err)
- defer compat.InitFromFlags(log.NewNopLogger(), ff)
+ defer compat.InitFromFlags(promslog.NewNopLogger(), ff)

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {

@@ -16,11 +16,10 @@ package reactapp
 import (
 "fmt"
 "io"
+ "log/slog"
 "net/http"
 "path"

- "github.com/go-kit/log"
-
 "github.com/prometheus/common/route"
 "github.com/prometheus/common/server"
 )

@@ -30,7 +29,7 @@ var reactRouterPaths = []string{
 "/status",
 }

- func Register(r *route.Router, logger log.Logger) {
+ func Register(r *route.Router, logger *slog.Logger) {
 serveReactApp := func(w http.ResponseWriter, r *http.Request) {
 f, err := Assets.Open("/dist/index.html")
 if err != nil {

@@ -15,11 +15,11 @@ package ui

 import (
 "fmt"
+ "log/slog"
 "net/http"
 _ "net/http/pprof" // Comment this line to disable pprof endpoint.
 "path"

- "github.com/go-kit/log"
 "github.com/prometheus/client_golang/prometheus/promhttp"
 "github.com/prometheus/common/route"

@@ -27,7 +27,7 @@ import (
 )

 // Register registers handlers to serve files for the web interface.
- func Register(r *route.Router, reloadCh chan<- chan error, logger log.Logger) {
+ func Register(r *route.Router, reloadCh chan<- chan error, logger *slog.Logger) {
 r.Get("/metrics", promhttp.Handler().ServeHTTP)

 r.Get("/", func(w http.ResponseWriter, req *http.Request) {