notifier: fix increment of metric prometheus_notifications_errors_total

Previously, prometheus_notifications_errors_total was incremented by
one whenever a batch of alerts was affected by an error while being
sent to a specific Alertmanager. However, the corresponding metric
prometheus_notifications_sent_total, which counts all alerts that were
sent (including those whose send attempt ended in an error), is
incremented by the batch size, i.e. by the number of alerts.

Therefore, the ratio of the two metrics, which the mixin uses for the
PrometheusErrorSendingAlertsToSomeAlertmanagers alert, mixes per-batch
error counts with per-alert sent counts and understates the actual
error rate.
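
For example, if a single batch of 64 alerts fails to be delivered, the
old counting increments prometheus_notifications_errors_total by 1
while prometheus_notifications_sent_total grows by 64, so the alert
reports an error rate of roughly 1.6% even though 100% of the alerts
in the batch were affected. The ratio the mixin evaluates is roughly
the following (sketch only; the actual expression templates in the
configured Prometheus selector, elided here as {...}):

    rate(prometheus_notifications_errors_total{...}[5m])
  /
    rate(prometheus_notifications_sent_total{...}[5m])
  * 100
  > 1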

This commit changes the increment of
prometheus_notifications_errors_total to the number of alerts that
were sent in the attempt that ended in an error. It also adjusts the
metric's help string accordingly and makes the wording of the alert in
the mixin more precise.
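
For illustration only (not part of this change), the following
self-contained Go sketch shows the counting pattern the notifier
follows after this commit; the helper name recordSend, the metric
variable names, and the URLs are made up:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/testutil"
)

var (
    sent = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "prometheus_notifications_sent_total"},
        []string{"alertmanager"},
    )
    errs = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "prometheus_notifications_errors_total"},
        []string{"alertmanager"},
    )
)

// recordSend accounts for one send attempt of a batch of count alerts
// to the given Alertmanager URL.
func recordSend(url string, count int, err error) {
    if err != nil {
        // Count every alert in the failed batch, not just one per batch.
        errs.WithLabelValues(url).Add(float64(count))
    }
    // Sent alerts are counted per alert, whether the attempt failed or not.
    sent.WithLabelValues(url).Add(float64(count))
}

func main() {
    recordSend("http://am-0:9093", 64, fmt.Errorf("connection refused"))
    recordSend("http://am-1:9093", 64, nil)

    e := testutil.ToFloat64(errs.WithLabelValues("http://am-0:9093"))
    s := testutil.ToFloat64(sent.WithLabelValues("http://am-0:9093"))
    // Prints 100% for am-0; the old per-batch counting would yield ~1.6%.
    fmt.Printf("error ratio for am-0: %.0f%%\n", e/s*100)
}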

Signed-off-by: beorn7 <beorn@grafana.com>
beorn7 2024-11-20 12:58:03 +01:00
parent a6fb16fcb4
commit e01c5cefac
3 changed files with 7 additions and 6 deletions

@@ -2,6 +2,7 @@
 ## unreleased
+* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428
 * [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416
 ## 3.0.0 / 2024-11-14

@@ -84,8 +84,8 @@
     severity: 'warning',
   },
   annotations: {
-    summary: 'Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.',
-    description: '{{ printf "%%.1f" $value }}%% errors while sending alerts from Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}}.' % $._config,
+    summary: 'More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors.',
+    description: '{{ printf "%%.1f" $value }}%% of alerts sent by Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}} were affected by errors.' % $._config,
   },
 },
 {

@@ -160,7 +160,7 @@ func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanag
         Namespace: namespace,
         Subsystem: subsystem,
         Name:      "errors_total",
-        Help:      "Total number of errors sending alert notifications.",
+        Help:      "Total number of sent alerts affected by errors.",
     },
     []string{alertmanagerLabel},
 ),
@@ -619,13 +619,13 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
     if err := n.sendOne(ctx, client, url, payload); err != nil {
-        n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err)
-        n.metrics.errors.WithLabelValues(url).Inc()
+        n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err)
+        n.metrics.errors.WithLabelValues(url).Add(float64(count))
     } else {
         numSuccess.Inc()
     }
     n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds())
-    n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts)))
+    n.metrics.sent.WithLabelValues(url).Add(float64(count))
     wg.Done()
 }(ctx, ams.client, am.url().String(), payload, len(amAlerts))