feature: add native histogram support to latency metrics (#3737)
Note that this does not stop exposing the classic histogram series; for now it is up to the scrape config to decide whether to keep the classic series, the native ones, or both.

Signed-off-by: György Krajcsovits <gyorgy.krajcsovits@grafana.com>
parent d1fe4b7f6f
commit d85bef20d9
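Every hunk below applies the same three options. A minimal sketch of the pattern (not part of this commit; the metric name is invented, and client_golang v1.14 or newer is assumed, since that is where native histogram support landed as an experimental feature):

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Classic buckets stay in place; the NativeHistogram* fields
	// additionally enable a sparse native histogram on the same metric.
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds", // hypothetical name
		Help:    "Duration of example requests.",
		Buckets: prometheus.DefBuckets,
		// Consecutive native buckets grow by at most a factor of 1.1,
		// i.e. roughly 10% resolution per bucket.
		NativeHistogramBucketFactor: 1.1,
		// Cap on the number of native buckets; when it would be
		// exceeded, resolution is reduced (or the histogram is reset)
		// to get back under the cap.
		NativeHistogramMaxBucketNumber: 100,
		// Never reset more often than once per hour for that purpose.
		NativeHistogramMinResetDuration: 1 * time.Hour,
	})
	prometheus.MustRegister(latency)

	latency.Observe(0.042)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}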
@@ -73,6 +73,10 @@ func NewChannel(
 		Name:        "alertmanager_oversize_gossip_message_duration_seconds",
 		Help:        "Duration of oversized gossip message requests.",
 		ConstLabels: prometheus.Labels{"key": key},
+		Buckets:                         prometheus.DefBuckets,
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	})
 
 	reg.MustRegister(oversizeGossipDuration, oversizeGossipMessageFailureTotal, oversizeGossipMessageDroppedTotal, oversizeGossipMessageSentTotal)
@@ -107,6 +107,9 @@ func newDelegate(l log.Logger, reg prometheus.Registerer, p *Peer, retransmit in
 		Name:    "alertmanager_cluster_pings_seconds",
 		Help:    "Histogram of latencies for ping messages.",
 		Buckets: []float64{.005, .01, .025, .05, .1, .25, .5},
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	}, []string{"peer"},
 	)
 
@@ -67,6 +67,9 @@ var (
 		Name:    "alertmanager_http_request_duration_seconds",
 		Help:    "Histogram of latencies for HTTP requests.",
 		Buckets: []float64{.05, 0.1, .25, .5, .75, 1, 2, 5, 20, 60},
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	},
 	[]string{"handler", "method"},
 	)
@@ -141,6 +141,10 @@ func newMetrics(r prometheus.Registerer) *metrics {
 	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name: "alertmanager_nflog_query_duration_seconds",
 		Help: "Duration of notification log query evaluation.",
+		Buckets:                         prometheus.DefBuckets,
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	})
 	m.propagatedMessagesTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_nflog_gossip_messages_propagated_total",
@@ -295,6 +295,9 @@ func NewMetrics(r prometheus.Registerer, ff featurecontrol.Flagger) *Metrics {
 		Name:    "notification_latency_seconds",
 		Help:    "The latency of notifications in seconds.",
 		Buckets: []float64{1, 5, 10, 15, 20},
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	}, labels),
 	ff: ff,
 }
@@ -273,6 +273,10 @@ func newMetrics(r prometheus.Registerer, s *Silences) *metrics {
 	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
 		Name: "alertmanager_silences_query_duration_seconds",
 		Help: "Duration of silence query evaluation.",
+		Buckets:                         prometheus.DefBuckets,
+		NativeHistogramBucketFactor:     1.1,
+		NativeHistogramMaxBucketNumber:  100,
+		NativeHistogramMinResetDuration: 1 * time.Hour,
 	})
 	m.propagatedMessagesTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_silences_gossip_messages_propagated_total",
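On the wire, a native histogram is only transmitted in the protobuf exposition format, which is why the commit message leaves the classic/native choice to the scraper: recent Prometheus versions need --enable-feature=native-histograms to negotiate protobuf and ingest the native series, and the scrape_classic_histograms scrape option controls whether the classic series are kept as well. A small sketch (assuming the hypothetical endpoint from the example above) that fetches both formats:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

// fetch scrapes the metrics endpoint with a given Accept header and
// returns the raw body.
func fetch(accept string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/metrics", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", accept)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	// Text exposition format: carries the classic buckets only; native
	// histograms cannot be represented in it.
	text, err := fetch("text/plain")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("text format: %d bytes\n", len(text))

	// Protobuf exposition format: carries the native histogram as well
	// as the classic buckets, since this commit keeps both.
	pb, err := fetch("application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("protobuf format: %d bytes\n", len(pb))
}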