Use Registerer to Register All Metrics

* Made the queueCapacity Metric a Gauge so that it can be registered.
Author: Goutham Veeramachaneni
Date:   2017-04-01 00:14:30 +05:30
parent 7ba0a9e81a
commit f27ce34a13
3 changed files with 24 additions and 50 deletions
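The change is easier to see outside the diff. Below is a minimal, standalone sketch of the pattern the commit adopts (the queue type, helper names, and example_* metric names are hypothetical, not Prometheus source): the queue length is exposed through a GaugeFunc whose callback is sampled at scrape time, the fixed capacity through an ordinary Gauge set once, and both are handed to an injected Registerer instead of being exported through a hand-written prometheus.Collector.

// Standalone sketch of the registration pattern adopted here; the queue
// type and metric names are illustrative, not Prometheus code.
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

type queue struct {
    items []int
}

// len has the func() float64 shape that NewGaugeFunc expects.
func (q *queue) len() float64 { return float64(len(q.items)) }

func registerQueueMetrics(r prometheus.Registerer, q *queue, capacity int) {
    // queue length: evaluated lazily, at every scrape.
    length := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
        Name: "example_queue_length",
        Help: "Number of items currently in the queue.",
    }, q.len)

    // queue capacity: a constant value, but stored in a regular Gauge so
    // it can be registered like any other metric.
    capGauge := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "example_queue_capacity",
        Help: "Capacity of the queue.",
    })
    capGauge.Set(float64(capacity))

    // Mirror the nil guard in newAlertMetrics: a nil Registerer means
    // "construct the metrics but do not register them".
    if r != nil {
        r.MustRegister(length, capGauge)
    }
}

func main() {
    q := &queue{items: []int{1, 2, 3}}
    registerQueueMetrics(prometheus.DefaultRegisterer, q, 100)
    fmt.Println("queue metrics registered")
}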

cmd/prometheus/main.go

@@ -184,7 +184,6 @@ func Main() int {
     if instrumentedStorage, ok := localStorage.(prometheus.Collector); ok {
         prometheus.MustRegister(instrumentedStorage)
     }
-    prometheus.MustRegister(notifier)
     prometheus.MustRegister(configSuccess)
     prometheus.MustRegister(configSuccessTime)
notifier/notifier.go

@@ -83,11 +83,11 @@ type alertMetrics struct {
     errors        *prometheus.CounterVec
     sent          *prometheus.CounterVec
     dropped       prometheus.Counter
-    queueLength   prometheus.Gauge
-    queueCapacity prometheus.Metric
+    queueLength   prometheus.GaugeFunc
+    queueCapacity prometheus.Gauge
 }
-func newAlertMetrics(r prometheus.Registerer, o *Options) *alertMetrics {
+func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen func() float64) *alertMetrics {
     m := &alertMetrics{
         latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{
             Namespace: namespace,
@@ -119,29 +119,30 @@ func newAlertMetrics(r prometheus.Registerer, o *Options) *alertMetrics {
             Name:      "dropped_total",
             Help:      "Total number of alerts dropped due to errors when sending to Alertmanager.",
         }),
-        queueLength: prometheus.NewGauge(prometheus.GaugeOpts{
+        queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{
             Namespace: namespace,
             Subsystem: subsystem,
             Name:      "queue_length",
             Help:      "The number of alert notifications in the queue.",
+        }, queueLen),
+        queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{
+            Namespace: namespace,
+            Subsystem: subsystem,
+            Name:      "queue_capacity",
+            Help:      "The capacity of the alert notifications queue.",
         }),
-        queueCapacity: prometheus.MustNewConstMetric(
-            prometheus.NewDesc(
-                prometheus.BuildFQName(namespace, subsystem, "queue_capacity"),
-                "The capacity of the alert notifications queue.",
-                nil, nil,
-            ),
-            prometheus.GaugeValue,
-            float64(o.QueueCapacity),
-        ),
     }
+    m.queueCapacity.Set(float64(queueCap))
     if r != nil {
         r.MustRegister(
             m.latency,
             m.errors,
             m.sent,
             m.dropped,
+            m.queueLength,
+            m.queueCapacity,
         )
     }
@@ -156,14 +157,17 @@ func New(o *Options) *Notifier {
         o.Do = ctxhttp.Do
     }
-    return &Notifier{
-        queue:   make(model.Alerts, 0, o.QueueCapacity),
-        ctx:     ctx,
-        cancel:  cancel,
-        more:    make(chan struct{}, 1),
-        opts:    o,
-        metrics: newAlertMetrics(o.Registerer, o),
+    n := &Notifier{
+        queue:  make(model.Alerts, 0, o.QueueCapacity),
+        ctx:    ctx,
+        cancel: cancel,
+        more:   make(chan struct{}, 1),
+        opts:   o,
     }
+    queueLenFunc := func() float64 { return float64(n.queueLen()) }
+    n.metrics = newAlertMetrics(o.Registerer, o.QueueCapacity, queueLenFunc)
+    return n
 }
 // ApplyConfig updates the status state as the new config requires.
@@ -406,20 +410,6 @@ func (n *Notifier) Stop() {
     n.cancel()
 }
-// Describe implements prometheus.Collector.
-func (n *Notifier) Describe(ch chan<- *prometheus.Desc) {
-    ch <- n.metrics.queueCapacity.Desc()
-    ch <- n.metrics.queueLength.Desc()
-}
-// Collect implements prometheus.Collector.
-func (n *Notifier) Collect(ch chan<- prometheus.Metric) {
-    n.metrics.queueLength.Set(float64(n.queueLen()))
-    ch <- n.metrics.queueLength
-    ch <- n.metrics.queueCapacity
-}
 // alertmanager holds Alertmanager endpoint information.
 type alertmanager interface {
     url() string
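The deleted Describe/Collect pair existed only so the current queue length could be pushed at scrape time; a GaugeFunc already behaves that way, since its callback runs on every collection. A standalone sketch (hypothetical metric name, throwaway registry) showing the lazy evaluation:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    queueLen := 0
    g := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
        Name: "example_queue_length",
        Help: "Number of items in the queue.",
    }, func() float64 { return float64(queueLen) })

    reg := prometheus.NewRegistry()
    reg.MustRegister(g)

    // The callback runs at gather time, so the value picked up below is 7,
    // not the 0 the variable held when the metric was registered.
    queueLen = 7

    mfs, err := reg.Gather()
    if err != nil {
        panic(err)
    }
    fmt.Println(mfs[0].GetName(), mfs[0].GetMetric()[0].GetGauge().GetValue())
    // Output: example_queue_length 7
}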

notifier/notifier_test.go

@@ -25,7 +25,6 @@ import (
     "golang.org/x/net/context"
-    "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
 )
@@ -64,7 +63,6 @@ func TestPostPath(t *testing.T) {
 func TestHandlerNextBatch(t *testing.T) {
     h := New(&Options{})
-    defer unregisterMetrics()
     for i := range make([]struct{}, 2*maxBatchSize+1) {
         h.queue = append(h.queue, &model.Alert{
@@ -120,14 +118,6 @@ func alertsEqual(a, b model.Alerts) bool {
     return true
 }
-func unregisterMetrics() {
-    m := newAlertMetrics(nil, &Options{})
-    prometheus.DefaultRegisterer.Unregister(m.latency)
-    prometheus.DefaultRegisterer.Unregister(m.errors)
-    prometheus.DefaultRegisterer.Unregister(m.sent)
-    prometheus.DefaultRegisterer.Unregister(m.dropped)
-}
 func TestHandlerSendAll(t *testing.T) {
     var (
         expected model.Alerts
@@ -160,7 +150,6 @@ func TestHandlerSendAll(t *testing.T) {
     defer server2.Close()
     h := New(&Options{})
-    defer unregisterMetrics()
     h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
         ams: []alertmanager{
             alertmanagerMock{
@@ -228,7 +217,6 @@ func TestCustomDo(t *testing.T) {
             }, nil
         },
     })
-    defer unregisterMetrics()
     h.sendOne(context.Background(), nil, testURL, []byte(testBody))
@@ -251,7 +239,6 @@ func TestExternalLabels(t *testing.T) {
             },
         },
     })
-    defer unregisterMetrics()
     // This alert should get the external label attached.
     h.Send(&model.Alert{
@@ -306,7 +293,6 @@ func TestHandlerRelabel(t *testing.T) {
             },
         },
     })
-    defer unregisterMetrics()
     // This alert should be dropped due to the configuration
     h.Send(&model.Alert{
@@ -361,7 +347,6 @@ func TestHandlerQueueing(t *testing.T) {
     h := New(&Options{
         QueueCapacity: 3 * maxBatchSize,
     })
-    defer unregisterMetrics()
     h.alertmanagers = append(h.alertmanagers, &alertmanagerSet{
         ams: []alertmanager{
             alertmanagerMock{
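The unregisterMetrics helper and its deferred calls can go because New(&Options{}) leaves Registerer nil, and newAlertMetrics only registers when given a non-nil Registerer, so these tests never touch the default registry. If a test did want registered metrics, a throwaway registry would avoid any global cleanup; the sketch below is hypothetical (a made-up test name, and it assumes the client_golang/prometheus import that this commit removes from the test file):

func TestNotifierMetricsUseLocalRegistry(t *testing.T) {
    // A private registry is discarded with the test, so nothing global
    // has to be unregistered afterwards.
    reg := prometheus.NewRegistry()
    h := New(&Options{
        QueueCapacity: 10,
        Registerer:    reg,
    })
    defer h.Stop()

    if got := h.queueLen(); got != 0 {
        t.Fatalf("expected empty queue, got %d", got)
    }
}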