windows_exporter/pkg/collector/prometheus.go

//go:build windows

package collector

import (
	"context"
	"fmt"
	"log/slog"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"time"

	"github.com/prometheus-community/windows_exporter/internal/types"
	"github.com/prometheus/client_golang/prometheus"
)

// Interface guard.
var _ prometheus.Collector = (*Prometheus)(nil)

// Prometheus implements prometheus.Collector for a set of Windows MetricCollectors.
type Prometheus struct {
	maxScrapeDuration time.Duration
	logger            *slog.Logger
	metricCollectors  *MetricCollectors

	// Base metrics exposed by the exporter about the scrape itself.
	scrapeDurationDesc          *prometheus.Desc
	collectorScrapeDurationDesc *prometheus.Desc
	collectorScrapeSuccessDesc  *prometheus.Desc
	collectorScrapeTimeoutDesc  *prometheus.Desc
	snapshotDuration            *prometheus.Desc
}

type collectorStatus struct {
	name       string
	statusCode collectorStatusCode
}

type collectorStatusCode int

const (
	pending collectorStatusCode = iota
	success
	failed
)
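
// How the status codes surface in Collect below: success sets the
// per-collector collector_success metric to 1; pending means the collector
// was still running when the scrape timeout fired, which sets
// collector_timeout to 1; failed leaves both at 0.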

// NewPrometheusCollector returns a new Prometheus collector in which each of
// the MetricCollectors must return its metrics within the given timeout.
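//
// A minimal registration sketch (assumed wiring, not taken from this file:
// promhttp is github.com/prometheus/client_golang/prometheus/promhttp, and
// "collectors" stands for a *MetricCollectors value built elsewhere in this
// package):
//
//	prom := collectors.NewPrometheusCollector(10*time.Second, slog.Default())
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(prom)
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))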
func (c *MetricCollectors) NewPrometheusCollector(timeout time.Duration, logger *slog.Logger) *Prometheus {
	return &Prometheus{
		maxScrapeDuration: timeout,
		metricCollectors:  c,
		logger:            logger,
		scrapeDurationDesc: prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "exporter", "scrape_duration_seconds"),
			"windows_exporter: Total scrape duration.",
			nil,
			nil,
		),
		collectorScrapeDurationDesc: prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "exporter", "collector_duration_seconds"),
			"windows_exporter: Duration of a collection.",
			[]string{"collector"},
			nil,
		),
		collectorScrapeSuccessDesc: prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "exporter", "collector_success"),
			"windows_exporter: Whether the collector was successful.",
			[]string{"collector"},
			nil,
		),
		collectorScrapeTimeoutDesc: prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "exporter", "collector_timeout"),
			"windows_exporter: Whether the collector timed out.",
			[]string{"collector"},
			nil,
		),
		snapshotDuration: prometheus.NewDesc(
			prometheus.BuildFQName(types.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
			"Duration of perflib snapshot capture",
			nil,
			nil,
		),
	}
}
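
// Describe implements prometheus.Collector. It is intentionally a no-op:
// sending no descriptors makes this an "unchecked" collector in
// client_golang, so the dynamically assembled metric set is not validated
// against a fixed descriptor list.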
func (p *Prometheus) Describe(_ chan<- *prometheus.Desc) {}

// Collect sends the metrics gathered by each of the MetricCollectors to the
// provided Prometheus channel, along with per-collector status metrics.
func (p *Prometheus) Collect(ch chan<- prometheus.Metric) {
	t := time.Now()

	ch <- prometheus.MustNewConstMetric(
		p.snapshotDuration,
		prometheus.GaugeValue,
		time.Since(t).Seconds(),
	)

	// WaitGroup to wait for all collectors to finish
	wg := sync.WaitGroup{}
	wg.Add(len(p.metricCollectors.Collectors))

	// Collect the status of each collector through a channel; unlike a map,
	// a channel is safe for concurrent use.
	collectorStatusCh := make(chan collectorStatus, len(p.metricCollectors.Collectors))

	// Execute all collectors concurrently.
	// Timeout handling is done in the execute function.
	for name, metricsCollector := range p.metricCollectors.Collectors {
		go func(name string, metricsCollector Collector) {
			defer wg.Done()

			collectorStatusCh <- collectorStatus{
				name:       name,
				statusCode: p.execute(name, metricsCollector, ch),
			}
		}(name, metricsCollector)
	}

	// Wait for all collectors to finish
	wg.Wait()

	// Close the channel since we are done writing to it
	close(collectorStatusCh)

	for status := range collectorStatusCh {
		var successValue, timeoutValue float64

		if status.statusCode == pending {
			timeoutValue = 1.0
		}

		if status.statusCode == success {
			successValue = 1.0
		}

		ch <- prometheus.MustNewConstMetric(
			p.collectorScrapeSuccessDesc,
			prometheus.GaugeValue,
			successValue,
			status.name,
		)

		ch <- prometheus.MustNewConstMetric(
			p.collectorScrapeTimeoutDesc,
			prometheus.GaugeValue,
			timeoutValue,
			status.name,
		)
	}

	ch <- prometheus.MustNewConstMetric(
		p.scrapeDurationDesc,
		prometheus.GaugeValue,
		time.Since(t).Seconds(),
	)
}
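
// Illustrative scrape output for the status loop above (hypothetical
// collectors "cpu" and "os", where "os" timed out; series names assume
// types.Namespace is "windows"):
//
//	windows_exporter_collector_success{collector="cpu"} 1
//	windows_exporter_collector_timeout{collector="cpu"} 0
//	windows_exporter_collector_success{collector="os"} 0
//	windows_exporter_collector_timeout{collector="os"} 1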

func (p *Prometheus) execute(name string, c Collector, ch chan<- prometheus.Metric) collectorStatusCode {
	var (
		err        error
		numMetrics int
		duration   time.Duration
		timeout    atomic.Bool
	)

	// bufCh is a buffered channel that stores the metrics. It is needed
	// because once the timeout is reached, the prometheus registry channel
	// is closed.
	bufCh := make(chan prometheus.Metric, 1000)
	errCh := make(chan error, 1)

	ctx, cancel := context.WithTimeout(context.Background(), p.maxScrapeDuration)
	defer cancel()

	// Execute the collector
	go func() {
		defer func() {
			if r := recover(); r != nil {
				errCh <- fmt.Errorf("panic in collector %s: %v. stack: %s", name, r,
					string(debug.Stack()),
				)
			}

			close(bufCh)
		}()

		errCh <- c.Collect(bufCh)
	}()

	wg := sync.WaitGroup{}
	wg.Add(1)

	go func() {
		defer func() {
			// This prevents a panic from the race condition of closing the
			// ch channel too early.
			_ = recover()

			wg.Done()
		}()

		// Pass metrics to the prometheus registry.
		// If the timeout is reached, the channel is closed, and writing to
		// it would panic.
		for {
			select {
			case <-ctx.Done():
				return
			case m, ok := <-bufCh:
				if !ok {
					return
				}

				if !timeout.Load() {
					ch <- m
					numMetrics++
				}
			}
		}
	}()

	t := time.Now()

	// Wait for the collector to finish or time out
	select {
	case err = <-errCh:
		wg.Wait() // Wait for the buffer channel to be closed and empty

		duration = time.Since(t)
		ch <- prometheus.MustNewConstMetric(
			p.collectorScrapeDurationDesc,
			prometheus.GaugeValue,
			duration.Seconds(),
			name,
		)
	case <-ctx.Done():
		timeout.Store(true)

		duration = time.Since(t)
		ch <- prometheus.MustNewConstMetric(
			p.collectorScrapeDurationDesc,
			prometheus.GaugeValue,
			duration.Seconds(),
			name,
		)

		p.logger.Warn(fmt.Sprintf("collector %s timed out after %s, resulting in %d metrics", name, p.maxScrapeDuration, numMetrics))

		go func() {
			// Drain the channel on premature return so the collector
			// goroutine does not leak.
			//nolint:revive
			for range bufCh {
			}
		}()

		return pending
	}

	if err != nil {
		p.logger.Error(fmt.Sprintf("collector %s failed after %s, resulting in %d metrics", name, duration, numMetrics),
			slog.Any("err", err),
		)

		return failed
	}

	p.logger.Debug(fmt.Sprintf("collector %s succeeded after %s, resulting in %d metrics", name, duration, numMetrics))

	return success
}
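
// A rough sketch of the timeout path above (hypothetical, in-package code:
// slowCollector stands for any Collector whose Collect sleeps longer than
// the configured timeout, and "collectors" is a *MetricCollectors built
// elsewhere):
//
//	prom := collectors.NewPrometheusCollector(time.Second, slog.Default())
//	ch := make(chan prometheus.Metric)
//	go func() { for range ch {} }() // drain, as the registry would
//	status := prom.execute("slow", slowCollector, ch)
//	// status == pending after ~1s; collector_timeout will be 1 for "slow".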