From 70156cd106a274ebefcb7ac49ae82f292f3f278c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan-Otto=20Kr=C3=B6pke?= Date: Thu, 3 Oct 2024 14:31:44 +0200 Subject: [PATCH] cache: Implement PDH collector (#1662) --- pkg/collector/ad/ad.go | 3 +- pkg/collector/adcs/adcs.go | 4 +- pkg/collector/adfs/adfs.go | 14 +- pkg/collector/cache/cache.go | 279 ++++++++++++++++++++++++++++++----- pkg/collector/cache/const.go | 69 +++++++++ pkg/collector/prometheus.go | 9 +- pkg/perfdata/collector.go | 22 ++- tools/e2e-output.txt | 60 ++++++++ tools/end-to-end-test.ps1 | 4 +- 9 files changed, 410 insertions(+), 54 deletions(-) create mode 100644 pkg/collector/cache/const.go diff --git a/pkg/collector/ad/ad.go b/pkg/collector/ad/ad.go index 72298802..a92f4f53 100644 --- a/pkg/collector/ad/ad.go +++ b/pkg/collector/ad/ad.go @@ -1389,7 +1389,7 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error { return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err) } - adData, ok := data[perfdata.EmptyInstance] + adData, ok := data["NTDS"] if !ok { return errors.New("perflib query for DirectoryServices (AD) returned empty result set") @@ -1687,6 +1687,7 @@ func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error { c.nameTranslationsTotal, prometheus.CounterValue, adData[dsClientNameTranslationsPerSec].FirstValue, + "client", ) ch <- prometheus.MustNewConstMetric( c.nameTranslationsTotal, diff --git a/pkg/collector/adcs/adcs.go b/pkg/collector/adcs/adcs.go index fa81f9d5..a311a124 100644 --- a/pkg/collector/adcs/adcs.go +++ b/pkg/collector/adcs/adcs.go @@ -94,9 +94,9 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { var err error - c.perfDataCollector, err = perfdata.NewCollector("Processor Information", []string{"*"}, counters) + c.perfDataCollector, err = perfdata.NewCollector("Certification Authority", []string{"*"}, counters) if err != nil { - return fmt.Errorf("failed to create Processor Information collector: %w", err) + return fmt.Errorf("failed to create Certification Authority collector: %w", err) } } diff --git a/pkg/collector/adfs/adfs.go b/pkg/collector/adfs/adfs.go index 1f738528..48b987a8 100644 --- a/pkg/collector/adfs/adfs.go +++ b/pkg/collector/adfs/adfs.go @@ -6,7 +6,9 @@ import ( "errors" "fmt" "log/slog" + "maps" "math" + "slices" "github.com/alecthomas/kingpin/v2" "github.com/prometheus-community/windows_exporter/pkg/perfdata" @@ -705,13 +707,19 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error { data, err := c.perfDataCollector.Collect() if err != nil { - return fmt.Errorf("failed to collect Certification Authority (ADCS) metrics: %w", err) + return fmt.Errorf("failed to collect ADFS metrics: %w", err) } - adfsData, ok := data[perfdata.EmptyInstance] + instanceKey := slices.Collect(maps.Keys(data)) + + if len(instanceKey) == 0 { + return errors.New("perflib query for ADFS returned empty result set") + } + + adfsData, ok := data[instanceKey[0]] if !ok { - return errors.New("perflib query for Certification Authority (ADCS) returned empty result set") + return errors.New("perflib query for ADFS returned empty result set") } ch <- prometheus.MustNewConstMetric( diff --git a/pkg/collector/cache/cache.go b/pkg/collector/cache/cache.go index 6b9a0656..2a75890b 100644 --- a/pkg/collector/cache/cache.go +++ b/pkg/collector/cache/cache.go @@ -4,11 +4,14 @@ package cache import ( "errors" + "fmt" "log/slog" "github.com/alecthomas/kingpin/v2" + 
"github.com/prometheus-community/windows_exporter/pkg/perfdata" "github.com/prometheus-community/windows_exporter/pkg/perflib" "github.com/prometheus-community/windows_exporter/pkg/types" + "github.com/prometheus-community/windows_exporter/pkg/utils" "github.com/prometheus/client_golang/prometheus" "github.com/yusufpapurcu/wmi" ) @@ -23,6 +26,8 @@ var ConfigDefaults = Config{} type Collector struct { config Config + perfDataCollector *perfdata.Collector + asyncCopyReadsTotal *prometheus.Desc asyncDataMapsTotal *prometheus.Desc asyncFastReadsTotal *prometheus.Desc @@ -75,6 +80,10 @@ func (c *Collector) GetName() string { } func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) { + if utils.PDHEnabled() { + return []string{}, nil + } + return []string{"Cache"}, nil } @@ -83,6 +92,47 @@ func (c *Collector) Close(_ *slog.Logger) error { } func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { + if utils.PDHEnabled() { + counters := []string{ + asyncCopyReadsTotal, + asyncDataMapsTotal, + asyncFastReadsTotal, + asyncMDLReadsTotal, + asyncPinReadsTotal, + copyReadHitsTotal, + copyReadsTotal, + dataFlushesTotal, + dataFlushPagesTotal, + dataMapHitsPercent, + dataMapPinsTotal, + dataMapsTotal, + dirtyPages, + dirtyPageThreshold, + fastReadNotPossiblesTotal, + fastReadResourceMissesTotal, + fastReadsTotal, + lazyWriteFlushesTotal, + lazyWritePagesTotal, + mdlReadHitsTotal, + mdlReadsTotal, + pinReadHitsTotal, + pinReadsTotal, + readAheadsTotal, + syncCopyReadsTotal, + syncDataMapsTotal, + syncFastReadsTotal, + syncMDLReadsTotal, + syncPinReadsTotal, + } + + var err error + + c.perfDataCollector, err = perfdata.NewCollector("Cache", []string{"*"}, counters) + if err != nil { + return fmt.Errorf("failed to create Cache collector: %w", err) + } + } + c.asyncCopyReadsTotal = prometheus.NewDesc( prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"), "(AsyncCopyReadsTotal)", @@ -263,6 +313,10 @@ func (c *Collector) Build(_ *slog.Logger, _ *wmi.Client) error { // Collect implements the Collector interface. 
func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { + if utils.PDHEnabled() { + return c.collectPDH(ch) + } + logger = logger.With(slog.String("collector", Name)) if err := c.collect(ctx, logger, ch); err != nil { logger.Error("failed collecting cache metrics", @@ -275,43 +329,7 @@ func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch return nil } -// Perflib "Cache": -// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85) -type perflibCache struct { - AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"` - AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"` - AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"` - AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"` - AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"` - CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"` - CopyReadsTotal float64 `perflib:"Copy Reads/sec"` - DataFlushesTotal float64 `perflib:"Data Flushes/sec"` - DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"` - DataMapHitsPercent float64 `perflib:"Data Map Hits %"` - DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"` - DataMapsTotal float64 `perflib:"Data Maps/sec"` - DirtyPages float64 `perflib:"Dirty Pages"` - DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"` - FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"` - FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"` - FastReadsTotal float64 `perflib:"Fast Reads/sec"` - LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"` - LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"` - MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"` - MDLReadsTotal float64 `perflib:"MDL Reads/sec"` - PinReadHitsTotal float64 `perflib:"Pin Read Hits %"` - PinReadsTotal float64 `perflib:"Pin Reads/sec"` - ReadAheadsTotal float64 `perflib:"Read Aheads/sec"` - SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"` - SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"` - SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"` - SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"` - SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"` -} - func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error { - logger = logger.With(slog.String("collector", Name)) - var dst []perflibCache // Single-instance class, array is required but will have single entry. 
if err := perflib.UnmarshalObject(ctx.PerfObjects["Cache"], &dst, logger); err != nil {
@@ -498,3 +516,192 @@ func (c *Collector) collect(ctx *types.ScrapeContext, logger *slog.Logger, ch ch
 	return nil
 }
+
+func (c *Collector) collectPDH(ch chan<- prometheus.Metric) error {
+	data, err := c.perfDataCollector.Collect()
+	if err != nil {
+		return fmt.Errorf("failed to collect Cache metrics: %w", err)
+	}
+
+	cacheData, ok := data[perfdata.EmptyInstance]
+
+	if !ok {
+		return errors.New("perflib query for Cache returned empty result set")
+	}
+
+	ch <- prometheus.MustNewConstMetric(
+		c.asyncCopyReadsTotal,
+		prometheus.CounterValue,
+		cacheData[asyncCopyReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.asyncDataMapsTotal,
+		prometheus.CounterValue,
+		cacheData[asyncDataMapsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.asyncFastReadsTotal,
+		prometheus.CounterValue,
+		cacheData[asyncFastReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.asyncMDLReadsTotal,
+		prometheus.CounterValue,
+		cacheData[asyncMDLReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.asyncPinReadsTotal,
+		prometheus.CounterValue,
+		cacheData[asyncPinReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.copyReadHitsTotal,
+		prometheus.GaugeValue,
+		cacheData[copyReadHitsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.copyReadsTotal,
+		prometheus.CounterValue,
+		cacheData[copyReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dataFlushesTotal,
+		prometheus.CounterValue,
+		cacheData[dataFlushesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dataFlushPagesTotal,
+		prometheus.CounterValue,
+		cacheData[dataFlushPagesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dataMapHitsPercent,
+		prometheus.GaugeValue,
+		cacheData[dataMapHitsPercent].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dataMapPinsTotal,
+		prometheus.CounterValue,
+		cacheData[dataMapPinsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dataMapsTotal,
+		prometheus.CounterValue,
+		cacheData[dataMapsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dirtyPages,
+		prometheus.GaugeValue,
+		cacheData[dirtyPages].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.dirtyPageThreshold,
+		prometheus.GaugeValue,
+		cacheData[dirtyPageThreshold].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.fastReadNotPossiblesTotal,
+		prometheus.CounterValue,
+		cacheData[fastReadNotPossiblesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.fastReadResourceMissesTotal,
+		prometheus.CounterValue,
+		cacheData[fastReadResourceMissesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.fastReadsTotal,
+		prometheus.CounterValue,
+		cacheData[fastReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.lazyWriteFlushesTotal,
+		prometheus.CounterValue,
+		cacheData[lazyWriteFlushesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.lazyWritePagesTotal,
+		prometheus.CounterValue,
+		cacheData[lazyWritePagesTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.mdlReadHitsTotal,
+		prometheus.CounterValue,
+		cacheData[mdlReadHitsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.mdlReadsTotal,
+		prometheus.CounterValue,
+		cacheData[mdlReadsTotal].FirstValue,
+	)
+
+	ch <- prometheus.MustNewConstMetric(
+		c.pinReadHitsTotal,
+		prometheus.CounterValue,
+
cacheData[pinReadHitsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.pinReadsTotal, + prometheus.CounterValue, + cacheData[pinReadsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.readAheadsTotal, + prometheus.CounterValue, + cacheData[readAheadsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.syncCopyReadsTotal, + prometheus.CounterValue, + cacheData[syncCopyReadsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.syncDataMapsTotal, + prometheus.CounterValue, + cacheData[syncDataMapsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.syncFastReadsTotal, + prometheus.CounterValue, + cacheData[syncFastReadsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.syncMDLReadsTotal, + prometheus.CounterValue, + cacheData[syncMDLReadsTotal].FirstValue, + ) + + ch <- prometheus.MustNewConstMetric( + c.syncPinReadsTotal, + prometheus.CounterValue, + cacheData[syncPinReadsTotal].FirstValue, + ) + + return nil +} diff --git a/pkg/collector/cache/const.go b/pkg/collector/cache/const.go new file mode 100644 index 00000000..7774ef25 --- /dev/null +++ b/pkg/collector/cache/const.go @@ -0,0 +1,69 @@ +package cache + +// Perflib "Cache": +// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85) +const ( + asyncCopyReadsTotal = "Async Copy Reads/sec" + asyncDataMapsTotal = "Async Data Maps/sec" + asyncFastReadsTotal = "Async Fast Reads/sec" + asyncMDLReadsTotal = "Async MDL Reads/sec" + asyncPinReadsTotal = "Async Pin Reads/sec" + copyReadHitsTotal = "Copy Read Hits %" + copyReadsTotal = "Copy Reads/sec" + dataFlushesTotal = "Data Flushes/sec" + dataFlushPagesTotal = "Data Flush Pages/sec" + dataMapHitsPercent = "Data Map Hits %" + dataMapPinsTotal = "Data Map Pins/sec" + dataMapsTotal = "Data Maps/sec" + dirtyPages = "Dirty Pages" + dirtyPageThreshold = "Dirty Page Threshold" + fastReadNotPossiblesTotal = "Fast Read Not Possibles/sec" + fastReadResourceMissesTotal = "Fast Read Resource Misses/sec" + fastReadsTotal = "Fast Reads/sec" + lazyWriteFlushesTotal = "Lazy Write Flushes/sec" + lazyWritePagesTotal = "Lazy Write Pages/sec" + mdlReadHitsTotal = "MDL Read Hits %" + mdlReadsTotal = "MDL Reads/sec" + pinReadHitsTotal = "Pin Read Hits %" + pinReadsTotal = "Pin Reads/sec" + readAheadsTotal = "Read Aheads/sec" + syncCopyReadsTotal = "Sync Copy Reads/sec" + syncDataMapsTotal = "Sync Data Maps/sec" + syncFastReadsTotal = "Sync Fast Reads/sec" + syncMDLReadsTotal = "Sync MDL Reads/sec" + syncPinReadsTotal = "Sync Pin Reads/sec" +) + +// Perflib "Cache": +// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85) +type perflibCache struct { + AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"` + AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"` + AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"` + AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"` + AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"` + CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"` + CopyReadsTotal float64 `perflib:"Copy Reads/sec"` + DataFlushesTotal float64 `perflib:"Data Flushes/sec"` + DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"` + DataMapHitsPercent float64 `perflib:"Data Map Hits %"` + DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"` + DataMapsTotal float64 `perflib:"Data Maps/sec"` + DirtyPages float64 `perflib:"Dirty Pages"` + DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"` + FastReadNotPossiblesTotal float64 
`perflib:"Fast Read Not Possibles/sec"` + FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"` + FastReadsTotal float64 `perflib:"Fast Reads/sec"` + LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"` + LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"` + MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"` + MDLReadsTotal float64 `perflib:"MDL Reads/sec"` + PinReadHitsTotal float64 `perflib:"Pin Read Hits %"` + PinReadsTotal float64 `perflib:"Pin Reads/sec"` + ReadAheadsTotal float64 `perflib:"Read Aheads/sec"` + SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"` + SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"` + SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"` + SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"` + SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"` +} diff --git a/pkg/collector/prometheus.go b/pkg/collector/prometheus.go index 7a5dd525..a6a7033f 100644 --- a/pkg/collector/prometheus.go +++ b/pkg/collector/prometheus.go @@ -186,16 +186,15 @@ func (p *Prometheus) execute(name string, c Collector, scrapeCtx *types.ScrapeCo go func() { defer func() { if r := recover(); r != nil { - p.logger.Error("panic in collector "+name, - slog.Any("panic", r), - slog.Any("stack", string(debug.Stack())), + errCh <- fmt.Errorf("panic in collector %s: %v. stack: %s", name, r, + string(debug.Stack()), ) } + + close(bufCh) }() errCh <- c.Collect(scrapeCtx, p.logger, bufCh) - - close(bufCh) }() wg := sync.WaitGroup{} diff --git a/pkg/perfdata/collector.go b/pkg/perfdata/collector.go index 644cbd0f..45b5e8fa 100644 --- a/pkg/perfdata/collector.go +++ b/pkg/perfdata/collector.go @@ -18,12 +18,13 @@ const EmptyInstance = "------" type Collector struct { time time.Time object string - counters []Counter + counters map[string]Counter handle pdhQueryHandle } type Counter struct { Name string + Desc string Instances map[string]pdhCounterHandle Type uint32 Frequency float64 @@ -48,7 +49,7 @@ func NewCollector(object string, instances []string, counters []string) (*Collec collector := &Collector{ object: object, - counters: make([]Counter, 0, len(counters)), + counters: make(map[string]Counter, len(counters)), handle: handle, } @@ -79,17 +80,18 @@ func NewCollector(object string, instances []string, counters []string) (*Collec // Get the info with the current buffer size bufLen := uint32(0) - if ret := PdhGetCounterInfo(counterHandle, 0, &bufLen, nil); ret != PdhMoreData { + if ret := PdhGetCounterInfo(counterHandle, 1, &bufLen, nil); ret != PdhMoreData { return nil, fmt.Errorf("PdhGetCounterInfo: %w", NewPdhError(ret)) } buf := make([]byte, bufLen) - if ret := PdhGetCounterInfo(counterHandle, 0, &bufLen, &buf[0]); ret != ErrorSuccess { + if ret := PdhGetCounterInfo(counterHandle, 1, &bufLen, &buf[0]); ret != ErrorSuccess { return nil, fmt.Errorf("PdhGetCounterInfo: %w", NewPdhError(ret)) } ci := (*PdhCounterInfo)(unsafe.Pointer(&buf[0])) counter.Type = ci.DwType + counter.Desc = windows.UTF16PtrToString(ci.SzExplainText) frequency := float64(0) @@ -101,7 +103,7 @@ func NewCollector(object string, instances []string, counters []string) (*Collec } } - collector.counters = append(collector.counters, counter) + collector.counters[counterName] = counter } if len(collector.counters) == 0 { @@ -115,6 +117,16 @@ func NewCollector(object string, instances []string, counters []string) (*Collec return collector, nil } +func (c *Collector) Describe() map[string]string { + desc := make(map[string]string, len(c.counters)) + + for 
_, counter := range c.counters { + desc[counter.Name] = counter.Desc + } + + return desc +} + func (c *Collector) Collect() (map[string]map[string]CounterValues, error) { if len(c.counters) == 0 { return map[string]map[string]CounterValues{}, nil diff --git a/tools/e2e-output.txt b/tools/e2e-output.txt index fb1da2a4..e95a9e6a 100644 --- a/tools/e2e-output.txt +++ b/tools/e2e-output.txt @@ -1,6 +1,64 @@ # HELP test_alpha_total Some random metric. # TYPE test_alpha_total counter test_alpha_total 42 +# HELP windows_cache_async_copy_reads_total (AsyncCopyReadsTotal) +# TYPE windows_cache_async_copy_reads_total counter +# HELP windows_cache_async_data_maps_total (AsyncDataMapsTotal) +# TYPE windows_cache_async_data_maps_total counter +# HELP windows_cache_async_fast_reads_total (AsyncFastReadsTotal) +# TYPE windows_cache_async_fast_reads_total counter +# HELP windows_cache_async_mdl_reads_total (AsyncMDLReadsTotal) +# TYPE windows_cache_async_mdl_reads_total counter +# HELP windows_cache_async_pin_reads_total (AsyncPinReadsTotal) +# TYPE windows_cache_async_pin_reads_total counter +# HELP windows_cache_copy_read_hits_total (CopyReadHitsTotal) +# TYPE windows_cache_copy_read_hits_total gauge +# HELP windows_cache_copy_reads_total (CopyReadsTotal) +# TYPE windows_cache_copy_reads_total counter +# HELP windows_cache_data_flush_pages_total (DataFlushPagesTotal) +# TYPE windows_cache_data_flush_pages_total counter +# HELP windows_cache_data_flushes_total (DataFlushesTotal) +# TYPE windows_cache_data_flushes_total counter +# HELP windows_cache_data_map_hits_percent (DataMapHitsPercent) +# TYPE windows_cache_data_map_hits_percent gauge +# HELP windows_cache_data_map_pins_total (DataMapPinsTotal) +# TYPE windows_cache_data_map_pins_total counter +# HELP windows_cache_data_maps_total (DataMapsTotal) +# TYPE windows_cache_data_maps_total counter +# HELP windows_cache_dirty_page_threshold (DirtyPageThreshold) +# TYPE windows_cache_dirty_page_threshold gauge +# HELP windows_cache_dirty_pages (DirtyPages) +# TYPE windows_cache_dirty_pages gauge +# HELP windows_cache_fast_read_not_possibles_total (FastReadNotPossiblesTotal) +# TYPE windows_cache_fast_read_not_possibles_total counter +# HELP windows_cache_fast_read_resource_misses_total (FastReadResourceMissesTotal) +# TYPE windows_cache_fast_read_resource_misses_total counter +# HELP windows_cache_fast_reads_total (FastReadsTotal) +# TYPE windows_cache_fast_reads_total counter +# HELP windows_cache_lazy_write_flushes_total (LazyWriteFlushesTotal) +# TYPE windows_cache_lazy_write_flushes_total counter +# HELP windows_cache_lazy_write_pages_total (LazyWritePagesTotal) +# TYPE windows_cache_lazy_write_pages_total counter +# HELP windows_cache_mdl_read_hits_total (MDLReadHitsTotal) +# TYPE windows_cache_mdl_read_hits_total counter +# HELP windows_cache_mdl_reads_total (MDLReadsTotal) +# TYPE windows_cache_mdl_reads_total counter +# HELP windows_cache_pin_read_hits_total (PinReadHitsTotal) +# TYPE windows_cache_pin_read_hits_total counter +# HELP windows_cache_pin_reads_total (PinReadsTotal) +# TYPE windows_cache_pin_reads_total counter +# HELP windows_cache_read_aheads_total (ReadAheadsTotal) +# TYPE windows_cache_read_aheads_total counter +# HELP windows_cache_sync_copy_reads_total (SyncCopyReadsTotal) +# TYPE windows_cache_sync_copy_reads_total counter +# HELP windows_cache_sync_data_maps_total (SyncDataMapsTotal) +# TYPE windows_cache_sync_data_maps_total counter +# HELP windows_cache_sync_fast_reads_total (SyncFastReadsTotal) +# TYPE 
windows_cache_sync_fast_reads_total counter +# HELP windows_cache_sync_mdl_reads_total (SyncMDLReadsTotal) +# TYPE windows_cache_sync_mdl_reads_total counter +# HELP windows_cache_sync_pin_reads_total (SyncPinReadsTotal) +# TYPE windows_cache_sync_pin_reads_total counter # HELP windows_cpu_clock_interrupts_total Total number of received and serviced clock tick interrupts # TYPE windows_cpu_clock_interrupts_total counter # HELP windows_cpu_core_frequency_mhz Core frequency in megahertz @@ -57,6 +115,7 @@ test_alpha_total 42 # TYPE windows_exporter_collector_duration_seconds gauge # HELP windows_exporter_collector_success windows_exporter: Whether the collector was successful. # TYPE windows_exporter_collector_success gauge +windows_exporter_collector_success{collector="cache"} 1 windows_exporter_collector_success{collector="cpu"} 1 windows_exporter_collector_success{collector="cpu_info"} 1 windows_exporter_collector_success{collector="cs"} 1 @@ -73,6 +132,7 @@ windows_exporter_collector_success{collector="system"} 1 windows_exporter_collector_success{collector="textfile"} 1 # HELP windows_exporter_collector_timeout windows_exporter: Whether the collector timed out. # TYPE windows_exporter_collector_timeout gauge +windows_exporter_collector_timeout{collector="cache"} 0 windows_exporter_collector_timeout{collector="cpu"} 0 windows_exporter_collector_timeout{collector="cpu_info"} 0 windows_exporter_collector_timeout{collector="cs"} 0 diff --git a/tools/end-to-end-test.ps1 b/tools/end-to-end-test.ps1 index 5076e42e..0beee530 100644 --- a/tools/end-to-end-test.ps1 +++ b/tools/end-to-end-test.ps1 @@ -18,14 +18,14 @@ mkdir $textfile_dir | Out-Null Copy-Item 'e2e-textfile.prom' -Destination "$($textfile_dir)/e2e-textfile.prom" # Omit dynamic collector information that will change after each run -$skip_re = "^(go_|windows_exporter_build_info|windows_exporter_collector_duration_seconds|windows_exporter_perflib_snapshot_duration_seconds|windows_exporter_scrape_duration_seconds|process_|windows_textfile_mtime_seconds|windows_cpu|windows_cs|windows_logical_disk|windows_physical_disk|windows_memory|windows_net|windows_os|windows_process|windows_service_process|windows_system|windows_perfdata|windows_textfile_mtime_seconds)" +$skip_re = "^(go_|windows_exporter_build_info|windows_exporter_collector_duration_seconds|windows_exporter_perflib_snapshot_duration_seconds|windows_exporter_scrape_duration_seconds|process_|windows_textfile_mtime_seconds|windows_cpu|windows_cs|windows_cache|windows_logical_disk|windows_physical_disk|windows_memory|windows_net|windows_os|windows_process|windows_service_process|windows_system|windows_perfdata|windows_textfile_mtime_seconds)" # Start process in background, awaiting HTTP requests. 
# Use default collectors, port and address: http://localhost:9182/metrics $exporter_proc = Start-Process ` -PassThru ` -FilePath ..\windows_exporter.exe ` - -ArgumentList "--log.level=debug","--web.disable-exporter-metrics","--collectors.enabled=[defaults],cpu_info,textfile,process,perfdata,scheduled_task","--collector.process.include=explorer.exe","--collector.scheduled_task.include=.*WinSAT","--collector.service.include=Themes","--collector.textfile.directories=$($textfile_dir)",@" + -ArgumentList "--log.level=debug","--web.disable-exporter-metrics","--collectors.enabled=[defaults],cache,cpu_info,textfile,process,perfdata,scheduled_task","--collector.process.include=explorer.exe","--collector.scheduled_task.include=.*WinSAT","--collector.service.include=Themes","--collector.textfile.directories=$($textfile_dir)",@" --collector.perfdata.objects="[{\"object\":\"Processor Information\",\"instance_label\":\"core\",\"instances\":[\"*\"],\"counters\":{\"% Processor Time\":{},\"% Privileged Time\":{}}},{\"object\":\"Memory\",\"counters\":{\"Cache Faults/sec\":{\"type\":\"counter\"}}}]" "@ ` -WindowStyle Hidden `
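As a usage illustration (not part of the patch): a minimal, hypothetical sketch of how the pkg/perfdata API touched here — NewCollector, Collect, the new Describe, and the EmptyInstance key — can be consumed for an instance-less object such as "Cache". The standalone main wrapper and the counter selection are assumptions for illustration only; the exporter itself wires this up inside a collector's Build/Collect methods, gated by utils.PDHEnabled(). Windows only.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus-community/windows_exporter/pkg/perfdata"
)

func main() {
	// Two counters from the "Cache" performance object; names match
	// pkg/collector/cache/const.go in this patch.
	counters := []string{"Async Copy Reads/sec", "Dirty Pages"}

	// Assumption: passing no instances is valid for objects that expose none;
	// the result set is then keyed by perfdata.EmptyInstance ("------").
	collector, err := perfdata.NewCollector("Cache", nil, counters)
	if err != nil {
		log.Fatalf("failed to create Cache collector: %v", err)
	}

	// Describe (added in this patch) maps each counter name to its PDH explain text.
	for name, desc := range collector.Describe() {
		fmt.Printf("%s: %s\n", name, desc)
	}

	data, err := collector.Collect()
	if err != nil {
		log.Fatalf("failed to collect Cache metrics: %v", err)
	}

	cacheData, ok := data[perfdata.EmptyInstance]
	if !ok {
		log.Fatal("Cache query returned an empty result set")
	}

	for _, name := range counters {
		fmt.Printf("%s = %f\n", name, cacheData[name].FirstValue)
	}
}

For multi-instance objects the outer map is keyed by instance name instead, which is why ad.go reads data["NTDS"] and adcs.go queries Certification Authority with the "*" wildcard.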