remote_fx: refactor collector (#1738)
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
parent a4ec0a96f1
commit 78bd720e88
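Most of the hunks below apply one recurring change: `perfdata.NewCollector` calls switch from `perfdata.V1` (perflib-based) to `perfdata.V2` (PDH-based), counter slices are inlined into the constructor call, and collectors gain a `Close` that releases the underlying PDH query, guarded by `toggle.IsPDHEnabled()` wherever both backends still coexist. A minimal sketch of that pattern, assuming the Collector shape and internal packages from the windows_exporter tree ("Cache" stands in for whichever counter object a given collector queries):

// Hedged sketch of the recurring change, not any one collector verbatim.
package sketch

import (
	"fmt"
	"log/slog"

	"github.com/prometheus-community/windows_exporter/internal/mi"
	"github.com/prometheus-community/windows_exporter/internal/perfdata"
	"github.com/prometheus-community/windows_exporter/internal/toggle"
)

type Collector struct {
	perfDataCollector perfdata.Collector
}

var counters []string // each collector supplies its own counter names

func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
	var err error

	// Before this commit: perfdata.V1. After: the PDH-backed V2 implementation.
	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Cache", perfdata.AllInstances, counters)
	if err != nil {
		return fmt.Errorf("failed to create Cache collector: %w", err)
	}

	return nil
}

func (c *Collector) Close(_ *slog.Logger) error {
	// A PDH collector owns a query handle, so shutdown must release it.
	if toggle.IsPDHEnabled() {
		c.perfDataCollector.Close()
	}

	return nil
}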
@@ -10,6 +10,7 @@ import (
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus-community/windows_exporter/internal/mi"
 	"github.com/prometheus-community/windows_exporter/internal/perfdata"
+	"github.com/prometheus-community/windows_exporter/internal/toggle"
 	"github.com/prometheus-community/windows_exporter/internal/types"
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -115,6 +116,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }
@@ -268,7 +273,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DirectoryServices", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DirectoryServices", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
 	}

@@ -72,6 +72,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }
@@ -95,7 +99,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Certification Authority", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Certification Authority", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create Certification Authority collector: %w", err)
 	}

@@ -104,6 +104,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }
@@ -157,7 +161,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "AD FS", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "AD FS", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create AD FS collector: %w", err)
 	}

@@ -89,6 +89,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }
@@ -128,7 +132,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Cache", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Cache", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create Cache collector: %w", err)
 	}

@@ -76,6 +76,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }

@@ -164,6 +164,20 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		if slices.Contains(c.config.CollectorsEnabled, "connection") {
+			c.perfDataCollectorConnection.Close()
+		}
+
+		if slices.Contains(c.config.CollectorsEnabled, "folder") {
+			c.perfDataCollectorFolder.Close()
+		}
+
+		if slices.Contains(c.config.CollectorsEnabled, "volume") {
+			c.perfDataCollectorVolume.Close()
+		}
+	}
+
 	return nil
 }
@@ -189,7 +203,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 		sizeOfFilesReceivedTotal,
 	}

-	c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Connections", perfdata.AllInstances, counters)
+	c.perfDataCollectorConnection, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Connections", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
 	}
@@ -226,7 +240,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 		updatesDroppedTotal,
 	}

-	c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V1, "DFS Replicated Folders", perfdata.AllInstances, counters)
+	c.perfDataCollectorFolder, err = perfdata.NewCollector(perfdata.V2, "DFS Replicated Folders", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
 	}
@@ -241,7 +255,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 		usnJournalUnreadPercentage,
 	}

-	c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V1, "DFS Replication Service Volumes", perfdata.AllInstances, counters)
+	c.perfDataCollectorVolume, err = perfdata.NewCollector(perfdata.V2, "DFS Replication Service Volumes", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
 	}

@@ -85,12 +85,18 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 	if toggle.IsPDHEnabled() {
-		counters := []string{
+		var err error
+
+		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DHCP Server", perfdata.AllInstances, []string{
 			acksTotal,
 			activeQueueLength,
 			conflictCheckQueueLength,
@@ -116,11 +122,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 			packetsReceivedTotal,
 			releasesTotal,
 			requestsTotal,
-		}
-
-		var err error
-
-		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DHCP Server", perfdata.AllInstances, counters)
+		})
 		if err != nil {
 			return fmt.Errorf("failed to create DHCP Server collector: %w", err)
 		}

@@ -76,11 +76,15 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	c.perfDataCollector.Close()
+
 	return nil
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	counters := []string{
+	var err error
+
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "DNS", perfdata.AllInstances, []string{
 		axfrRequestReceived,
 		axfrRequestSent,
 		axfrResponseReceived,
@@ -121,11 +125,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		winsReverseResponseSent,
 		zoneTransferFailure,
 		zoneTransferSOARequestSent,
-	}
-
-	var err error
-
-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "DNS", perfdata.AllInstances, counters)
+	})
 	if err != nil {
 		return fmt.Errorf("failed to create DNS collector: %w", err)
 	}

@@ -33,7 +33,7 @@ func (c *Collector) buildActiveSync() error {

 	var err error

-	c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V1, "MSExchange ActiveSync", perfdata.AllInstances, counters)
+	c.perfDataCollectorActiveSync, err = perfdata.NewCollector(perfdata.V2, "MSExchange ActiveSync", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
 	}

@@ -42,7 +42,7 @@ func (c *Collector) buildADAccessProcesses() error {

 	var err error

-	c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V1, "MSExchange ADAccess Processes", perfdata.AllInstances, counters)
+	c.perfDataCollectorADAccessProcesses, err = perfdata.NewCollector(perfdata.V2, "MSExchange ADAccess Processes", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
 	}

@@ -23,7 +23,7 @@ func (c *Collector) buildAutoDiscover() error {

 	var err error

-	c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V1, "MSExchange Autodiscover", perfdata.AllInstances, counters)
+	c.perfDataCollectorAutoDiscover, err = perfdata.NewCollector(perfdata.V2, "MSExchange Autodiscover", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
 	}

@@ -21,7 +21,7 @@ func (c *Collector) buildAvailabilityService() error {

 	var err error

-	c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V1, "MSExchange Availability Service", perfdata.AllInstances, counters)
+	c.perfDataCollectorAvailabilityService, err = perfdata.NewCollector(perfdata.V2, "MSExchange Availability Service", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
 	}

@@ -44,7 +44,7 @@ func (c *Collector) buildHTTPProxy() error {

 	var err error

-	c.perfDataCollectorHttpProxy, err = perfdata.NewCollector(perfdata.V1, "MSExchange HttpProxy", perfdata.AllInstances, counters)
+	c.perfDataCollectorHttpProxy, err = perfdata.NewCollector(perfdata.V2, "MSExchange HttpProxy", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
 	}

@@ -27,7 +27,7 @@ func (c *Collector) buildMapiHttpEmsmdb() error {

 	var err error

-	c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector(perfdata.V1, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counters)
+	c.perfDataCollectorMapiHttpEmsmdb, err = perfdata.NewCollector(perfdata.V2, "MSExchange MapiHttp Emsmdb", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
 	}

@@ -30,7 +30,7 @@ func (c *Collector) buildOWA() error {

 	var err error

-	c.perfDataCollectorOWA, err = perfdata.NewCollector(perfdata.V1, "MSExchange OWA", perfdata.AllInstances, counters)
+	c.perfDataCollectorOWA, err = perfdata.NewCollector(perfdata.V2, "MSExchange OWA", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
 	}

@@ -42,7 +42,7 @@ func (c *Collector) buildRPC() error {

 	var err error

-	c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector(perfdata.V1, "MSExchange RpcClientAccess", perfdata.AllInstances, counters)
+	c.perfDataCollectorRpcClientAccess, err = perfdata.NewCollector(perfdata.V2, "MSExchange RpcClientAccess", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
 	}

@@ -51,7 +51,7 @@ func (c *Collector) buildTransportQueues() error {

 	var err error

-	c.perfDataCollectorTransportQueues, err = perfdata.NewCollector(perfdata.V1, "MSExchangeTransport Queues", perfdata.AllInstances, counters)
+	c.perfDataCollectorTransportQueues, err = perfdata.NewCollector(perfdata.V2, "MSExchangeTransport Queues", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
 	}

@@ -42,7 +42,7 @@ func (c *Collector) buildWorkloadManagementWorkloads() error {

 	var err error

-	c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector(perfdata.V1, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counters)
+	c.perfDataCollectorWorkloadManagementWorkloads, err = perfdata.NewCollector(perfdata.V2, "MSExchange WorkloadManagement Workloads", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
 	}

@@ -163,7 +163,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "LogicalDisk", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "LogicalDisk", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
 	}

@@ -146,7 +146,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {

 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Memory", perfdata.AllInstances, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Memory", perfdata.AllInstances, counters)
 	if err != nil {
 		return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
 	}

@@ -146,12 +146,18 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }

 func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 	if toggle.IsPDHEnabled() {
-		counters := []string{
+		var err error
+
+		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Network Interface", perfdata.AllInstances, []string{
 			BytesReceivedPerSec,
 			BytesSentPerSec,
 			BytesTotalPerSec,
@@ -165,11 +171,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
 			PacketsReceivedUnknown,
 			PacketsSentPerSec,
 			CurrentBandwidth,
-		}
-
-		var err error
-
-		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Network Interface", perfdata.AllInstances, counters)
+		})
 		if err != nil {
 			return fmt.Errorf("failed to create Processor Information collector: %w", err)
 		}

@@ -57,6 +57,8 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	c.perfDataCollector.Close()
+
 	return nil
 }

@@ -138,6 +138,10 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	if toggle.IsPDHEnabled() {
+		c.perfDataCollector.Close()
+	}
+
 	return nil
 }
@@ -194,7 +198,7 @@ func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
 	if errors.Is(err, v2.NewPdhError(v2.PdhCstatusNoObject)) {
 		counters[0] = idProcess

-		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V1, "Process", perfdata.AllInstances, counters)
+		c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Process", perfdata.AllInstances, counters)
 	}

 	if err != nil {

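The Process hunk keeps the collector's existing fallback, now retrying against V2: when PDH reports `PdhCstatusNoObject` for the preferred counter set, the first counter is swapped to the legacy `idProcess` and the constructor is retried. A self-contained sketch of that retry-on-sentinel-error shape (`newCollector`, the object names, and the sentinel are hypothetical stand-ins for `perfdata.NewCollector` and `v2.NewPdhError(v2.PdhCstatusNoObject)`):

package main

import (
	"errors"
	"fmt"
)

// errNoObject stands in for the PDH "no such object" sentinel.
var errNoObject = errors.New("PDH_CSTATUS_NO_OBJECT")

// newCollector is a hypothetical stand-in for perfdata.NewCollector.
func newCollector(object string) (string, error) {
	if object == "Process V2" {
		return "", errNoObject // pretend the preferred counter set is absent
	}

	return "collector(" + object + ")", nil
}

func main() {
	// Try the preferred counter set first, then fall back, as the hunk does.
	c, err := newCollector("Process V2")
	if errors.Is(err, errNoObject) {
		c, err = newCollector("Process")
	}

	fmt.Println(c, err)
}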
@@ -0,0 +1,27 @@
+package remote_fx
+
+const (
+	BaseTCPRTT               = "Base TCP RTT"
+	BaseUDPRTT               = "Base UDP RTT"
+	CurrentTCPBandwidth      = "Current TCP Bandwidth"
+	CurrentTCPRTT            = "Current TCP RTT"
+	CurrentUDPBandwidth      = "Current UDP Bandwidth"
+	CurrentUDPRTT            = "Current UDP RTT"
+	TotalReceivedBytes       = "Total Received Bytes"
+	TotalSentBytes           = "Total Sent Bytes"
+	UDPPacketsReceivedPersec = "UDP Packets Received/sec"
+	UDPPacketsSentPersec     = "UDP Packets Sent/sec"
+	FECRate                  = "Forward Error Correction (FEC) percentage"
+	LossRate                 = "Loss percentage"
+	RetransmissionRate       = "Percentage of packets that have been retransmitted"
+
+	AverageEncodingTime                                = "Average Encoding Time"
+	FrameQuality                                       = "Frame Quality"
+	FramesSkippedPerSecondInsufficientClientResources  = "Frames Skipped/Second - Insufficient Server Resources"
+	FramesSkippedPerSecondInsufficientNetworkResources = "Frames Skipped/Second - Insufficient Network Resources"
+	FramesSkippedPerSecondInsufficientServerResources  = "Frames Skipped/Second - Insufficient Client Resources"
+	GraphicsCompressionratio                           = "Graphics Compression ratio"
+	InputFramesPerSecond                               = "Input Frames/Second"
+	OutputFramesPerSecond                              = "Output Frames/Second"
+	SourceFramesPerSecond                              = "Source Frames/Second"
+)

@@ -3,12 +3,14 @@
 package remote_fx

 import (
+	"errors"
+	"fmt"
 	"log/slog"
 	"strings"

 	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus-community/windows_exporter/internal/mi"
-	v1 "github.com/prometheus-community/windows_exporter/internal/perfdata/v1"
+	"github.com/prometheus-community/windows_exporter/internal/perfdata"
 	"github.com/prometheus-community/windows_exporter/internal/types"
 	"github.com/prometheus-community/windows_exporter/internal/utils"
 	"github.com/prometheus/client_golang/prometheus"
@@ -28,6 +30,9 @@ var ConfigDefaults = Config{}
 type Collector struct {
 	config Config

+	perfDataCollectorNetwork  perfdata.Collector
+	perfDataCollectorGraphics perfdata.Collector
+
 	// net
 	baseTCPRTT *prometheus.Desc
 	baseUDPRTT *prometheus.Desc
@@ -74,14 +79,53 @@ func (c *Collector) GetName() string {
 }

 func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
-	return []string{"RemoteFX Network", "RemoteFX Graphics"}, nil
+	return []string{}, nil
 }

+func (c *Collector) Close(_ *slog.Logger) error {
+	c.perfDataCollectorNetwork.Close()
+	c.perfDataCollectorGraphics.Close()
+
+	return nil
+}
+
 func (c *Collector) Build(*slog.Logger, *mi.Session) error {
+	var err error
+
+	c.perfDataCollectorNetwork, err = perfdata.NewCollector(perfdata.V2, "RemoteFX Network", perfdata.AllInstances, []string{
+		BaseTCPRTT,
+		BaseUDPRTT,
+		CurrentTCPBandwidth,
+		CurrentTCPRTT,
+		CurrentUDPBandwidth,
+		CurrentUDPRTT,
+		TotalReceivedBytes,
+		TotalSentBytes,
+		UDPPacketsReceivedPersec,
+		UDPPacketsSentPersec,
+		FECRate,
+		LossRate,
+		RetransmissionRate,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create RemoteFX Network collector: %w", err)
+	}
+
+	c.perfDataCollectorGraphics, err = perfdata.NewCollector(perfdata.V2, "RemoteFX Graphics", perfdata.AllInstances, []string{
+		AverageEncodingTime,
+		FrameQuality,
+		FramesSkippedPerSecondInsufficientClientResources,
+		FramesSkippedPerSecondInsufficientNetworkResources,
+		FramesSkippedPerSecondInsufficientServerResources,
+		GraphicsCompressionratio,
+		InputFramesPerSecond,
+		OutputFramesPerSecond,
+		SourceFramesPerSecond,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create RemoteFX Graphics collector: %w", err)
+	}
+
 	// net
 	c.baseTCPRTT = prometheus.NewDesc(
 		prometheus.BuildFQName(types.Namespace, Name, "net_base_tcp_rtt_seconds"),
@@ -211,228 +255,187 @@ func (c *Collector) Build(*slog.Logger, *mi.Session) error {

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *Collector) Collect(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
-	logger = logger.With(slog.String("collector", Name))
-	if err := c.collectRemoteFXNetworkCount(ctx, logger, ch); err != nil {
-		logger.Error("failed collecting terminal services session count metrics",
-			slog.Any("err", err),
-		)
+func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
+	errs := make([]error, 0, 2)

-		return err
+	if err := c.collectRemoteFXNetworkCount(ch); err != nil {
+		errs = append(errs, fmt.Errorf("failed collecting RemoteFX Network metrics: %w", err))
 	}

-	if err := c.collectRemoteFXGraphicsCounters(ctx, logger, ch); err != nil {
-		logger.Error("failed collecting terminal services session count metrics",
-			slog.Any("err", err),
-		)
-
-		return err
+	if err := c.collectRemoteFXGraphicsCounters(ch); err != nil {
+		errs = append(errs, fmt.Errorf("failed collecting RemoteFX Graphics metrics: %w", err))
 	}

-	return nil
+	return errors.Join(errs...)
 }

-type perflibRemoteFxNetwork struct {
-	Name                     string
-	BaseTCPRTT               float64 `perflib:"Base TCP RTT"`
-	BaseUDPRTT               float64 `perflib:"Base UDP RTT"`
-	CurrentTCPBandwidth      float64 `perflib:"Current TCP Bandwidth"`
-	CurrentTCPRTT            float64 `perflib:"Current TCP RTT"`
-	CurrentUDPBandwidth      float64 `perflib:"Current UDP Bandwidth"`
-	CurrentUDPRTT            float64 `perflib:"Current UDP RTT"`
-	TotalReceivedBytes       float64 `perflib:"Total Received Bytes"`
-	TotalSentBytes           float64 `perflib:"Total Sent Bytes"`
-	UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
-	UDPPacketsSentPersec     float64 `perflib:"UDP Packets Sent/sec"`
-	FECRate                  float64 `perflib:"Forward Error Correction (FEC) percentage"`
-	LossRate                 float64 `perflib:"Loss percentage"`
-	RetransmissionRate       float64 `perflib:"Percentage of packets that have been retransmitted"`
-}
-
-func (c *Collector) collectRemoteFXNetworkCount(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
-	logger = logger.With(slog.String("collector", Name))
-	dst := make([]perflibRemoteFxNetwork, 0)
-
-	err := v1.UnmarshalObject(ctx.PerfObjects["RemoteFX Network"], &dst, logger)
+func (c *Collector) collectRemoteFXNetworkCount(ch chan<- prometheus.Metric) error {
+	perfData, err := c.perfDataCollectorNetwork.Collect()
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to collect RemoteFX Network metrics: %w", err)
 	}

-	for _, d := range dst {
+	for name, data := range perfData {
 		// only connect metrics for remote named sessions
-		n := strings.ToLower(normalizeSessionName(d.Name))
-		if n == "" || n == "services" || n == "console" {
+		sessionName := normalizeSessionName(name)
+		if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
 			continue
 		}

 		ch <- prometheus.MustNewConstMetric(
 			c.baseTCPRTT,
 			prometheus.GaugeValue,
-			utils.MilliSecToSec(d.BaseTCPRTT),
-			normalizeSessionName(d.Name),
+			utils.MilliSecToSec(data[BaseTCPRTT].FirstValue),
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.baseUDPRTT,
 			prometheus.GaugeValue,
-			utils.MilliSecToSec(d.BaseUDPRTT),
-			normalizeSessionName(d.Name),
+			utils.MilliSecToSec(data[BaseUDPRTT].FirstValue),
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.currentTCPBandwidth,
 			prometheus.GaugeValue,
-			(d.CurrentTCPBandwidth*1000)/8,
-			normalizeSessionName(d.Name),
+			(data[CurrentTCPBandwidth].FirstValue*1000)/8,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.currentTCPRTT,
 			prometheus.GaugeValue,
-			utils.MilliSecToSec(d.CurrentTCPRTT),
-			normalizeSessionName(d.Name),
+			utils.MilliSecToSec(data[CurrentTCPRTT].FirstValue),
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.currentUDPBandwidth,
 			prometheus.GaugeValue,
-			(d.CurrentUDPBandwidth*1000)/8,
-			normalizeSessionName(d.Name),
+			(data[CurrentUDPBandwidth].FirstValue*1000)/8,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.currentUDPRTT,
 			prometheus.GaugeValue,
-			utils.MilliSecToSec(d.CurrentUDPRTT),
-			normalizeSessionName(d.Name),
+			utils.MilliSecToSec(data[CurrentUDPRTT].FirstValue),
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.totalReceivedBytes,
 			prometheus.CounterValue,
-			d.TotalReceivedBytes,
-			normalizeSessionName(d.Name),
+			data[TotalReceivedBytes].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.totalSentBytes,
 			prometheus.CounterValue,
-			d.TotalSentBytes,
-			normalizeSessionName(d.Name),
+			data[TotalSentBytes].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.udpPacketsReceivedPerSec,
 			prometheus.CounterValue,
-			d.UDPPacketsReceivedPersec,
-			normalizeSessionName(d.Name),
+			data[UDPPacketsReceivedPersec].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.udpPacketsSentPerSec,
 			prometheus.CounterValue,
-			d.UDPPacketsSentPersec,
-			normalizeSessionName(d.Name),
+			data[UDPPacketsSentPersec].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.fecRate,
 			prometheus.GaugeValue,
-			d.FECRate,
-			normalizeSessionName(d.Name),
+			data[FECRate].FirstValue,
+			sessionName,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.lossRate,
 			prometheus.GaugeValue,
-			d.LossRate,
-			normalizeSessionName(d.Name),
+			data[LossRate].FirstValue,
+			sessionName,
 		)

 		ch <- prometheus.MustNewConstMetric(
 			c.retransmissionRate,
 			prometheus.GaugeValue,
-			d.RetransmissionRate,
-			normalizeSessionName(d.Name),
+			data[RetransmissionRate].FirstValue,
+			sessionName,
 		)
 	}

 	return nil
 }

-type perflibRemoteFxGraphics struct {
-	Name                                               string
-	AverageEncodingTime                                float64 `perflib:"Average Encoding Time"`
-	FrameQuality                                       float64 `perflib:"Frame Quality"`
-	FramesSkippedPerSecondInsufficientClientResources  float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
-	FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
-	FramesSkippedPerSecondInsufficientServerResources  float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
-	GraphicsCompressionratio                           float64 `perflib:"Graphics Compression ratio"`
-	InputFramesPerSecond                               float64 `perflib:"Input Frames/Second"`
-	OutputFramesPerSecond                              float64 `perflib:"Output Frames/Second"`
-	SourceFramesPerSecond                              float64 `perflib:"Source Frames/Second"`
-}
-
-func (c *Collector) collectRemoteFXGraphicsCounters(ctx *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
-	logger = logger.With(slog.String("collector", Name))
-	dst := make([]perflibRemoteFxGraphics, 0)
-
-	err := v1.UnmarshalObject(ctx.PerfObjects["RemoteFX Graphics"], &dst, logger)
+func (c *Collector) collectRemoteFXGraphicsCounters(ch chan<- prometheus.Metric) error {
+	perfData, err := c.perfDataCollectorGraphics.Collect()
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to collect RemoteFX Graphics metrics: %w", err)
 	}

-	for _, d := range dst {
+	for name, data := range perfData {
 		// only connect metrics for remote named sessions
-		n := strings.ToLower(normalizeSessionName(d.Name))
-		if n == "" || n == "services" || n == "console" {
+		sessionName := normalizeSessionName(name)
+		if n := strings.ToLower(sessionName); n == "" || n == "services" || n == "console" {
 			continue
 		}

 		ch <- prometheus.MustNewConstMetric(
 			c.averageEncodingTime,
 			prometheus.GaugeValue,
-			utils.MilliSecToSec(d.AverageEncodingTime),
-			normalizeSessionName(d.Name),
+			utils.MilliSecToSec(data[AverageEncodingTime].FirstValue),
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.frameQuality,
 			prometheus.GaugeValue,
-			d.FrameQuality,
-			normalizeSessionName(d.Name),
+			data[FrameQuality].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.framesSkippedPerSecondInsufficientResources,
 			prometheus.CounterValue,
-			d.FramesSkippedPerSecondInsufficientClientResources,
-			normalizeSessionName(d.Name),
+			data[FramesSkippedPerSecondInsufficientClientResources].FirstValue,
+			sessionName,
 			"client",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.framesSkippedPerSecondInsufficientResources,
 			prometheus.CounterValue,
-			d.FramesSkippedPerSecondInsufficientNetworkResources,
-			normalizeSessionName(d.Name),
+			data[FramesSkippedPerSecondInsufficientNetworkResources].FirstValue,
+			sessionName,
 			"network",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.framesSkippedPerSecondInsufficientResources,
 			prometheus.CounterValue,
-			d.FramesSkippedPerSecondInsufficientServerResources,
-			normalizeSessionName(d.Name),
+			data[FramesSkippedPerSecondInsufficientServerResources].FirstValue,
+			sessionName,
 			"server",
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.graphicsCompressionRatio,
 			prometheus.GaugeValue,
-			d.GraphicsCompressionratio,
-			normalizeSessionName(d.Name),
+			data[GraphicsCompressionratio].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.inputFramesPerSecond,
 			prometheus.CounterValue,
-			d.InputFramesPerSecond,
-			normalizeSessionName(d.Name),
+			data[InputFramesPerSecond].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.outputFramesPerSecond,
 			prometheus.CounterValue,
-			d.OutputFramesPerSecond,
-			normalizeSessionName(d.Name),
+			data[OutputFramesPerSecond].FirstValue,
+			sessionName,
 		)
 		ch <- prometheus.MustNewConstMetric(
 			c.sourceFramesPerSecond,
 			prometheus.CounterValue,
-			d.SourceFramesPerSecond,
-			normalizeSessionName(d.Name),
+			data[SourceFramesPerSecond].FirstValue,
+			sessionName,
 		)
 	}

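For readers unfamiliar with the perfdata API, the rewritten helpers imply its result shape: `Collect()` returns a nested map (instance name → counter name → sample), with the raw reading in `FirstValue`, and the counter-name constants double as map keys. A runnable sketch under that assumption, with a mocked result in place of a live PDH query:

package main

import (
	"fmt"
	"strings"
)

// counterValue mirrors the sample type implied by data[...].FirstValue.
type counterValue struct{ FirstValue float64 }

const baseTCPRTT = "Base TCP RTT" // same string the new const file defines

func main() {
	// Mocked Collect() result: instance name -> counter name -> sample.
	perfData := map[string]map[string]counterValue{
		"RDP-Tcp 55": {baseTCPRTT: {FirstValue: 42}}, // hypothetical session
		"Console":    {baseTCPRTT: {FirstValue: 1}},
	}

	for name, data := range perfData {
		// Skip non-remote sessions, as both collect helpers do.
		if n := strings.ToLower(name); n == "" || n == "services" || n == "console" {
			continue
		}

		// Milliseconds to seconds, matching utils.MilliSecToSec.
		fmt.Printf("%s base TCP RTT: %.3fs\n", name, data[baseTCPRTT].FirstValue/1000)
	}
}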
@@ -165,10 +165,6 @@ func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan
 	logger = logger.With(slog.String("collector", Name))

 	if err := c.collect(logger, ch); err != nil {
-		logger.Error("failed collecting API service metrics:",
-			slog.Any("err", err),
-		)
-
 		return fmt.Errorf("failed collecting API service metrics: %w", err)
 	}

@@ -1,11 +1,11 @@
 package system

 const (
-	ContextSwitchesPersec     = "Context Switches/sec"
-	ExceptionDispatchesPersec = "Exception Dispatches/sec"
-	ProcessorQueueLength      = "Processor Queue Length"
-	SystemCallsPersec         = "System Calls/sec"
-	SystemUpTime              = "System Up Time"
-	Processes                 = "Processes"
-	Threads                   = "Threads"
+	contextSwitchesPersec     = "Context Switches/sec"
+	exceptionDispatchesPersec = "Exception Dispatches/sec"
+	processorQueueLength      = "Processor Queue Length"
+	systemCallsPersec         = "System Calls/sec"
+	systemUpTime              = "System Up Time"
+	processes                 = "Processes"
+	threads                   = "Threads"
 )

@@ -62,23 +62,23 @@ func (c *Collector) GetPerfCounter(_ *slog.Logger) ([]string, error) {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
 	c.perfDataCollector.Close()

 	return nil
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	counters := []string{
-		ContextSwitchesPersec,
-		ExceptionDispatchesPersec,
-		ProcessorQueueLength,
-		SystemCallsPersec,
-		SystemUpTime,
-		Processes,
-		Threads,
-	}
-
 	var err error

-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "System", nil, counters)
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "System", nil, []string{
+		contextSwitchesPersec,
+		exceptionDispatchesPersec,
+		processorQueueLength,
+		systemCallsPersec,
+		systemUpTime,
+		processes,
+		threads,
+	})
 	if err != nil {
 		return fmt.Errorf("failed to create System collector: %w", err)
 	}
@@ -160,37 +160,37 @@ func (c *Collector) collect(ch chan<- prometheus.Metric) error {
 	ch <- prometheus.MustNewConstMetric(
 		c.contextSwitchesTotal,
 		prometheus.CounterValue,
-		data[ContextSwitchesPersec].FirstValue,
+		data[contextSwitchesPersec].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.exceptionDispatchesTotal,
 		prometheus.CounterValue,
-		data[ExceptionDispatchesPersec].FirstValue,
+		data[exceptionDispatchesPersec].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.processorQueueLength,
 		prometheus.GaugeValue,
-		data[ProcessorQueueLength].FirstValue,
+		data[processorQueueLength].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.processes,
 		prometheus.GaugeValue,
-		data[Processes].FirstValue,
+		data[processes].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.systemCallsTotal,
 		prometheus.CounterValue,
-		data[SystemCallsPersec].FirstValue,
+		data[systemCallsPersec].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.systemUpTime,
 		prometheus.GaugeValue,
-		data[SystemUpTime].FirstValue,
+		data[systemUpTime].FirstValue,
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.threads,
 		prometheus.GaugeValue,
-		data[Threads].FirstValue,
+		data[threads].FirstValue,
 	)

 	// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.

@@ -101,14 +101,9 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {

 // Collect sends the metric values for each metric
 // to the provided prometheus Metric channel.
-func (c *Collector) Collect(_ *types.ScrapeContext, logger *slog.Logger, ch chan<- prometheus.Metric) error {
-	logger = logger.With(slog.String("collector", Name))
+func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- prometheus.Metric) error {
 	if err := c.collect(ch); err != nil {
-		logger.Error("failed collecting thermalzone metrics",
-			slog.Any("err", err),
-		)
-
-		return err
+		return fmt.Errorf("failed collecting thermalzone metrics: %w", err)
 	}

 	return nil

@@ -115,18 +115,16 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		}
 	}

-	counters := []string{
+	var err error
+
+	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Windows Time Service", nil, []string{
 		ClockFrequencyAdjustmentPPBTotal,
 		ComputedTimeOffset,
 		NTPClientTimeSourceCount,
 		NTPRoundTripDelay,
 		NTPServerIncomingRequestsTotal,
 		NTPServerOutgoingResponsesTotal,
-	}
-
-	var err error
-
-	c.perfDataCollector, err = perfdata.NewCollector(perfdata.V2, "Windows Time Service", nil, counters)
+	})
 	if err != nil {
 		return fmt.Errorf("failed to create Windows Time Service collector: %w", err)
 	}

@@ -3,6 +3,7 @@
 package update

 import (
+	"context"
 	"errors"
 	"fmt"
 	"log/slog"
@@ -37,7 +38,8 @@ var ErrNoUpdates = errors.New("no updates available")
 type Collector struct {
 	config Config

-	mu sync.RWMutex
+	mu          sync.RWMutex
+	ctxCancelFn context.CancelFunc

 	metricsBuf []prometheus.Metric
@@ -77,6 +79,8 @@ func NewWithFlags(app *kingpin.Application) *Collector {
 }

 func (c *Collector) Close(_ *slog.Logger) error {
+	c.ctxCancelFn()
+
 	return nil
 }
@@ -85,8 +89,12 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {

 	logger.Info("update collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.")

+	ctx, cancel := context.WithCancel(context.Background())
+
 	initErrCh := make(chan error, 1)
-	go c.scheduleUpdateStatus(logger, initErrCh, c.config.online)
+	go c.scheduleUpdateStatus(ctx, logger, initErrCh, c.config.online)
+
+	c.ctxCancelFn = cancel

 	if err := <-initErrCh; err != nil {
 		return fmt.Errorf("failed to initialize Windows Update collector: %w", err)
@@ -137,7 +145,7 @@ func (c *Collector) Collect(_ *types.ScrapeContext, _ *slog.Logger, ch chan<- pr
 	return nil
 }

-func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- error, online bool) {
+func (c *Collector) scheduleUpdateStatus(ctx context.Context, logger *slog.Logger, initErrCh chan<- error, online bool) {
 	// The only way to run WMI queries in parallel while being thread-safe is to
 	// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
 	// Otherwise, attempting to initialize and run parallel queries across
@@ -226,10 +234,12 @@ func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- e
 	usd := us.ToIDispatch()
 	defer usd.Release()

+	var metricsBuf []prometheus.Metric
+
 	for {
-		metricsBuf, err := c.fetchUpdates(logger, usd)
+		metricsBuf, err = c.fetchUpdates(logger, usd)
 		if err != nil {
-			logger.Error("failed to fetch updates",
+			logger.ErrorContext(ctx, "failed to fetch updates",
 				slog.Any("err", err),
 			)
@@ -244,7 +254,11 @@ func (c *Collector) scheduleUpdateStatus(logger *slog.Logger, initErrCh chan<- e
 		c.metricsBuf = metricsBuf
 		c.mu.Unlock()

-		time.Sleep(c.config.scrapeInterval)
+		select {
+		case <-time.After(c.config.scrapeInterval):
+		case <-ctx.Done():
+			return
+		}
 	}
 }

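The update collector's background poller becomes cancellable: Build creates a `context.WithCancel`, stores the cancel function on the collector, and Close invokes it; the loop then waits on `time.After` or `ctx.Done()` instead of sleeping unconditionally, so shutdown no longer blocks for up to a full scrape interval. A self-contained sketch of that shutdown pattern (the poll body is a stand-in for `fetchUpdates`):

package main

import (
	"context"
	"fmt"
	"time"
)

// poll mirrors the reshaped scheduleUpdateStatus loop: do work, then wait
// for either the next interval or cancellation.
func poll(ctx context.Context, interval time.Duration, doWork func()) {
	for {
		doWork()

		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return // the context was cancelled; exit instead of sleeping on
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go poll(ctx, 500*time.Millisecond, func() { fmt.Println("tick") })

	time.Sleep(1200 * time.Millisecond)
	cancel() // what Collector.Close does via ctxCancelFn
	time.Sleep(100 * time.Millisecond)
}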
@@ -82,7 +82,9 @@ func (c *Collector) Close(_ *slog.Logger) error {
 }

 func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
-	counters := []string{
+	var err error
+
+	c.perfDataCollectorCPU, err = perfdata.NewCollector(perfdata.V2, "VM Processor", perftypes.TotalInstance, []string{
 		cpuLimitMHz,
 		cpuReservationMHz,
 		cpuShares,
@@ -90,11 +92,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		cpuTimePercents,
 		couEffectiveVMSpeedMHz,
 		cpuHostProcessorSpeedMHz,
-	}
-
-	var err error
-
-	c.perfDataCollectorCPU, err = perfdata.NewCollector(perfdata.V2, "VM Processor", perftypes.TotalInstance, counters)
+	})
 	if err != nil {
 		return fmt.Errorf("failed to create VM Processor collector: %w", err)
 	}
@@ -142,7 +140,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		nil,
 	)

-	counters = []string{
+	c.perfDataCollectorMemory, err = perfdata.NewCollector(perfdata.V2, "VM Memory", nil, []string{
 		memActiveMB,
 		memBalloonedMB,
 		memLimitMB,
@@ -155,9 +153,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
 		memSwappedMB,
 		memTargetSizeMB,
 		memUsedMB,
-	}
-
-	c.perfDataCollectorMemory, err = perfdata.NewCollector(perfdata.V2, "VM Memory", nil, counters)
+	})
 	if err != nil {
 		return fmt.Errorf("failed to create VM Memory collector: %w", err)
 	}