Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Jan-Otto Kröpke 2023-06-08 02:29:50 +02:00
parent 4350587141
commit 6890f391d4
49 changed files with 183 additions and 183 deletions
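
The change is mechanical and identical across all 49 files: every go-kit level.<Level>(logger).Log(...) call has its return value discarded explicitly with a blank identifier, since Log returns an error that was previously ignored implicitly. Below is a minimal sketch of the before/after pattern, assuming the github.com/go-kit/log API used by this exporter; the motivation (silencing an errcheck-style lint) is an assumption, as the commit carries no description.

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// Before: the error returned by Log is dropped implicitly,
	// which errcheck-style linters flag.
	level.Info(logger).Log("msg", "collector started")

	// After (the pattern applied in this commit): the error is
	// discarded explicitly with the blank identifier.
	_ = level.Info(logger).Log("msg", "collector started")
}

Every hunk below is that same one-line substitution.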


@@ -458,7 +458,7 @@ func newADCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting ad metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting ad metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -119,7 +119,7 @@ func adcsCollectorMethod(logger log.Logger) (Collector, error) {
func (c *adcsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectADCSCounters(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting ADCS metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -230,7 +230,7 @@ func newCacheCollector(logger log.Logger) (Collector, error) {
// Collect implements the Collector interface
func (c *CacheCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting cache metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting cache metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -29,25 +29,25 @@ const (
func getWindowsVersion(logger log.Logger) float64 {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry", "err", err)
return 0
}
defer func() {
err = k.Close()
if err != nil {
level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
_ = level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
currentv, _, err := k.GetStringValue("CurrentVersion")
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry to determine current Windows version", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine current Windows version", "err", err)
return 0
}
currentv_flt, err := strconv.ParseFloat(currentv, 64)
level.Debug(logger).Log("msg", fmt.Sprintf("Detected Windows version %f\n", currentv_flt))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Detected Windows version %f\n", currentv_flt))
return currentv_flt
}


@@ -165,7 +165,7 @@ func newContainerMetricsCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting ContainerMetricsCollector metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting ContainerMetricsCollector metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -175,7 +175,7 @@ func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- promet
func (c *ContainerMetricsCollector) containerClose(container hcsshim.Container) {
err := container.Close()
if err != nil {
level.Error(c.logger).Log("err", err)
_ = level.Error(c.logger).Log("err", err)
}
}
@@ -184,7 +184,7 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
// Types Container is passed to get the containers compute systems only
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
if err != nil {
level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err)
_ = level.Error(c.logger).Log("msg", "Err in Getting containers", "err", err)
return nil, err
}
@@ -205,13 +205,13 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
defer c.containerClose(container)
}
if err != nil {
level.Error(c.logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err)
_ = level.Error(c.logger).Log("msg", "err in opening container", "containerId", containerDetails.ID, "err", err)
continue
}
cstats, err := container.Statistics()
if err != nil {
level.Error(c.logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err)
_ = level.Error(c.logger).Log("msg", "err in fetching container Statistics", "containerId", containerDetails.ID, "err", err)
continue
}
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
@@ -260,7 +260,7 @@ func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prome
)
if len(cstats.Network) == 0 {
level.Info(c.logger).Log("msg", "No Network Stats for container", "containetId", containerDetails.ID)
_ = level.Info(c.logger).Log("msg", "No Network Stats for container", "containetId", containerDetails.ID)
continue
}


@@ -61,7 +61,7 @@ type win32_Processor struct {
// to the provided prometheus Metric channel.
func (c *CpuInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting cpu_info metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -54,7 +54,7 @@ func newCSCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting cs metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting cs metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -98,7 +98,7 @@ func newDFSRCollectorFlags(app *kingpin.Application) {
func newDFSRCollector(logger log.Logger) (Collector, error) {
const subsystem = "dfsr"
logger = log.With(logger, "collector", subsystem)
level.Info(logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
_ = level.Info(logger).Log("msg", "dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
enabled := expandEnabledChildCollectors(*dfsrEnabledCollectors)
perfCounters := make([]string, 0, len(enabled))


@@ -134,7 +134,7 @@ var (
// Collect sends the metric values for each metric to the provided prometheus Metric channel.
func (c *DiskDriveInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting disk_drive_info metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -185,7 +185,7 @@ func newDNSCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting dns metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting dns metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -207,7 +207,7 @@ func (c *exchangeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Met
for _, collectorName := range c.enabledCollectors {
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
_ = level.Error(c.logger).Log("msg", "Error in "+collectorName, "err", err)
return err
}
}


@@ -89,7 +89,7 @@ func newFSRMQuotaCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *FSRMQuotaCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting fsrmquota metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -704,62 +704,62 @@ func newHyperVCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectVmHealth(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV health status metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmVid(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV pages metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmHv(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV hv status metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmProcessor(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV processor metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHostLPUsage(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host logical processors metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHostCpuUsage(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV host CPU metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmCpuUsage(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV VM CPU metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmSwitch(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV switch metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmEthernet(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV ethernet metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmStorage(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual storage metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmNetwork(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual network metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectVmMemory(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting hyperV virtual memory metrics", "desc", desc, "err", err)
return err
}
@@ -1055,7 +1055,7 @@ func (c *HyperVCollector) collectHostLPUsage(ch chan<- prometheus.Metric) (*prom
// The name format is Hv LP <core id>
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectHostLPUsage: %q", obj.Name))
continue
}
coreId := parts[2]
@@ -1109,7 +1109,7 @@ func (c *HyperVCollector) collectHostCpuUsage(ch chan<- prometheus.Metric) (*pro
// The name format is Root VP <core id>
parts := strings.Split(obj.Name, " ")
if len(parts) != 3 {
level.Warn(c.logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name)
_ = level.Warn(c.logger).Log("msg", "Unexpected format of Name in collectHostCpuUsage: "+obj.Name)
continue
}
coreId := parts[2]
@@ -1170,12 +1170,12 @@ func (c *HyperVCollector) collectVmCpuUsage(ch chan<- prometheus.Metric) (*prome
// The name format is <VM Name>:Hv VP <vcore id>
parts := strings.Split(obj.Name, ":")
if len(parts) != 2 {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>"))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of Name in collectVmCpuUsage: %q, expected %q. Skipping.", obj.Name, "<VM Name>:Hv VP <vcore id>"))
continue
}
coreParts := strings.Split(parts[1], " ")
if len(coreParts) != 3 {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>"))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Unexpected format of core identifier in collectVmCpuUsage: %q, expected %q. Skipping.", parts[1], "Hv VP <vcore id>"))
continue
}
vmName := parts[0]


@@ -54,28 +54,28 @@ type simple_version struct {
func getIISVersion(logger log.Logger) simple_version {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\InetStp\`, registry.QUERY_VALUE)
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
defer func() {
err = k.Close()
if err != nil {
level.Warn(logger).Log("msg", fmt.Sprintf("Failed to close registry key"), "err", err)
_ = level.Warn(logger).Log("msg", fmt.Sprintf("Failed to close registry key"), "err", err)
}
}()
major, _, err := k.GetIntegerValue("MajorVersion")
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
minor, _, err := k.GetIntegerValue("MinorVersion")
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine IIS version", "err", err)
return simple_version{}
}
level.Debug(logger).Log("msg", fmt.Sprintf("Detected IIS %d.%d\n", major, minor))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Detected IIS %d.%d\n", major, minor))
return simple_version{
major: major,
@@ -264,7 +264,7 @@ func newIISCollector(logger log.Logger) (Collector, error) {
if *oldSiteExclude != "" {
if !siteExcludeSet {
level.Warn(logger).Log("msg", "--collector.iis.site-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.site-exclude")
_ = level.Warn(logger).Log("msg", "--collector.iis.site-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.site-exclude")
*siteExclude = *oldSiteExclude
} else {
return nil, errors.New("--collector.iis.site-blacklist and --collector.iis.site-exclude are mutually exclusive")
@@ -272,7 +272,7 @@ func newIISCollector(logger log.Logger) (Collector, error) {
}
if *oldSiteInclude != "" {
if !siteIncludeSet {
level.Warn(logger).Log("msg", "--collector.iis.site-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.site-include")
_ = level.Warn(logger).Log("msg", "--collector.iis.site-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.site-include")
*siteInclude = *oldSiteInclude
} else {
return nil, errors.New("--collector.iis.site-whitelist and --collector.iis.site-include are mutually exclusive")
@@ -281,7 +281,7 @@ func newIISCollector(logger log.Logger) (Collector, error) {
if *oldAppExclude != "" {
if !appExcludeSet {
level.Warn(logger).Log("msg", "--collector.iis.app-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.app-exclude")
_ = level.Warn(logger).Log("msg", "--collector.iis.app-blacklist is DEPRECATED and will be removed in a future release, use --collector.iis.app-exclude")
*appExclude = *oldAppExclude
} else {
return nil, errors.New("--collector.iis.app-blacklist and --collector.iis.app-exclude are mutually exclusive")
@@ -289,7 +289,7 @@ func newIISCollector(logger log.Logger) (Collector, error) {
}
if *oldAppInclude != "" {
if !appIncludeSet {
level.Warn(logger).Log("msg", "--collector.iis.app-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.app-include")
_ = level.Warn(logger).Log("msg", "--collector.iis.app-whitelist is DEPRECATED and will be removed in a future release, use --collector.iis.app-include")
*appInclude = *oldAppInclude
} else {
return nil, errors.New("--collector.iis.app-whitelist and --collector.iis.app-include are mutually exclusive")
@@ -920,22 +920,22 @@ func newIISCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectWebService(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectAPP_POOL_WAS(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectW3SVC_W3WP(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectWebServiceCache(ctx, ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting iis metrics", "desc", desc, "err", err)
return err
}


@@ -93,7 +93,7 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
if *volumeOldExclude != "" {
if !volumeExcludeSet {
level.Warn(logger).Log("msg", "--collector.logical_disk.volume-blacklist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-exclude")
_ = level.Warn(logger).Log("msg", "--collector.logical_disk.volume-blacklist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-exclude")
*volumeExclude = *volumeOldExclude
} else {
return nil, errors.New("--collector.logical_disk.volume-blacklist and --collector.logical_disk.volume-exclude are mutually exclusive")
@@ -101,7 +101,7 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
}
if *volumeOldInclude != "" {
if !volumeIncludeSet {
level.Warn(logger).Log("msg", "--collector.logical_disk.volume-whitelist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-include")
_ = level.Warn(logger).Log("msg", "--collector.logical_disk.volume-whitelist is DEPRECATED and will be removed in a future release, use --collector.logical_disk.volume-include")
*volumeInclude = *volumeOldInclude
} else {
return nil, errors.New("--collector.logical_disk.volume-whitelist and --collector.logical_disk.volume-include are mutually exclusive")
@@ -232,7 +232,7 @@ func newLogicalDiskCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting logical_disk metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting logical_disk metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -38,7 +38,7 @@ func newLogonCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -268,7 +268,7 @@ func newMemoryCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting memory metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting memory metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -44,7 +44,7 @@ func newMSMQCollector(logger log.Logger) (Collector, error) {
logger = log.With(logger, "collector", subsystem)
if *msmqWhereClause == "" {
level.Warn(logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
_ = level.Warn(logger).Log("msg", "No where-clause specified for msmq collector. This will generate a very large number of metrics!")
}
return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
@@ -82,7 +82,7 @@ func newMSMQCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting msmq metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting msmq metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -41,19 +41,19 @@ func getMSSQLInstances(logger log.Logger) mssqlInstancesType {
regkey := `Software\Microsoft\Microsoft SQL Server\Instance Names\SQL`
k, err := registry.OpenKey(registry.LOCAL_MACHINE, regkey, registry.QUERY_VALUE)
if err != nil {
level.Warn(logger).Log("msg", "Couldn't open registry to determine SQL instances", "err", err)
_ = level.Warn(logger).Log("msg", "Couldn't open registry to determine SQL instances", "err", err)
return sqlDefaultInstance
}
defer func() {
err = k.Close()
if err != nil {
level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
_ = level.Warn(logger).Log("msg", "Failed to close registry key", "err", err)
}
}()
instanceNames, err := k.ReadValueNames(0)
if err != nil {
level.Warn(logger).Log("msg", "Can't ReadSubKeyNames", "err", err)
_ = level.Warn(logger).Log("msg", "Can't ReadSubKeyNames", "err", err)
return sqlDefaultInstance
}
@@ -63,7 +63,7 @@ func getMSSQLInstances(logger log.Logger) mssqlInstancesType {
}
}
level.Debug(logger).Log("msg", fmt.Sprintf("Detected MSSQL Instances: %#v\n", sqlInstances))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Detected MSSQL Instances: %#v\n", sqlInstances))
return sqlInstances
}
@@ -1933,11 +1933,11 @@ func (c *MSSQLCollector) execute(ctx *ScrapeContext, name string, fn mssqlCollec
var success float64
if err != nil {
level.Error(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s failed after %fs", name, duration.Seconds()), "err", err)
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s failed after %fs", name, duration.Seconds()), "err", err)
success = 0
c.mssqlChildCollectorFailure++
} else {
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s succeeded after %fs.", name, duration.Seconds()))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql class collector %s succeeded after %fs.", name, duration.Seconds()))
success = 1
}
ch <- prometheus.MustNewConstMetric(
@@ -2028,7 +2028,7 @@ type mssqlAccessMethods struct {
func (c *MSSQLCollector) collectAccessMethods(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlAccessMethods
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_accessmethods collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "accessmethods")], &dst, c.logger); err != nil {
return nil, err
@@ -2363,7 +2363,7 @@ type mssqlAvailabilityReplica struct {
func (c *MSSQLCollector) collectAvailabilityReplica(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlAvailabilityReplica
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_availreplica collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "availreplica")], &dst, c.logger); err != nil {
return nil, err
@@ -2471,7 +2471,7 @@ type mssqlBufferManager struct {
func (c *MSSQLCollector) collectBufferManager(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlBufferManager
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_bufman collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "bufman")], &dst, c.logger); err != nil {
return nil, err
@@ -2675,7 +2675,7 @@ type mssqlDatabaseReplica struct {
func (c *MSSQLCollector) collectDatabaseReplica(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlDatabaseReplica
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_dbreplica collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "dbreplica")], &dst, c.logger); err != nil {
return nil, err
@@ -2914,7 +2914,7 @@ type mssqlDatabases struct {
func (c *MSSQLCollector) collectDatabases(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlDatabases
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_databases collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "databases")], &dst, c.logger); err != nil {
return nil, err
@@ -3296,7 +3296,7 @@ type mssqlGeneralStatistics struct {
func (c *MSSQLCollector) collectGeneralStatistics(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlGeneralStatistics
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_genstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "genstats")], &dst, c.logger); err != nil {
return nil, err
@@ -3491,7 +3491,7 @@ type mssqlLocks struct {
func (c *MSSQLCollector) collectLocks(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlLocks
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_locks collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "locks")], &dst, c.logger); err != nil {
return nil, err
@@ -3589,7 +3589,7 @@ type mssqlMemoryManager struct {
func (c *MSSQLCollector) collectMemoryManager(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlMemoryManager
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_memmgr collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "memmgr")], &dst, c.logger); err != nil {
return nil, err
@@ -3758,7 +3758,7 @@ type mssqlSQLStatistics struct {
func (c *MSSQLCollector) collectSQLStats(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlSQLStatistics
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlstats")], &dst, c.logger); err != nil {
return nil, err
@@ -3866,7 +3866,7 @@ type mssqlWaitStatistics struct {
func (c *MSSQLCollector) collectWaitStats(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlWaitStatistics
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_waitstats collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "waitstats")], &dst, c.logger); err != nil {
return nil, err
@@ -3972,7 +3972,7 @@ type mssqlSQLErrors struct {
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object
func (c *MSSQLCollector) collectSQLErrors(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlSQLErrors
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_sqlerrors collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "sqlerrors")], &dst, c.logger); err != nil {
return nil, err
@@ -4015,7 +4015,7 @@ type mssqlTransactions struct {
// - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object
func (c *MSSQLCollector) collectTransactions(ctx *ScrapeContext, ch chan<- prometheus.Metric, sqlInstance string) (*prometheus.Desc, error) {
var dst []mssqlTransactions
level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("mssql_transactions collector iterating sql instance %s.", sqlInstance))
if err := unmarshalObject(ctx.perfObjects[mssqlGetPerfObjectName(sqlInstance, "transactions")], &dst, c.logger); err != nil {
return nil, err


@@ -93,7 +93,7 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
if *nicOldExclude != "" {
if !nicExcludeSet {
level.Warn(logger).Log("msg", "--collector.net.nic-blacklist is DEPRECATED and will be removed in a future release, use --collector.net.nic-exclude")
_ = level.Warn(logger).Log("msg", "--collector.net.nic-blacklist is DEPRECATED and will be removed in a future release, use --collector.net.nic-exclude")
*nicExclude = *nicOldExclude
} else {
return nil, errors.New("--collector.net.nic-blacklist and --collector.net.nic-exclude are mutually exclusive")
@@ -101,7 +101,7 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
}
if *nicOldInclude != "" {
if !nicIncludeSet {
level.Warn(logger).Log("msg", "--collector.net.nic-whitelist is DEPRECATED and will be removed in a future release, use --collector.net.nic-include")
_ = level.Warn(logger).Log("msg", "--collector.net.nic-whitelist is DEPRECATED and will be removed in a future release, use --collector.net.nic-include")
*nicInclude = *nicOldInclude
} else {
return nil, errors.New("--collector.net.nic-whitelist and --collector.net.nic-include are mutually exclusive")
@@ -198,7 +198,7 @@ func newNetworkCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting net metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting net metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -57,7 +57,7 @@ func newNETFramework_NETCLRExceptionsCollector(logger log.Logger) (Collector, er
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -49,7 +49,7 @@ func newNETFramework_NETCLRInteropCollector(logger log.Logger) (Collector, error
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrinterop metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -56,7 +56,7 @@ func newNETFramework_NETCLRJitCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrjit metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrjit metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -91,7 +91,7 @@ func newNETFramework_NETCLRLoadingCollector(logger log.Logger) (Collector, error
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrloading metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrloading metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -77,7 +77,7 @@ func newNETFramework_NETCLRLocksAndThreadsCollector(logger log.Logger) (Collecto
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -115,7 +115,7 @@ func newNETFramework_NETCLRMemoryCollector(logger log.Logger) (Collector, error)
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrmemory metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -70,7 +70,7 @@ func newNETFramework_NETCLRRemotingCollector(logger log.Logger) (Collector, erro
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrremoting metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -56,7 +56,7 @@ func newNETFramework_NETCLRSecurityCollector(logger log.Logger) (Collector, erro
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -135,7 +135,7 @@ func newOSCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting os metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting os metrics", "desc", desc, "err", err)
return err
}
return nil
@@ -205,7 +205,7 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
file, err := os.Stat(fileString)
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
if err != nil {
level.Debug(c.logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Failed to read page file (reason: %s): %s\n", err, fileString))
} else {
fsipf += float64(file.Size())
}
@@ -276,7 +276,7 @@ func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (
fsipf,
)
} else {
level.Debug(c.logger).Log("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
_ = level.Debug(c.logger).Log("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
}
ch <- prometheus.MustNewConstMetric(
c.VirtualMemoryFreeBytes,


@@ -82,7 +82,7 @@ func unmarshalObject(obj *perflib.PerfObject, vs interface{}, logger log.Logger)
ctr, found := counters[tag]
if !found {
level.Debug(logger).Log("msg", fmt.Sprintf("missing counter %q, have %v", tag, counterMapKeys(counters)))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("missing counter %q, have %v", tag, counterMapKeys(counters)))
continue
}
if !target.Field(i).CanSet() {


@@ -94,7 +94,7 @@ func newProcessCollector(logger log.Logger) (Collector, error) {
if *processOldExclude != "" {
if !processExcludeSet {
level.Warn(logger).Log("msg", "--collector.process.blacklist is DEPRECATED and will be removed in a future release, use --collector.process.exclude")
_ = level.Warn(logger).Log("msg", "--collector.process.blacklist is DEPRECATED and will be removed in a future release, use --collector.process.exclude")
*processExclude = *processOldExclude
} else {
return nil, errors.New("--collector.process.blacklist and --collector.process.exclude are mutually exclusive")
@@ -102,7 +102,7 @@ func newProcessCollector(logger log.Logger) (Collector, error) {
}
if *processOldInclude != "" {
if !processIncludeSet {
level.Warn(logger).Log("msg", "--collector.process.whitelist is DEPRECATED and will be removed in a future release, use --collector.process.include")
_ = level.Warn(logger).Log("msg", "--collector.process.whitelist is DEPRECATED and will be removed in a future release, use --collector.process.include")
*processInclude = *processOldInclude
} else {
return nil, errors.New("--collector.process.whitelist and --collector.process.include are mutually exclusive")
@@ -110,7 +110,7 @@ func newProcessCollector(logger log.Logger) (Collector, error) {
}
if *processInclude == ".*" && *processExclude == "" {
level.Warn(logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
_ = level.Warn(logger).Log("msg", "No filters specified for process collector. This will generate a very large number of metrics!")
}
return &processCollector{
@@ -257,7 +257,7 @@ func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metr
var dst_wp []WorkerProcess
q_wp := queryAll(&dst_wp, c.logger)
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
level.Debug(c.logger).Log("msg", fmt.Sprintf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err))
}
for _, process := range data {


@@ -167,7 +167,7 @@ func (coll *Prometheus) Collect(ch chan<- prometheus.Metric) {
}
if len(remainingCollectorNames) > 0 {
level.Warn(coll.logger).Log("msg", fmt.Sprintf("Collection timed out, still waiting for %v", remainingCollectorNames))
_ = level.Warn(coll.logger).Log("msg", fmt.Sprintf("Collection timed out, still waiting for %v", remainingCollectorNames))
}
l.Unlock()
@@ -185,9 +185,9 @@ func execute(name string, c Collector, ctx *ScrapeContext, ch chan<- prometheus.
)
if err != nil {
level.Error(logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err)
_ = level.Error(logger).Log("msg", fmt.Sprintf("collector %s failed after %fs", name, duration), "err", err)
return failed
}
level.Debug(logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("collector %s succeeded after %fs.", name, duration))
return success
}


@@ -159,11 +159,11 @@ func newRemoteFx(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *RemoteFxCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -110,7 +110,7 @@ func newScheduledTask(logger log.Logger) (Collector, error) {
if *taskOldExclude != "" {
if !taskExcludeSet {
level.Warn(logger).Log("msg", "--collector.scheduled_task.blacklist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.exclude")
_ = level.Warn(logger).Log("msg", "--collector.scheduled_task.blacklist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.exclude")
*taskExclude = *taskOldExclude
} else {
return nil, errors.New("--collector.scheduled_task.blacklist and --collector.scheduled_task.exclude are mutually exclusive")
@@ -118,7 +118,7 @@ func newScheduledTask(logger log.Logger) (Collector, error) {
}
if *taskOldInclude != "" {
if !taskIncludeSet {
level.Warn(logger).Log("msg", "--collector.scheduled_task.whitelist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.include")
_ = level.Warn(logger).Log("msg", "--collector.scheduled_task.whitelist is DEPRECATED and will be removed in a future release, use --collector.scheduled_task.include")
*taskInclude = *taskOldInclude
} else {
return nil, errors.New("--collector.scheduled_task.whitelist and --collector.scheduled_task.include are mutually exclusive")
@@ -167,7 +167,7 @@ func newScheduledTask(logger log.Logger) (Collector, error) {
func (c *ScheduledTaskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting user metrics", "desc", desc, "err", err)
return err
}


@@ -57,10 +57,10 @@ func newserviceCollector(logger log.Logger) (Collector, error) {
logger = log.With(logger, "collector", subsystem)
if *serviceWhereClause == "" {
level.Warn(logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
_ = level.Warn(logger).Log("msg", "No where-clause specified for service collector. This will generate a very large number of metrics!")
}
if *useAPI {
level.Warn(logger).Log("msg", "API collection is enabled.")
_ = level.Warn(logger).Log("msg", "API collection is enabled.")
}
return &serviceCollector{
@@ -99,12 +99,12 @@ func newserviceCollector(logger log.Logger) (Collector, error) {
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if *useAPI {
if err := c.collectAPI(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting API service metrics:", "err", err)
return err
}
} else {
if err := c.collectWMI(ch); err != nil {
level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
_ = level.Error(c.logger).Log("msg", "failed collecting WMI service metrics:", "err", err)
return err
}
}
@@ -259,14 +259,14 @@ func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
// Get UTF16 service name.
serviceName, err := syscall.UTF16PtrFromString(service)
if err != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Service %s get name error: %#v", service, err))
continue
}
// Open connection for service handler.
serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ)
if err != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Open service %s error: %#v", service, err))
continue
}
@@ -277,14 +277,14 @@ func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
// Get Service Configuration.
serviceConfig, err := serviceManager.Config()
if err != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Get ervice %s config error: %#v", service, err))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get ervice %s config error: %#v", service, err))
continue
}
// Get Service Current Status.
serviceStatus, err := serviceManager.Query()
if err != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Get service %s status error: %#v", service, err))
continue
}


@@ -114,11 +114,11 @@ func newSMTPCollector(logger log.Logger) (Collector, error) {
const subsystem = "smtp"
logger = log.With(logger, "collector", subsystem)
level.Info(logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
_ = level.Info(logger).Log("msg", "smtp collector is in an experimental state! Metrics for this collector have not been tested.")
if *serverOldExclude != "" {
if !serverExcludeSet {
level.Warn(logger).Log("msg", "--collector.smtp.server-blacklist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-exclude")
_ = level.Warn(logger).Log("msg", "--collector.smtp.server-blacklist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-exclude")
*serverExclude = *serverOldExclude
} else {
return nil, errors.New("--collector.smtp.server-blacklist and --collector.smtp.server-exclude are mutually exclusive")
@@ -126,7 +126,7 @@ func newSMTPCollector(logger log.Logger) (Collector, error) {
}
if *serverOldInclude != "" {
if !serverIncludeSet {
level.Warn(logger).Log("msg", "--collector.smtp.server-whitelist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-include")
_ = level.Warn(logger).Log("msg", "--collector.smtp.server-whitelist is DEPRECATED and will be removed in a future release, use --collector.smtp.server-include")
*serverInclude = *serverOldInclude
} else {
return nil, errors.New("--collector.smtp.server-whitelist and --collector.smtp.server-include are mutually exclusive")
@@ -397,7 +397,7 @@ func newSMTPCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *SMTPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting smtp metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting smtp metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -70,7 +70,7 @@ func newSystemCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting system metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting system metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -91,7 +91,7 @@ func newTCPCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting tcp metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting tcp metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -303,23 +303,23 @@ func newTeradiciPcoipCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *teradiciPcoipCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
level.Error(c.logger).Log("failed collecting teradici session audio metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting teradici session audio metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectGeneral(ch); err != nil {
level.Error(c.logger).Log("failed collecting teradici session general metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting teradici session general metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
level.Error(c.logger).Log("failed collecting teradici session imaging metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting teradici session imaging metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectNetwork(ch); err != nil {
level.Error(c.logger).Log("failed collecting teradici session network metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting teradici session network metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
level.Error(c.logger).Log("failed collecting teradici session USB metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting teradici session USB metrics", "desc", desc, "err", err)
return err
}
return nil


@@ -30,7 +30,7 @@ func isConnectionBrokerServer(logger log.Logger) bool {
return true
}
}
level.Debug(logger).Log("msg", "host is not a connection broker skipping Connection Broker performance metrics.")
_ = level.Debug(logger).Log("msg", "host is not a connection broker skipping Connection Broker performance metrics.")
return false
}
@@ -178,18 +178,18 @@ func newTerminalServicesCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TerminalServicesCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectTSSessionCount(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectTSSessionCounters(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting terminal services session count metrics", "desc", desc, "err", err)
return err
}
// only collect CollectionBrokerPerformance if host is a Connection Broker
if c.connectionBrokerEnabled {
if desc, err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting Connection Broker performance metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting Connection Broker performance metrics", "desc", desc, "err", err)
return err
}
}


@@ -118,7 +118,7 @@ func (c *textFileCollector) convertMetricFamily(metricFamily *dto.MetricFamily,
for _, metric := range metricFamily.Metric {
if metric.TimestampMs != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric))
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric))
}
labels := metric.GetLabel()
@@ -188,7 +188,7 @@ func (c *textFileCollector) convertMetricFamily(metricFamily *dto.MetricFamily,
buckets, values...,
)
default:
level.Error(c.logger).Log("msg", "unknown metric type for file")
_ = level.Error(c.logger).Log("msg", "unknown metric type for file")
continue
}
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
@@ -256,7 +256,7 @@ func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Met
// Iterate over files and accumulate their metrics.
files, err := ioutil.ReadDir(c.path)
if err != nil && c.path != "" {
level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading textfile collector directory %q", c.path), "err", err)
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error reading textfile collector directory %q", c.path), "err", err)
error = 1.0
}
@@ -270,27 +270,27 @@ fileLoop:
continue
}
path := filepath.Join(c.path, f.Name())
level.Debug(c.logger).Log("msg", fmt.Sprintf("Processing file %q", path))
_ = level.Debug(c.logger).Log("msg", fmt.Sprintf("Processing file %q", path))
file, err := os.Open(path)
if err != nil {
level.Error(c.logger).Log("msg", fmt.Sprintf("Error opening %q: %v", path, err))
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error opening %q: %v", path, err))
error = 1.0
continue
}
var parser expfmt.TextParser
r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
if err = checkBOM(encoding); err != nil {
level.Error(c.logger).Log("msg", fmt.Sprintf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error()))
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error()))
error = 1.0
continue
}
parsedFamilies, err := parser.TextToMetricFamilies(r)
closeErr := file.Close()
if closeErr != nil {
level.Warn(c.logger).Log("msg", fmt.Sprintf("Error closing file"), "err", err)
_ = level.Warn(c.logger).Log("msg", fmt.Sprintf("Error closing file"), "err", err)
}
if err != nil {
level.Error(c.logger).Log("msg", fmt.Sprintf("Error parsing %q: %v", path, err))
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Error parsing %q: %v", path, err))
error = 1.0
continue
}
@ -302,7 +302,7 @@ fileLoop:
families_array = append(families_array, mf)
for _, m := range mf.Metric {
if m.TimestampMs != nil {
level.Error(c.logger).Log("msg", fmt.Sprintf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path))
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path))
error = 1.0
continue fileLoop
}
@ -315,7 +315,7 @@ fileLoop:
// If duplicate metrics are detected in a *single* file, skip processing of file metrics
if duplicateMetricEntry(families_array) {
level.Error(c.logger).Log("msg", fmt.Sprintf("Duplicate metrics detected in file %s. Skipping file processing.", f.Name()))
_ = level.Error(c.logger).Log("msg", fmt.Sprintf("Duplicate metrics detected in file %s. Skipping file processing.", f.Name()))
error = 1.0
continue
}
@ -331,7 +331,7 @@ fileLoop:
// If duplicates are detected across *multiple* files, return error.
if duplicateMetricEntry(metricFamilies) {
level.Error(c.logger).Log("msg", "Duplicate metrics detected across multiple files")
_ = level.Error(c.logger).Log("msg", "Duplicate metrics detected across multiple files")
error = 1.0
} else {
for _, mf := range metricFamilies {

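As a rough illustration of the parsing step above, assuming the same expfmt package the textfile collector already imports, a standalone sketch that flags client-side timestamps (the input string is invented for the example):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Invented file contents; the real input comes from *.prom files in the textfile directory.
	input := "# TYPE demo_metric gauge\ndemo_metric{source=\"a\"} 1\n"

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		for _, m := range mf.Metric {
			// Files carrying client-side timestamps are skipped by the collector.
			if m.TimestampMs != nil {
				fmt.Printf("unsupported timestamp in %s, skipping file\n", name)
			}
		}
	}
}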
View File

@ -54,7 +54,7 @@ func newThermalZoneCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
level.Error(c.logger).Log("failed collecting thermalzone metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting thermalzone metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@ -76,7 +76,7 @@ func newTimeCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *TimeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
level.Error(c.logger).Log("failed collecting time metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting time metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@ -165,11 +165,11 @@ func newVmwareCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectMem(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware memory metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware memory metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectCpu(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware cpu metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware cpu metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@ -546,51 +546,51 @@ func newVmwareBlastCollector(logger log.Logger) (Collector, error) {
// to the provided prometheus Metric channel.
func (c *vmwareBlastCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collectAudio(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast audio metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast audio metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectCdr(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast CDR metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast CDR metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectClipboard(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast clipboard metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast clipboard metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectHtml5Mmr(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast HTML5 MMR metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast HTML5 MMR metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectImaging(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast imaging metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast imaging metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectRtav(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast RTAV metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast RTAV metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSerialPortandScanner(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast serial port and scanner metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast serial port and scanner metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSession(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectSkypeforBusinessControl(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast skype for business control metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast skype for business control metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectThinPrint(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast thin print metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast thin print metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectUsb(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast USB metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast USB metrics", "desc", desc, "err", err)
return err
}
if desc, err := c.collectWindowsMediaMmr(ch); err != nil {
level.Error(c.logger).Log("failed collecting vmware blast windows media MMR metrics", "desc", desc, "err", err)
_ = level.Error(c.logger).Log("failed collecting vmware blast windows media MMR metrics", "desc", desc, "err", err)
return err
}
return nil

View File

@ -23,7 +23,7 @@ func queryAll(src interface{}, logger log.Logger) string {
b.WriteString("SELECT * FROM ")
b.WriteString(className(src))
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
@ -32,7 +32,7 @@ func queryAllForClass(src interface{}, class string, logger log.Logger) string {
b.WriteString("SELECT * FROM ")
b.WriteString(class)
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
@ -46,7 +46,7 @@ func queryAllWhere(src interface{}, where string, logger log.Logger) string {
b.WriteString(where)
}
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}
@ -60,6 +60,6 @@ func queryAllForClassWhere(src interface{}, class string, where string, logger l
b.WriteString(where)
}
level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
_ = level.Debug(logger).Log("msg", fmt.Sprintf("Generated WMI query %s", b.String()))
return b.String()
}

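For reference, the helpers above only build plain WQL strings; a sketch of how such a query might be issued with the yusufpapurcu/wmi package the exporter depends on (the trimmed Win32_OperatingSystem struct is an illustration, and AllowMissingFields mirrors the initWbem setup shown further down):

package main

import (
	"fmt"

	"github.com/yusufpapurcu/wmi"
)

// Win32_OperatingSystem is deliberately trimmed; only the listed field is decoded.
type Win32_OperatingSystem struct {
	Caption string
}

func main() {
	// SELECT * with a partial struct needs AllowMissingFields, as initWbem also sets.
	wmi.DefaultClient.AllowMissingFields = true

	var dst []Win32_OperatingSystem
	q := "SELECT * FROM Win32_OperatingSystem" // same shape as the strings built by queryAll and friends
	if err := wmi.Query(q, &dst); err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, osInfo := range dst {
		fmt.Println(osInfo.Caption)
	}
}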
View File

@ -35,7 +35,7 @@ type Resolver struct {
// NewResolver returns a Resolver structure.
func NewResolver(file string, logger log.Logger) (*Resolver, error) {
flags := map[string]string{}
level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file: %v", file))
_ = level.Info(logger).Log("msg", fmt.Sprintf("Loading configuration file: %v", file))
if _, err := os.Stat(file); err != nil {
return nil, err
}

View File

@ -88,10 +88,10 @@ func initWbem(logger log.Logger) {
// This initialization prevents a memory leak on WMF 5+. See
// https://github.com/prometheus-community/windows_exporter/issues/77 and
// linked issues for details.
level.Debug(logger).Log("msg", "Initializing SWbemServices")
_ = level.Debug(logger).Log("msg", "Initializing SWbemServices")
s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
if err != nil {
level.Error(logger).Log("err", err)
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
wmi.DefaultClient.AllowMissingFields = true
@ -146,20 +146,20 @@ func main() {
kingpin.MustParse(app.Parse(os.Args[1:]))
logger, err := winlog.New(winlogConfig)
if err != nil {
level.Error(logger).Log("err", err)
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
level.Debug(logger).Log("msg", "Logging has Started")
_ = level.Debug(logger).Log("msg", "Logging has Started")
if *configFile != "" {
resolver, err := config.NewResolver(*configFile, logger)
if err != nil {
level.Error(logger).Log("msg", "could not load config file", "err", err)
_ = level.Error(logger).Log("msg", "could not load config file", "err", err)
os.Exit(1)
}
err = resolver.Bind(app, os.Args[1:])
if err != nil {
level.Error(logger).Log("err", err)
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
@ -173,7 +173,7 @@ func main() {
logger, err = winlog.New(winlogConfig)
if err != nil {
level.Error(logger).Log("err", err)
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
}
@ -199,22 +199,22 @@ func main() {
collectors, err := loadCollectors(*enabledCollectors, logger)
if err != nil {
level.Error(logger).Log("msg", "Couldn't load collectors", "err", err)
_ = level.Error(logger).Log("msg", "Couldn't load collectors", "err", err)
os.Exit(1)
}
u, err := user.Current()
if err != nil {
level.Error(logger).Log("err", err)
_ = level.Error(logger).Log("err", err)
os.Exit(1)
}
level.Info(logger).Log("msg", fmt.Sprintf("Running as %v", u.Username))
_ = level.Info(logger).Log("msg", fmt.Sprintf("Running as %v", u.Username))
if strings.Contains(u.Username, "ContainerAdministrator") || strings.Contains(u.Username, "ContainerUser") {
level.Warn(logger).Log("msg", "Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.")
_ = level.Warn(logger).Log("msg", "Running as a preconfigured Windows Container user. This may mean you do not have Windows HostProcess containers configured correctly and some functionality will not work as expected.")
}
level.Info(logger).Log("msg", fmt.Sprintf("Enabled collectors: %v", strings.Join(keys(collectors), ", ")))
_ = level.Info(logger).Log("msg", fmt.Sprintf("Enabled collectors: %v", strings.Join(keys(collectors), ", ")))
h := &metricsHandler{
timeoutMargin: *timeoutMargin,
@ -242,7 +242,7 @@ func main() {
w.Header().Set("Content-Type", "application/json")
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
if err != nil {
level.Debug(logger).Log("Failed to write to stream", "err", err)
_ = level.Debug(logger).Log("Failed to write to stream", "err", err)
}
})
http.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
@ -282,27 +282,27 @@ func main() {
}
landingPage, err := web.NewLandingPage(landingConfig)
if err != nil {
level.Error(logger).Log("msg", "failed to generate landing page", "err", err)
_ = level.Error(logger).Log("msg", "failed to generate landing page", "err", err)
os.Exit(1)
}
http.Handle("/", landingPage)
}
level.Info(logger).Log("msg", "Starting windows_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0))
_ = level.Info(logger).Log("msg", "Starting windows_exporter", "version", version.Info())
_ = level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext())
_ = level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0))
go func() {
server := &http.Server{}
if err := web.ListenAndServe(server, webConfig, logger); err != nil {
level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err)
_ = level.Error(logger).Log("msg", "cannot start windows_exporter", "err", err)
os.Exit(1)
}
}()
for {
if <-initiate.StopCh {
level.Info(logger).Log("msg", "Shutting down windows_exporter")
_ = level.Info(logger).Log("msg", "Shutting down windows_exporter")
break
}
}
@ -350,7 +350,7 @@ func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
timeoutSeconds, err = strconv.ParseFloat(v, 64)
if err != nil {
level.Warn(mh.logger).Log("msg", fmt.Sprintf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout))
_ = level.Warn(mh.logger).Log("msg", fmt.Sprintf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout))
}
}
if timeoutSeconds == 0 {
@ -361,7 +361,7 @@ func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
reg := prometheus.NewRegistry()
err, wc := mh.collectorFactory(time.Duration(timeoutSeconds*float64(time.Second)), r.URL.Query()["collect[]"])
if err != nil {
level.Warn(mh.logger).Log("msg", "Couldn't create filtered metrics handler", "err", err)
_ = level.Warn(mh.logger).Log("msg", "Couldn't create filtered metrics handler", "err", err)
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) //nolint:errcheck
return

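A simplified sketch of the timeout negotiation in the handler above; defaultTimeout is a stand-in constant here, and the timeout margin applied by the real handler is omitted:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

const defaultTimeout = 10.0 // stand-in; the exporter derives its default from configuration

func scrapeTimeout(r *http.Request) float64 {
	timeoutSeconds := 0.0
	if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
		parsed, err := strconv.ParseFloat(v, 64)
		if err != nil {
			fmt.Printf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f\n", v, defaultTimeout)
		} else {
			timeoutSeconds = parsed
		}
	}
	if timeoutSeconds == 0 {
		timeoutSeconds = defaultTimeout
	}
	return timeoutSeconds
}

func main() {
	r := httptest.NewRequest(http.MethodGet, "/metrics", nil)
	r.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", "30")
	fmt.Println(scrapeTimeout(r)) // 30
}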
12
go.mod
View File

@ -9,11 +9,11 @@ require (
github.com/go-kit/log v0.2.1
github.com/go-ole/go-ole v1.2.6
github.com/leoluk/perflib_exporter v0.2.1
github.com/prometheus/client_golang v1.16.1
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model v0.4.0
github.com/prometheus/common v0.44.0
github.com/prometheus/exporter-toolkit v0.10.0
github.com/sirupsen/logrus v1.9.3
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/yusufpapurcu/wmi v1.2.3
go.opencensus.io v0.23.0 // indirect
golang.org/x/sys v0.10.0
@ -21,15 +21,15 @@ require (
)
require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect