Convert remaining collectors to use ConstMetrics

Johannes 'fish' Ziemke 2016-12-28 15:21:31 +01:00
parent c53bc168fe
commit 8e50b80d12
17 changed files with 455 additions and 614 deletions

View File

@@ -26,7 +26,7 @@ import (
)
type bondingCollector struct {
slaves, active *prometheus.GaugeVec
slaves, active typedDesc
}
func init() {
@@ -37,22 +37,16 @@ func init() {
// It exposes the number of configured and active slave of linux bonding interfaces.
func NewBondingCollector() (Collector, error) {
return &bondingCollector{
slaves: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Name: "net_bonding_slaves",
Help: "Number of configured slaves per bonding interface.",
},
[]string{"master"},
),
active: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Name: "net_bonding_slaves_active",
Help: "Number of active slaves per bonding interface.",
},
[]string{"master"},
),
slaves: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "bonding", "slaves"),
"Number of configured slaves per bonding interface.",
[]string{"master"}, nil,
), prometheus.GaugeValue},
active: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "bonding", "active"),
"Number of active slaves per bonding interface.",
[]string{"master"}, nil,
), prometheus.GaugeValue},
}, nil
}
@@ -63,11 +57,9 @@ func (c *bondingCollector) Update(ch chan<- prometheus.Metric) (err error) {
return err
}
for master, status := range bondingStats {
c.slaves.WithLabelValues(master).Set(float64(status[0]))
c.active.WithLabelValues(master).Set(float64(status[1]))
ch <- c.slaves.mustNewConstMetric(float64(status[0]), master)
ch <- c.active.mustNewConstMetric(float64(status[1]), master)
}
c.slaves.Collect(ch)
c.active.Collect(ch)
return nil
}

View File

@@ -32,3 +32,12 @@ type Collector interface {
// Get new metrics and expose them via prometheus registry.
Update(ch chan<- prometheus.Metric) (err error)
}
type typedDesc struct {
desc *prometheus.Desc
valueType prometheus.ValueType
}
func (d *typedDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric {
return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...)
}
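
For reference, a minimal sketch of how a collector plugs into this helper; it is not part of the commit, and the exampleCollector type and the node_example_value metric are hypothetical, while Namespace, Collector, typedDesc and mustNewConstMetric are the identifiers defined in this package:

// Hypothetical collector built on typedDesc (illustration only).
type exampleCollector struct {
	value typedDesc
}

func newExampleCollector() Collector {
	return &exampleCollector{
		value: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "value"),
			"A hypothetical per-device value.",
			[]string{"device"}, nil,
		), prometheus.GaugeValue},
	}
}

func (c *exampleCollector) Update(ch chan<- prometheus.Metric) error {
	// Const metrics are built at scrape time and sent straight to the channel,
	// so the collector keeps no vector state and no longer calls Collect.
	ch <- c.value.mustNewConstMetric(42, "sda")
	return nil
}

One practical effect of this pattern is that label sets which stop appearing in the source data (for example, a removed device) are simply not emitted on the next scrape, instead of lingering in a *Vec until explicitly deleted.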

View File

@@ -89,14 +89,11 @@ func init() {
// CPU stats.
func NewStatCollector() (Collector, error) {
return &statCollector{
cpu: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Name: "cpu_seconds_total",
Help: "Seconds the CPU spent in each mode.",
},
[]string{"cpu", "mode"},
),
cpu: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "cpu", "seconds_total"),
"Seconds the CPU spent in each mode.",
[]string{"cpu", "mode"}, nil,
), prometheus.CounterValue},
}, nil
}
@@ -118,12 +115,11 @@ func (c *statCollector) Update(ch chan<- prometheus.Metric) (err error) {
return err
}
for cpu, t := range cpuTimes {
c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(cpu), "mode": "user"}).Set(t.user)
c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(cpu), "mode": "nice"}).Set(t.nice)
c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(cpu), "mode": "system"}).Set(t.sys)
c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(cpu), "mode": "interrupt"}).Set(t.intr)
c.cpu.With(prometheus.Labels{"cpu": strconv.Itoa(cpu), "mode": "idle"}).Set(t.idle)
ch <- c.cpu.mustNewConstMetric(t.user, strconv.Itoa(cpu), "user")
ch <- c.cpu.mustNewConstMetric(t.nice, strconv.Itoa(cpu), "nice")
ch <- c.cpu.mustNewConstMetric(t.sys, strconv.Itoa(cpu), "system")
ch <- c.cpu.mustNewConstMetric(t.intr, strconv.Itoa(cpu), "interrupt")
ch <- c.cpu.mustNewConstMetric(t.idle, strconv.Itoa(cpu), "idle")
}
c.cpu.Collect(ch)
return err
}

View File

@@ -137,12 +137,12 @@ const (
)
type devstatCollector struct {
bytes *prometheus.CounterVec
bytes_total *prometheus.CounterVec
transfers *prometheus.CounterVec
duration *prometheus.CounterVec
busyTime *prometheus.CounterVec
blocks *prometheus.CounterVec
bytes typedDesc
bytes_total typedDesc
transfers typedDesc
duration typedDesc
busyTime typedDesc
blocks typedDesc
}
func init() {
@@ -153,51 +153,31 @@ func init() {
// Device stats.
func NewDevstatCollector() (Collector, error) {
return &devstatCollector{
bytes: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: devstatSubsystem,
Name: "bytes_total",
Help: "The total number of bytes in transactions.",
},
[]string{"device", "type"},
),
transfers: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: devstatSubsystem,
Name: "transfers_total",
Help: "The total number of transactions.",
},
[]string{"device", "type"},
),
duration: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: devstatSubsystem,
Name: "duration_seconds_total",
Help: "The total duration of transactions in seconds.",
},
[]string{"device", "type"},
),
busyTime: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: devstatSubsystem,
Name: "busy_time_seconds_total",
Help: "Total time the device had one or more transactions outstanding in seconds.",
},
[]string{"device"},
),
blocks: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: devstatSubsystem,
Name: "blocks_transferred_total",
Help: "The total number of blocks transferred.",
},
[]string{"device"},
),
bytes: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, devstatSubsystem, "bytes_total"),
"The total number of bytes in transactions.",
[]string{"device", "type"}, nil,
), prometheus.CounterValue},
transfers: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, devstatSubsystem, "transfers_total"),
"The total number of transactions.",
[]string{"device", "type"}, nil,
), prometheus.CounterValue},
duration: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, devstatSubsystem, "duration_seconds_total"),
"The total duration of transactions in seconds.",
[]string{"device", "type"}, nil,
), prometheus.CounterValue},
busyTime: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, devstatSubsystem, "busy_time_seconds_total"),
"Total time the device had one or more transactions outstanding in seconds.",
[]string{"device"}, nil,
), prometheus.CounterValue},
blocks: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, devstatSubsystem, "blocks_transferred_total"),
"The total number of blocks transferred.",
[]string{"device"}, nil,
), prometheus.CounterValue},
}, nil
}
@@ -213,27 +193,16 @@ func (c *devstatCollector) Update(ch chan<- prometheus.Metric) (err error) {
for i := C.int(0); i < count; i++ {
stats := C._get_stats(i)
device := fmt.Sprintf("%s%d", C.GoString(&stats.device[0]), stats.unit)
// Free metrics are disabled for now, please see PR #88 for more details.
c.bytes.With(prometheus.Labels{"device": device, "type": "read"}).Set(float64(stats.bytes.read))
c.bytes.With(prometheus.Labels{"device": device, "type": "write"}).Set(float64(stats.bytes.write))
//c.bytes.With(prometheus.Labels{"device": device, "type": "free"}).Set(float64(stats.bytes.free))
c.transfers.With(prometheus.Labels{"device": device, "type": "other"}).Set(float64(stats.transfers.other))
c.transfers.With(prometheus.Labels{"device": device, "type": "read"}).Set(float64(stats.transfers.read))
c.transfers.With(prometheus.Labels{"device": device, "type": "write"}).Set(float64(stats.transfers.write))
//c.transfers.With(prometheus.Labels{"device": device, "type": "free"}).Set(float64(stats.transfers.free))
c.duration.With(prometheus.Labels{"device": device, "type": "other"}).Set(float64(stats.duration.other))
c.duration.With(prometheus.Labels{"device": device, "type": "read"}).Set(float64(stats.duration.read))
c.duration.With(prometheus.Labels{"device": device, "type": "write"}).Set(float64(stats.duration.write))
//c.duration.With(prometheus.Labels{"device": device, "type": "free"}).Set(float64(stats.duration.free))
c.busyTime.With(prometheus.Labels{"device": device}).Set(float64(stats.busyTime))
c.blocks.With(prometheus.Labels{"device": device}).Set(float64(stats.blocks))
ch <- c.bytes.mustNewConstMetric(float64(stats.bytes.read), device, "read")
ch <- c.bytes.mustNewConstMetric(float64(stats.bytes.write), device, "write")
ch <- c.transfers.mustNewConstMetric(float64(stats.transfers.other), device, "other")
ch <- c.transfers.mustNewConstMetric(float64(stats.transfers.read), device, "read")
ch <- c.transfers.mustNewConstMetric(float64(stats.transfers.write), device, "write")
ch <- c.duration.mustNewConstMetric(float64(stats.duration.other), device, "other")
ch <- c.duration.mustNewConstMetric(float64(stats.duration.read), device, "read")
ch <- c.duration.mustNewConstMetric(float64(stats.duration.write), device, "write")
ch <- c.busyTime.mustNewConstMetric(float64(stats.busyTime), device)
ch <- c.blocks.mustNewConstMetric(float64(stats.blocks), device)
}
c.bytes.Collect(ch)
c.transfers.Collect(ch)
c.duration.Collect(ch)
c.busyTime.Collect(ch)
c.blocks.Collect(ch)
return err
}

View File

@@ -40,7 +40,7 @@ var (
type diskstatsCollector struct {
ignoredDevicesPattern *regexp.Regexp
metrics []prometheus.Collector
descs []typedDesc
}
func init() {
@@ -55,129 +55,116 @@ func NewDiskstatsCollector() (Collector, error) {
return &diskstatsCollector{
ignoredDevicesPattern: regexp.MustCompile(*ignoredDevices),
// Docs from https://www.kernel.org/doc/Documentation/iostats.txt
metrics: []prometheus.Collector{
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "reads_completed",
Help: "The total number of reads completed successfully.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "reads_merged",
Help: "The number of reads merged. See https://www.kernel.org/doc/Documentation/iostats.txt.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "sectors_read",
Help: "The total number of sectors read successfully.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "read_time_ms",
Help: "The total number of milliseconds spent by all reads.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "writes_completed",
Help: "The total number of writes completed successfully.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "writes_merged",
Help: "The number of writes merged. See https://www.kernel.org/doc/Documentation/iostats.txt.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "sectors_written",
Help: "The total number of sectors written successfully.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "write_time_ms",
Help: "This is the total number of milliseconds spent by all writes.",
},
diskLabelNames,
),
prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "io_now",
Help: "The number of I/Os currently in progress.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "io_time_ms",
Help: "Milliseconds spent doing I/Os.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "io_time_weighted",
Help: "The weighted # of milliseconds spent doing I/Os. See https://www.kernel.org/doc/Documentation/iostats.txt.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "bytes_read",
Help: "The total number of bytes read successfully.",
},
diskLabelNames,
),
prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: diskSubsystem,
Name: "bytes_written",
Help: "The total number of bytes written successfully.",
},
diskLabelNames,
),
descs: []typedDesc{
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "reads_completed"),
"The total number of reads completed successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "reads_merged"),
"The total number of reads merged. See https://www.kernel.org/doc/Documentation/iostats.txt.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "sectors_read"),
"The total number of sectors read successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "read_time_ms"),
"The total number of milliseconds spent by all reads.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "writes_completed"),
"The total number of writes completed successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "writes_merged"),
"The number of writes merged. See https://www.kernel.org/doc/Documentation/iostats.txt.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "sectors_written"),
"The total number of sectors written successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "write_time_ms"),
"This is the total number of milliseconds spent by all writes.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "io_now"),
"The number of I/Os currently in progress.",
diskLabelNames,
nil,
), valueType: prometheus.GaugeValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "io_time_ms"),
"Total Milliseconds spent doing I/Os.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "io_time_weighted"),
"The weighted # of milliseconds spent doing I/Os. See https://www.kernel.org/doc/Documentation/iostats.txt.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "bytes_read"),
"The total number of bytes read successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, diskSubsystem, "bytes_written"),
"The total number of bytes written successfully.",
diskLabelNames,
nil,
), valueType: prometheus.CounterValue,
},
},
}, nil
}
func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) {
func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error {
procDiskStats := procFilePath("diskstats")
diskStats, err := getDiskStats()
if err != nil {
@@ -190,29 +177,19 @@ func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) {
continue
}
if len(stats) != len(c.metrics) {
if len(stats) != len(c.descs) {
return fmt.Errorf("invalid line for %s for %s", procDiskStats, dev)
}
for k, value := range stats {
for i, value := range stats {
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("invalid value %s in diskstats: %s", value, err)
}
if counter, ok := c.metrics[k].(*prometheus.CounterVec); ok {
counter.WithLabelValues(dev).Set(v)
} else if gauge, ok := c.metrics[k].(*prometheus.GaugeVec); ok {
gauge.WithLabelValues(dev).Set(v)
} else {
return fmt.Errorf("unexpected collector %d", k)
}
ch <- c.descs[i].mustNewConstMetric(v, dev)
}
}
for _, c := range c.metrics {
c.Collect(ch)
}
return err
return nil
}
func getDiskStats() (map[string]map[int]string, error) {

View File

@@ -12,73 +12,73 @@ go_gc_duration_seconds_count 0
go_goroutines 13
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 1.236968e+06
go_memstats_alloc_bytes 1.196768e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.236968e+06
go_memstats_alloc_bytes_total 1.196768e+06
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 2717
go_memstats_buck_hash_sys_bytes 1.442915e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 265
go_memstats_frees_total 385
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 65536
go_memstats_gc_sys_bytes 98304
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 1.236968e+06
go_memstats_heap_alloc_bytes 1.196768e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 139264
go_memstats_heap_idle_bytes 884736
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 1.630208e+06
go_memstats_heap_inuse_bytes 1.867776e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 8119
go_memstats_heap_objects 8934
# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes_total counter
go_memstats_heap_released_bytes_total 0
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.769472e+06
go_memstats_heap_sys_bytes 2.752512e+06
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 9
go_memstats_last_gc_time_seconds 0
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 9
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 8384
go_memstats_mallocs_total 9319
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 9600
go_memstats_mcache_inuse_bytes 4800
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 16080
go_memstats_mspan_inuse_bytes 19920
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 16384
go_memstats_mspan_sys_bytes 32768
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 4.194304e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 558435
go_memstats_other_sys_bytes 803989
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 327680
go_memstats_stack_inuse_bytes 393216
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 327680
go_memstats_stack_sys_bytes 393216
# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 2.756608e+06
go_memstats_sys_bytes 5.540088e+06
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN
@@ -100,6 +100,16 @@ http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN
http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN
http_response_size_bytes_sum{handler="prometheus"} 0
http_response_size_bytes_count{handler="prometheus"} 0
# HELP node_bonding_active Number of active slaves per bonding interface.
# TYPE node_bonding_active gauge
node_bonding_active{master="bond0"} 0
node_bonding_active{master="dmz"} 2
node_bonding_active{master="int"} 1
# HELP node_bonding_slaves Number of configured slaves per bonding interface.
# TYPE node_bonding_slaves gauge
node_bonding_slaves{master="bond0"} 0
node_bonding_slaves{master="dmz"} 2
node_bonding_slaves{master="int"} 2
# HELP node_boot_time Node boot time, in unixtime.
# TYPE node_boot_time gauge
node_boot_time 1.418183276e+09
@@ -225,7 +235,7 @@ node_disk_io_now{device="nvme0n1"} 0
node_disk_io_now{device="sda"} 0
node_disk_io_now{device="sr0"} 0
node_disk_io_now{device="vda"} 0
# HELP node_disk_io_time_ms Milliseconds spent doing I/Os.
# HELP node_disk_io_time_ms Total milliseconds spent doing I/Os.
# TYPE node_disk_io_time_ms counter
node_disk_io_time_ms{device="dm-0"} 1.1325968e+07
node_disk_io_time_ms{device="dm-1"} 76
@@ -285,7 +295,7 @@ node_disk_reads_completed{device="nvme0n1"} 47114
node_disk_reads_completed{device="sda"} 2.5354637e+07
node_disk_reads_completed{device="sr0"} 0
node_disk_reads_completed{device="vda"} 1.775784e+06
# HELP node_disk_reads_merged The number of reads merged. See https://www.kernel.org/doc/Documentation/iostats.txt.
# HELP node_disk_reads_merged The total number of reads merged. See https://www.kernel.org/doc/Documentation/iostats.txt.
# TYPE node_disk_reads_merged counter
node_disk_reads_merged{device="dm-0"} 0
node_disk_reads_merged{device="dm-1"} 0
@@ -427,98 +437,108 @@ node_drbd_remote_unacknowledged{device="drbd1"} 12347
node_entropy_available_bits 1337
# HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which node_exporter was built.
# TYPE node_exporter_build_info gauge
node_exporter_build_info{branch="",goversion="go1.6.2",revision="",version=""} 1
node_exporter_build_info{branch="master",goversion="go1.6.1",revision="10e525ff0258b2d18119f0327cc9c7ff86e53375",version="0.13.0"} 1
# HELP node_exporter_scrape_duration_seconds node_exporter: Duration of a scrape job.
# TYPE node_exporter_scrape_duration_seconds summary
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.5"} 0.00016681900000000002
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.9"} 0.00016681900000000002
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.99"} 0.00016681900000000002
node_exporter_scrape_duration_seconds_sum{collector="bonding",result="success"} 0.00016681900000000002
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.5"} 0.000727146
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.9"} 0.000727146
node_exporter_scrape_duration_seconds{collector="bonding",result="success",quantile="0.99"} 0.000727146
node_exporter_scrape_duration_seconds_sum{collector="bonding",result="success"} 0.000727146
node_exporter_scrape_duration_seconds_count{collector="bonding",result="success"} 1
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.5"} 7.791e-05
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.9"} 7.791e-05
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.99"} 7.791e-05
node_exporter_scrape_duration_seconds_sum{collector="conntrack",result="success"} 7.791e-05
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.5"} 0.00031236
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.9"} 0.00031236
node_exporter_scrape_duration_seconds{collector="conntrack",result="success",quantile="0.99"} 0.00031236
node_exporter_scrape_duration_seconds_sum{collector="conntrack",result="success"} 0.00031236
node_exporter_scrape_duration_seconds_count{collector="conntrack",result="success"} 1
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.5"} 0.005133268000000001
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.9"} 0.005133268000000001
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.99"} 0.005133268000000001
node_exporter_scrape_duration_seconds_sum{collector="diskstats",result="success"} 0.005133268000000001
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.5"} 0.020673309
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.9"} 0.020673309
node_exporter_scrape_duration_seconds{collector="diskstats",result="success",quantile="0.99"} 0.020673309
node_exporter_scrape_duration_seconds_sum{collector="diskstats",result="success"} 0.020673309
node_exporter_scrape_duration_seconds_count{collector="diskstats",result="success"} 1
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.5"} 0.000170072
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.9"} 0.000170072
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.99"} 0.000170072
node_exporter_scrape_duration_seconds_sum{collector="entropy",result="success"} 0.000170072
node_exporter_scrape_duration_seconds{collector="drbd",result="success",quantile="0.5"} 0.006692643000000001
node_exporter_scrape_duration_seconds{collector="drbd",result="success",quantile="0.9"} 0.006692643000000001
node_exporter_scrape_duration_seconds{collector="drbd",result="success",quantile="0.99"} 0.006692643000000001
node_exporter_scrape_duration_seconds_sum{collector="drbd",result="success"} 0.006692643000000001
node_exporter_scrape_duration_seconds_count{collector="drbd",result="success"} 1
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.5"} 0.000102584
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.9"} 0.000102584
node_exporter_scrape_duration_seconds{collector="entropy",result="success",quantile="0.99"} 0.000102584
node_exporter_scrape_duration_seconds_sum{collector="entropy",result="success"} 0.000102584
node_exporter_scrape_duration_seconds_count{collector="entropy",result="success"} 1
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.5"} 8.825900000000001e-05
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.9"} 8.825900000000001e-05
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.99"} 8.825900000000001e-05
node_exporter_scrape_duration_seconds_sum{collector="filefd",result="success"} 8.825900000000001e-05
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.5"} 0.0006287700000000001
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.9"} 0.0006287700000000001
node_exporter_scrape_duration_seconds{collector="filefd",result="success",quantile="0.99"} 0.0006287700000000001
node_exporter_scrape_duration_seconds_sum{collector="filefd",result="success"} 0.0006287700000000001
node_exporter_scrape_duration_seconds_count{collector="filefd",result="success"} 1
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.5"} 0.0076871280000000005
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.9"} 0.0076871280000000005
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.99"} 0.0076871280000000005
node_exporter_scrape_duration_seconds_sum{collector="hwmon",result="success"} 0.0076871280000000005
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.5"} 0.020271281000000002
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.9"} 0.020271281000000002
node_exporter_scrape_duration_seconds{collector="hwmon",result="success",quantile="0.99"} 0.020271281000000002
node_exporter_scrape_duration_seconds_sum{collector="hwmon",result="success"} 0.020271281000000002
node_exporter_scrape_duration_seconds_count{collector="hwmon",result="success"} 1
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.5"} 0.001088926
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.9"} 0.001088926
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.99"} 0.001088926
node_exporter_scrape_duration_seconds_sum{collector="ksmd",result="success"} 0.001088926
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.5"} 0.005877574
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.9"} 0.005877574
node_exporter_scrape_duration_seconds{collector="ksmd",result="success",quantile="0.99"} 0.005877574
node_exporter_scrape_duration_seconds_sum{collector="ksmd",result="success"} 0.005877574
node_exporter_scrape_duration_seconds_count{collector="ksmd",result="success"} 1
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.5"} 0.000189222
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.9"} 0.000189222
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.99"} 0.000189222
node_exporter_scrape_duration_seconds_sum{collector="loadavg",result="success"} 0.000189222
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.5"} 0.0009549230000000001
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.9"} 0.0009549230000000001
node_exporter_scrape_duration_seconds{collector="loadavg",result="success",quantile="0.99"} 0.0009549230000000001
node_exporter_scrape_duration_seconds_sum{collector="loadavg",result="success"} 0.0009549230000000001
node_exporter_scrape_duration_seconds_count{collector="loadavg",result="success"} 1
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.5"} 0.0030327270000000003
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.9"} 0.0030327270000000003
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.99"} 0.0030327270000000003
node_exporter_scrape_duration_seconds_sum{collector="mdadm",result="success"} 0.0030327270000000003
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.5"} 0.001143003
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.9"} 0.001143003
node_exporter_scrape_duration_seconds{collector="mdadm",result="success",quantile="0.99"} 0.001143003
node_exporter_scrape_duration_seconds_sum{collector="mdadm",result="success"} 0.001143003
node_exporter_scrape_duration_seconds_count{collector="mdadm",result="success"} 1
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.5"} 0.010434629
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.9"} 0.010434629
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.99"} 0.010434629
node_exporter_scrape_duration_seconds_sum{collector="megacli",result="success"} 0.010434629
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.5"} 0.027908419
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.9"} 0.027908419
node_exporter_scrape_duration_seconds{collector="megacli",result="success",quantile="0.99"} 0.027908419
node_exporter_scrape_duration_seconds_sum{collector="megacli",result="success"} 0.027908419
node_exporter_scrape_duration_seconds_count{collector="megacli",result="success"} 1
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.5"} 0.000966604
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.9"} 0.000966604
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.99"} 0.000966604
node_exporter_scrape_duration_seconds_sum{collector="meminfo",result="success"} 0.000966604
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.5"} 0.0061671370000000005
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.9"} 0.0061671370000000005
node_exporter_scrape_duration_seconds{collector="meminfo",result="success",quantile="0.99"} 0.0061671370000000005
node_exporter_scrape_duration_seconds_sum{collector="meminfo",result="success"} 0.0061671370000000005
node_exporter_scrape_duration_seconds_count{collector="meminfo",result="success"} 1
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.5"} 0.0017954980000000002
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.9"} 0.0017954980000000002
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.99"} 0.0017954980000000002
node_exporter_scrape_duration_seconds_sum{collector="meminfo_numa",result="success"} 0.0017954980000000002
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.5"} 0.002422029
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.9"} 0.002422029
node_exporter_scrape_duration_seconds{collector="meminfo_numa",result="success",quantile="0.99"} 0.002422029
node_exporter_scrape_duration_seconds_sum{collector="meminfo_numa",result="success"} 0.002422029
node_exporter_scrape_duration_seconds_count{collector="meminfo_numa",result="success"} 1
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.5"} 0.000512374
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.9"} 0.000512374
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.99"} 0.000512374
node_exporter_scrape_duration_seconds_sum{collector="mountstats",result="success"} 0.000512374
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.5"} 0.0007559820000000001
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.9"} 0.0007559820000000001
node_exporter_scrape_duration_seconds{collector="mountstats",result="success",quantile="0.99"} 0.0007559820000000001
node_exporter_scrape_duration_seconds_sum{collector="mountstats",result="success"} 0.0007559820000000001
node_exporter_scrape_duration_seconds_count{collector="mountstats",result="success"} 1
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.5"} 0.000495389
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.9"} 0.000495389
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.99"} 0.000495389
node_exporter_scrape_duration_seconds_sum{collector="netdev",result="success"} 0.000495389
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.5"} 0.001059188
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.9"} 0.001059188
node_exporter_scrape_duration_seconds{collector="netdev",result="success",quantile="0.99"} 0.001059188
node_exporter_scrape_duration_seconds_sum{collector="netdev",result="success"} 0.001059188
node_exporter_scrape_duration_seconds_count{collector="netdev",result="success"} 1
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.5"} 0.004669412
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.9"} 0.004669412
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.99"} 0.004669412
node_exporter_scrape_duration_seconds_sum{collector="netstat",result="success"} 0.004669412
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.5"} 0.007826913000000001
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.9"} 0.007826913000000001
node_exporter_scrape_duration_seconds{collector="netstat",result="success",quantile="0.99"} 0.007826913000000001
node_exporter_scrape_duration_seconds_sum{collector="netstat",result="success"} 0.007826913000000001
node_exporter_scrape_duration_seconds_count{collector="netstat",result="success"} 1
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.5"} 0.000185951
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.9"} 0.000185951
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.99"} 0.000185951
node_exporter_scrape_duration_seconds_sum{collector="sockstat",result="success"} 0.000185951
node_exporter_scrape_duration_seconds{collector="nfs",result="success",quantile="0.5"} 0.0006455
node_exporter_scrape_duration_seconds{collector="nfs",result="success",quantile="0.9"} 0.0006455
node_exporter_scrape_duration_seconds{collector="nfs",result="success",quantile="0.99"} 0.0006455
node_exporter_scrape_duration_seconds_sum{collector="nfs",result="success"} 0.0006455
node_exporter_scrape_duration_seconds_count{collector="nfs",result="success"} 1
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.5"} 0.007697428
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.9"} 0.007697428
node_exporter_scrape_duration_seconds{collector="sockstat",result="success",quantile="0.99"} 0.007697428
node_exporter_scrape_duration_seconds_sum{collector="sockstat",result="success"} 0.007697428
node_exporter_scrape_duration_seconds_count{collector="sockstat",result="success"} 1
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.5"} 0.00029448200000000004
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.9"} 0.00029448200000000004
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.99"} 0.00029448200000000004
node_exporter_scrape_duration_seconds_sum{collector="stat",result="success"} 0.00029448200000000004
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.5"} 0.006157345000000001
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.9"} 0.006157345000000001
node_exporter_scrape_duration_seconds{collector="stat",result="success",quantile="0.99"} 0.006157345000000001
node_exporter_scrape_duration_seconds_sum{collector="stat",result="success"} 0.006157345000000001
node_exporter_scrape_duration_seconds_count{collector="stat",result="success"} 1
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.5"} 5.180000000000001e-07
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.9"} 5.180000000000001e-07
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.99"} 5.180000000000001e-07
node_exporter_scrape_duration_seconds_sum{collector="textfile",result="success"} 5.180000000000001e-07
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.5"} 7.63e-07
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.9"} 7.63e-07
node_exporter_scrape_duration_seconds{collector="textfile",result="success",quantile="0.99"} 7.63e-07
node_exporter_scrape_duration_seconds_sum{collector="textfile",result="success"} 7.63e-07
node_exporter_scrape_duration_seconds_count{collector="textfile",result="success"} 1
# HELP node_filefd_allocated File descriptor statistics: allocated.
# TYPE node_filefd_allocated gauge
@@ -1219,16 +1239,6 @@ node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test"} 0
# HELP node_mountstats_nfs_write_pages_total Number of pages written directly via mmap()'d files.
# TYPE node_mountstats_nfs_write_pages_total counter
node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test"} 0
# HELP node_net_bonding_slaves Number of configured slaves per bonding interface.
# TYPE node_net_bonding_slaves gauge
node_net_bonding_slaves{master="bond0"} 0
node_net_bonding_slaves{master="dmz"} 2
node_net_bonding_slaves{master="int"} 2
# HELP node_net_bonding_slaves_active Number of active slaves per bonding interface.
# TYPE node_net_bonding_slaves_active gauge
node_net_bonding_slaves_active{master="bond0"} 0
node_net_bonding_slaves_active{master="dmz"} 2
node_net_bonding_slaves_active{master="int"} 1
# HELP node_netstat_IcmpMsg_InType3 Protocol IcmpMsg statistic InType3.
# TYPE node_netstat_IcmpMsg_InType3 untyped
node_netstat_IcmpMsg_InType3 104
@@ -2048,14 +2058,14 @@ node_sockstat_UDP_mem_bytes 0
node_sockstat_sockets_used 229
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics1.prom"} 1.463773694e+09
node_textfile_mtime{file="metrics2.prom"} 1.463773694e+09
node_textfile_mtime{file="metrics1.prom"} 1.4611075321691382e+09
node_textfile_mtime{file="metrics2.prom"} 1.4611075321691382e+09
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0
process_cpu_seconds_total 0.02
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1024
@@ -2064,13 +2074,13 @@ process_max_fds 1024
process_open_fds 8
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 1.1108352e+07
process_resident_memory_bytes 1.1751424e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.47509459587e+09
process_start_time_seconds 1.48293316896e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 2.0549632e+08
process_virtual_memory_bytes 2.7445248e+08
# HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom
# TYPE testmetric1_1 untyped
testmetric1_1{foo="bar"} 10

View File

@@ -19,7 +19,7 @@ package collector
import "github.com/prometheus/client_golang/prometheus"
type interruptsCollector struct {
metric *prometheus.CounterVec
desc typedDesc
}
func init() {
@@ -30,13 +30,10 @@ func init() {
// interrupts stats
func NewInterruptsCollector() (Collector, error) {
return &interruptsCollector{
metric: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Name: "interrupts",
Help: "Interrupt details.",
},
interruptLabelNames,
),
desc: typedDesc{prometheus.NewDesc(
Namespace+"_interrupts",
"Interrupt details.",
interruptLabelNames, nil,
), prometheus.CounterValue},
}, nil
}

View File

@@ -42,16 +42,9 @@ func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) {
if err != nil {
return fmt.Errorf("invalid value %s in interrupts: %s", value, err)
}
labels := prometheus.Labels{
"CPU": strconv.Itoa(cpuNo),
"type": name,
"info": interrupt.info,
"devices": interrupt.devices,
}
c.metric.With(labels).Set(fv)
ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices)
}
}
c.metric.Collect(ch)
return err
}

View File

@@ -98,23 +98,22 @@ var (
interruptLabelNames = []string{"CPU", "type", "devices"}
)
func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) {
func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error {
interrupts, err := getInterrupts()
if err != nil {
return fmt.Errorf("couldn't get interrupts: %s", err)
}
for dev, interrupt := range interrupts {
for cpuNo, value := range interrupt.values {
labels := prometheus.Labels{
"CPU": strconv.Itoa(cpuNo),
"type": fmt.Sprintf("%d", interrupt.vector),
"devices": dev,
}
c.metric.With(labels).Set(value)
ch <- c.desc.mustNewConstMetric(
value,
strconv.Itoa(cpuNo),
fmt.Sprintf("%d", interrupt.vector),
dev,
)
}
}
c.metric.Collect(ch)
return err
return nil
}
type interrupt struct {

View File

@@ -26,8 +26,8 @@ import (
type ipvsCollector struct {
Collector
fs procfs.FS
backendConnectionsActive, backendConnectionsInact, backendWeight *prometheus.GaugeVec
connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes prometheus.Counter
backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc
connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc
}
func init() {
@@ -59,74 +59,46 @@ func newIPVSCollector() (*ipvsCollector, error) {
return nil, err
}
c.connections = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "connections_total",
Help: "The total number of connections made.",
},
)
c.incomingPackets = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "incoming_packets_total",
Help: "The total number of incoming packets.",
},
)
c.outgoingPackets = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "outgoing_packets_total",
Help: "The total number of outgoing packets.",
},
)
c.incomingBytes = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "incoming_bytes_total",
Help: "The total amount of incoming data.",
},
)
c.outgoingBytes = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "outgoing_bytes_total",
Help: "The total amount of outgoing data.",
},
)
c.backendConnectionsActive = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "backend_connections_active",
Help: "The current active connections by local and remote address.",
},
ipvsBackendLabelNames,
)
c.backendConnectionsInact = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "backend_connections_inactive",
Help: "The current inactive connections by local and remote address.",
},
ipvsBackendLabelNames,
)
c.backendWeight = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "backend_weight",
Help: "The current backend weight by local and remote address.",
},
ipvsBackendLabelNames,
)
c.connections = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "connections_total"),
"The total number of connections made.",
nil, nil,
), prometheus.CounterValue}
c.incomingPackets = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "incoming_packets_total"),
"The total number of incoming packets.",
nil, nil,
), prometheus.CounterValue}
c.outgoingPackets = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "outgoing_packets_total"),
"The total number of outgoing packets.",
nil, nil,
), prometheus.CounterValue}
c.incomingBytes = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "incoming_bytes_total"),
"The total amount of incoming data.",
nil, nil,
), prometheus.CounterValue}
c.outgoingBytes = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "outgoing_bytes_total"),
"The total amount of outgoing data.",
nil, nil,
), prometheus.CounterValue}
c.backendConnectionsActive = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "backend_connections_active"),
"The current active connections by local and remote address.",
ipvsBackendLabelNames, nil,
), prometheus.GaugeValue}
c.backendConnectionsInact = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "backend_connections_inactive"),
"The current inactive connections by local and remote address.",
ipvsBackendLabelNames, nil,
), prometheus.GaugeValue}
c.backendWeight = typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "backend_weight"),
"The current backend weight by local and remote address.",
ipvsBackendLabelNames, nil,
), prometheus.GaugeValue}
return &c, nil
}
@@ -136,18 +108,11 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
if err != nil {
return fmt.Errorf("could not get IPVS stats: %s", err)
}
c.connections.Set(float64(ipvsStats.Connections))
c.incomingPackets.Set(float64(ipvsStats.IncomingPackets))
c.outgoingPackets.Set(float64(ipvsStats.OutgoingPackets))
c.incomingBytes.Set(float64(ipvsStats.IncomingBytes))
c.outgoingBytes.Set(float64(ipvsStats.OutgoingBytes))
c.connections.Collect(ch)
c.incomingPackets.Collect(ch)
c.outgoingPackets.Collect(ch)
c.incomingBytes.Collect(ch)
c.outgoingBytes.Collect(ch)
ch <- c.connections.mustNewConstMetric(float64(ipvsStats.Connections))
ch <- c.incomingPackets.mustNewConstMetric(float64(ipvsStats.IncomingPackets))
ch <- c.outgoingPackets.mustNewConstMetric(float64(ipvsStats.OutgoingPackets))
ch <- c.incomingBytes.mustNewConstMetric(float64(ipvsStats.IncomingBytes))
ch <- c.outgoingBytes.mustNewConstMetric(float64(ipvsStats.OutgoingBytes))
backendStats, err := c.fs.NewIPVSBackendStatus()
if err != nil {
@@ -162,14 +127,9 @@ func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error {
strconv.FormatUint(uint64(backend.RemotePort), 10),
backend.Proto,
}
c.backendConnectionsActive.WithLabelValues(labelValues...).Set(float64(backend.ActiveConn))
c.backendConnectionsInact.WithLabelValues(labelValues...).Set(float64(backend.InactConn))
c.backendWeight.WithLabelValues(labelValues...).Set(float64(backend.Weight))
ch <- c.backendConnectionsActive.mustNewConstMetric(float64(backend.ActiveConn), labelValues...)
ch <- c.backendConnectionsInact.mustNewConstMetric(float64(backend.InactConn), labelValues...)
ch <- c.backendWeight.mustNewConstMetric(float64(backend.Weight), labelValues...)
}
c.backendConnectionsActive.Collect(ch)
c.backendConnectionsInact.Collect(ch)
c.backendWeight.Collect(ch)
return nil
}

View File

@@ -19,7 +19,6 @@ import (
"net"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
@@ -129,36 +128,23 @@ func TestIPVSCollector(t *testing.T) {
}
sink := make(chan prometheus.Metric)
go func() {
for {
<-sink
err = collector.Update(sink)
if err != nil {
t.Fatal(err)
}
}()
err = collector.Update(sink)
if err != nil {
t.Fatal(err)
}
for _, expect := range expectedIPVSBackendStatuses {
labels := prometheus.Labels{
"local_address": expect.LocalAddress.String(),
"local_port": strconv.FormatUint(uint64(expect.LocalPort), 10),
"remote_address": expect.RemoteAddress.String(),
"remote_port": strconv.FormatUint(uint64(expect.RemotePort), 10),
"proto": expect.Proto,
}
// TODO: Pending prometheus/client_golang#58, check the actual numbers
_, err = collector.backendConnectionsActive.GetMetricWith(labels)
if err != nil {
t.Errorf("Missing active connections metric for label combination: %+v", labels)
}
_, err = collector.backendConnectionsInact.GetMetricWith(labels)
if err != nil {
t.Errorf("Missing inactive connections metric for label combination: %+v", labels)
}
_, err = collector.backendWeight.GetMetricWith(labels)
if err != nil {
t.Errorf("Missing weight metric for label combination: %+v", labels)
for expected, got := range map[string]string{
prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(),
prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(),
} {
if expected != got {
t.Fatalf("Expected '%s' but got '%s'", expected, got)
}
}
}
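
The rewritten test above asserts only on the descriptor strings read back from the sink channel. As a side note, if the emitted values themselves ever needed checking, a helper roughly like the following could decode each const metric; this is a hedged sketch, assuming github.com/prometheus/client_model/go is imported as dto alongside the existing test imports:

// Hypothetical test helper (not part of this commit): read a const metric's
// value back out through the client_model DTO.
func metricValue(t *testing.T, m prometheus.Metric) float64 {
	pb := &dto.Metric{}
	if err := m.Write(pb); err != nil {
		t.Fatalf("writing metric failed: %s", err)
	}
	if pb.Gauge != nil {
		return pb.Gauge.GetValue()
	}
	if pb.Counter != nil {
		return pb.Counter.GetValue()
	}
	return pb.GetUntyped().GetValue()
}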

View File

@@ -24,7 +24,7 @@ import (
)
type loadavgCollector struct {
metric []prometheus.Gauge
metric []typedDesc
}
func init() {
@@ -34,22 +34,10 @@ func init() {
// Take a prometheus registry and return a new Collector exposing load average.
func NewLoadavgCollector() (Collector, error) {
return &loadavgCollector{
metric: []prometheus.Gauge{
prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "load1",
Help: "1m load average.",
}),
prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "load5",
Help: "5m load average.",
}),
prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "load15",
Help: "15m load average.",
}),
metric: []typedDesc{
{prometheus.NewDesc(Namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue},
{prometheus.NewDesc(Namespace+"_load5", "5m load average.", nil, nil), prometheus.GaugeValue},
{prometheus.NewDesc(Namespace+"_load15", "15m load average.", nil, nil), prometheus.GaugeValue},
},
}, nil
}
@@ -60,9 +48,8 @@ func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) (err error) {
return fmt.Errorf("couldn't get load: %s", err)
}
for i, load := range loads {
log.Debugf("Set load %d: %f", i, load)
c.metric[i].Set(load)
c.metric[i].Collect(ch)
log.Debugf("return load %d: %f", i, load)
ch <- c.metric[i].mustNewConstMetric(load)
}
return err
}

View File

@@ -30,8 +30,7 @@ var (
)
type ntpCollector struct {
drift prometheus.Gauge
stratum prometheus.Gauge
drift, stratum typedDesc
}
func init() {
@@ -50,16 +49,16 @@ func NewNtpCollector() (Collector, error) {
}
return &ntpCollector{
drift: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "ntp_drift_seconds",
Help: "Time between system time and ntp time.",
}),
stratum: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "ntp_stratum",
Help: "NTP server stratum.",
}),
drift: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "ntp", "drift_seconds"),
"Time between system time and ntp time.",
nil, nil,
), prometheus.GaugeValue},
stratum: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "ntp", "stratum"),
"NTP server stratum.",
nil, nil,
), prometheus.GaugeValue},
}, nil
}
@@ -70,12 +69,10 @@ func (c *ntpCollector) Update(ch chan<- prometheus.Metric) (err error) {
}
driftSeconds := resp.ClockOffset.Seconds()
log.Debugf("Set ntp_drift_seconds: %f", driftSeconds)
c.drift.Set(driftSeconds)
c.drift.Collect(ch)
ch <- c.drift.mustNewConstMetric(driftSeconds)
stratum := float64(resp.Stratum)
log.Debugf("Set ntp_stratum: %f", stratum)
c.stratum.Set(stratum)
c.stratum.Collect(ch)
ch <- c.stratum.mustNewConstMetric(stratum)
return nil
}

View File

@@ -29,7 +29,7 @@ var runitServiceDir = flag.String(
"Path to runit service directory.")
type runitCollector struct {
state, stateDesired, stateNormal, stateTimestamp *prometheus.GaugeVec
state, stateDesired, stateNormal, stateTimestamp typedDesc
}
func init() {
@@ -44,46 +44,26 @@ func NewRunitCollector() (Collector, error) {
)
return &runitCollector{
state: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "state",
Help: "State of runit service.",
ConstLabels: constLabels,
},
labelNames,
),
stateDesired: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "desired_state",
Help: "Desired state of runit service.",
ConstLabels: constLabels,
},
labelNames,
),
stateNormal: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "normal_state",
Help: "Normal state of runit service.",
ConstLabels: constLabels,
},
labelNames,
),
stateTimestamp: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: subsystem,
Name: "state_last_change_timestamp_seconds",
Help: "Unix timestamp of the last runit service state change.",
ConstLabels: constLabels,
},
labelNames,
),
state: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state"),
"State of runit service.",
labelNames, constLabels,
), prometheus.GaugeValue},
stateDesired: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "desired_state"),
"Desired state of runit service.",
labelNames, constLabels,
), prometheus.GaugeValue},
stateNormal: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "normal_state"),
"Normal state of runit service.",
labelNames, constLabels,
), prometheus.GaugeValue},
stateTimestamp: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "state_last_change_timestamp_seconds"),
"Unix timestamp of the last runit service state change.",
labelNames, constLabels,
), prometheus.GaugeValue},
}, nil
}
@@ -101,19 +81,14 @@ func (c *runitCollector) Update(ch chan<- prometheus.Metric) error {
}
log.Debugf("%s is %d on pid %d for %d seconds", service.Name, status.State, status.Pid, status.Duration)
c.state.WithLabelValues(service.Name).Set(float64(status.State))
c.stateDesired.WithLabelValues(service.Name).Set(float64(status.Want))
c.stateTimestamp.WithLabelValues(service.Name).Set(float64(status.Timestamp.Unix()))
ch <- c.state.mustNewConstMetric(float64(status.State), service.Name)
ch <- c.stateDesired.mustNewConstMetric(float64(status.Want), service.Name)
ch <- c.stateTimestamp.mustNewConstMetric(float64(status.Timestamp.Unix()), service.Name)
if status.NormallyUp {
c.stateNormal.WithLabelValues(service.Name).Set(1)
ch <- c.stateNormal.mustNewConstMetric(1, service.Name)
} else {
c.stateNormal.WithLabelValues(service.Name).Set(0)
ch <- c.stateNormal.mustNewConstMetric(0, service.Name)
}
}
c.state.Collect(ch)
c.stateDesired.Collect(ch)
c.stateNormal.Collect(ch)
c.stateTimestamp.Collect(ch)
return nil
}

View File

@@ -43,7 +43,7 @@ const (
)
type tcpStatCollector struct {
metric *prometheus.GaugeVec
desc typedDesc
}
func init() {
@@ -54,14 +54,11 @@ func init() {
// a new Collector exposing network stats.
func NewTCPStatCollector() (Collector, error) {
return &tcpStatCollector{
metric: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Name: "tcp_connection_states",
Help: "Number of connection states.",
},
[]string{"state"},
),
desc: typedDesc{prometheus.NewDesc(
prometheus.BuildFQName(Namespace, "tcp", "connection_states"),
"Number of connection states.",
[]string{"state"}, nil,
), prometheus.GaugeValue},
}, nil
}
@@ -85,10 +82,8 @@ func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
}
for st, value := range tcpStats {
c.metric.WithLabelValues(st.String()).Set(value)
ch <- c.desc.mustNewConstMetric(value, st.String())
}
c.metric.Collect(ch)
return err
}

View File

@@ -23,7 +23,7 @@ import (
)
type timeCollector struct {
metric prometheus.Counter
desc *prometheus.Desc
}
func init() {
@@ -34,18 +34,17 @@ func init() {
// the current system time in seconds since epoch.
func NewTimeCollector() (Collector, error) {
return &timeCollector{
metric: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: Namespace,
Name: "time",
Help: "System time in seconds since epoch (1970).",
}),
desc: prometheus.NewDesc(
Namespace+"_time",
"System time in seconds since epoch (1970).",
nil, nil,
),
}, nil
}
func (c *timeCollector) Update(ch chan<- prometheus.Metric) (err error) {
func (c *timeCollector) Update(ch chan<- prometheus.Metric) error {
now := float64(time.Now().Unix())
log.Debugf("Set time: %f", now)
c.metric.Set(now)
c.metric.Collect(ch)
return err
log.Debugf("Return time: %f", now)
ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, now)
return nil
}

View File

@@ -56,14 +56,14 @@ func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
return err
}
metric := prometheus.NewUntyped(prometheus.UntypedOpts{
Namespace: Namespace,
Subsystem: vmStatSubsystem,
Name: parts[0],
Help: fmt.Sprintf("/proc/vmstat information field %s.", parts[0]),
})
metric.Set(value)
metric.Collect(ch)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
prometheus.BuildFQName(Namespace, vmStatSubsystem, parts[0]),
fmt.Sprintf("/proc/vmstat information field %s.", parts[0]),
nil, nil),
prometheus.UntypedValue,
value,
)
}
return err
}