pool: fix read/write counters

Vaibhav Bhembre 2016-10-31 20:46:43 -04:00 committed by Vaibhav Bhembre
parent c6a80e0795
commit b58fdf7666
1 changed file with 17 additions and 17 deletions
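
The change swaps the four per-pool read/write metrics from prometheus.CounterVec to prometheus.GaugeVec. The likely rationale: in client_golang a Counter only exposes Inc and Add, so a cumulative total that is read back from Ceph on each scrape cannot be assigned to it directly, while a Gauge exposes Set. A minimal sketch of the distinction, assuming the standard github.com/prometheus/client_golang/prometheus package; the "ceph"/"pool" namespace, subsystem, and label values below are assumptions standing in for cephNamespace, subSystem, and poolLabel:

package main

import "github.com/prometheus/client_golang/prometheus"

// readIO mirrors the ReadIO field from the diff below: a GaugeVec keyed by pool.
var readIO = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "ceph", // assumed value of cephNamespace
		Subsystem: "pool", // assumed value of subSystem
		Name:      "read_total",
		Help:      "Total read i/o calls for the pool",
	},
	[]string{"pool"}, // assumed value of poolLabel
)

func main() {
	prometheus.MustRegister(readIO)

	// The total comes straight from Ceph's own pool stats, so it is written
	// with Set; a CounterVec offers only Inc/Add and cannot take an absolute value.
	readIO.WithLabelValues("rbd").Set(152340)
}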


@@ -42,22 +42,22 @@ type PoolUsageCollector struct {
 	// Objects shows the no. of RADOS objects created within the pool.
 	Objects *prometheus.GaugeVec

 	// DirtyObjects shows the no. of RADOS dirty objects in a cache-tier pool,
 	// this doesn't make sense in a regular pool, see:
 	// http://lists.ceph.com/pipermail/ceph-users-ceph.com/2015-April/000557.html
 	DirtyObjects *prometheus.GaugeVec

 	// ReadIO tracks the read IO calls made for the images within each pool.
-	ReadIO *prometheus.CounterVec
+	ReadIO *prometheus.GaugeVec

 	// Readbytes tracks the read throughput made for the images within each pool.
-	ReadBytes *prometheus.CounterVec
+	ReadBytes *prometheus.GaugeVec

 	// WriteIO tracks the write IO calls made for the images within each pool.
-	WriteIO *prometheus.CounterVec
+	WriteIO *prometheus.GaugeVec

 	// WriteBytes tracks the write throughput made for the images within each pool.
-	WriteBytes *prometheus.CounterVec
+	WriteBytes *prometheus.GaugeVec
 }

 // NewPoolUsageCollector creates a new instance of PoolUsageCollector and returns
@@ -115,39 +115,39 @@ func NewPoolUsageCollector(conn Conn) *PoolUsageCollector {
 			},
 			poolLabel,
 		),
-		ReadIO: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+		ReadIO: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
 				Namespace: cephNamespace,
 				Subsystem: subSystem,
 				Name:      "read_total",
-				Help:      "Total read i/o calls the pool has been subject to",
+				Help:      "Total read i/o calls for the pool",
 			},
 			poolLabel,
 		),
-		ReadBytes: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+		ReadBytes: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
 				Namespace: cephNamespace,
 				Subsystem: subSystem,
 				Name:      "read_bytes_total",
-				Help:      "Total read throughput the pool has been subject to",
+				Help:      "Total read throughput for the pool",
 			},
 			poolLabel,
 		),
-		WriteIO: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+		WriteIO: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
 				Namespace: cephNamespace,
 				Subsystem: subSystem,
 				Name:      "write_total",
-				Help:      "Total write i/o calls the pool has been subject to",
+				Help:      "Total write i/o calls for the pool",
 			},
 			poolLabel,
 		),
-		WriteBytes: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+		WriteBytes: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
 				Namespace: cephNamespace,
 				Subsystem: subSystem,
 				Name:      "write_bytes_total",
-				Help:      "Total write throughput the pool has been subject to",
+				Help:      "Total write throughput for the pool",
 			},
 			poolLabel,
 		),