Expose max_avail field in pool_usage instead

Kai Storbeck 2016-05-31 21:21:25 +02:00
parent 09bbb2cb1f
commit a3e30d7c67
4 changed files with 25 additions and 253 deletions
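Net effect: the standalone DfCollector below is removed, and the free-space figure it read from ceph df's max_avail field is now exposed by PoolUsageCollector as a new gauge. As a quick orientation (not part of the commit), here is a minimal sketch of the metric names involved; the "ceph" namespace is taken from the old test regexes, and prometheus.BuildFQName is used only to spell out how client_golang joins namespace, subsystem and name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Series exposed by the DfCollector that this commit deletes:
	fmt.Println(prometheus.BuildFQName("ceph", "df", "pool_bytes_available")) // ceph_df_pool_bytes_available
	fmt.Println(prometheus.BuildFQName("ceph", "df", "pool_bytes_used"))      // ceph_df_pool_bytes_used (used bytes were already tracked by PoolUsageCollector's UsedBytes gauge, whose name is not shown in this diff)

	// Replacement gauge added to PoolUsageCollector by this commit:
	fmt.Println(prometheus.BuildFQName("ceph", "df", "pool_available_bytes")) // ceph_df_pool_available_bytes
}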


@@ -1,141 +0,0 @@
// Copyright 2016 DigitalOcean
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// DfCollector collects information about the available and used space for
// each pool.
type DfCollector struct {
	// conn holds connection to the Ceph cluster
	conn Conn

	// DfPoolBytesAvailable shows the current bytes available for each pool
	DfPoolBytesAvailable *prometheus.GaugeVec

	// DfPoolBytesUsed shows the current bytes used for each pool
	DfPoolBytesUsed *prometheus.GaugeVec
}

// NewDfCollector creates a new instance of DfCollector to collect df
// metrics on.
func NewDfCollector(conn Conn) *DfCollector {
	return &DfCollector{
		conn: conn,

		DfPoolBytesAvailable: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Subsystem: "df",
				Name:      "pool_bytes_available",
				Help:      "Ceph volume available statistics",
			},
			[]string{"pool"},
		),
		DfPoolBytesUsed: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Subsystem: "df",
				Name:      "pool_bytes_used",
				Help:      "Ceph volume usage statistics",
			},
			[]string{"pool"},
		),
	}
}

func (c *DfCollector) collectorList() []prometheus.Collector {
	return []prometheus.Collector{
		c.DfPoolBytesAvailable,
		c.DfPoolBytesUsed,
	}
}

type cephDfStats struct {
	Pools []struct {
		Name  string
		Stats struct {
			BytesUsed      json.Number `json:"bytes_used"`
			BytesAvailable json.Number `json:"max_avail"`
		}
	}
}

func (c *DfCollector) collectDfStats() (err error) {
	cmd := c.cephDfCommand()
	buf, _, err := c.conn.MonCommand(cmd)
	if err != nil {
		return err
	}

	stats := &cephDfStats{}
	if err := json.Unmarshal(buf, stats); err != nil {
		return err
	}

	for _, p := range stats.Pools {
		avail, err := p.Stats.BytesAvailable.Float64()
		if err != nil {
			return fmt.Errorf("Cannot convert bytesavailable %s to float64: %s", p.Stats.BytesAvailable, err)
		}
		c.DfPoolBytesAvailable.WithLabelValues(p.Name).Set(avail)

		used, err := p.Stats.BytesUsed.Float64()
		if err != nil {
			return fmt.Errorf("Cannot convert bytesused %s to float64: %s", p.Stats.BytesUsed, err)
		}
		c.DfPoolBytesUsed.WithLabelValues(p.Name).Set(used)
	}

	return err
}

func (c *DfCollector) cephDfCommand() []byte {
	cmd, err := json.Marshal(map[string]interface{}{
		"prefix": "df",
		"format": "json",
	})
	if err != nil {
		// panic! because ideally in no world this hard-coded input
		// should fail.
		panic(err)
	}
	return cmd
}

// Describe sends all the descriptions of individual metrics of DfCollector
// to the provided prometheus channel.
func (c *DfCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, collector := range c.collectorList() {
		collector.Describe(ch)
	}
}

// Collect sends all the collected metrics to the provided prometheus channel.
// It requires the caller to handle synchronization.
func (c *DfCollector) Collect(ch chan<- prometheus.Metric) {
	if err := c.collectDfStats(); err != nil {
		log.Println("failed collecting df stats:", err)
	}

	for _, collector := range c.collectorList() {
		collector.Collect(ch)
	}
}
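The collector above reaches Ceph through c.conn.MonCommand, which takes a JSON-encoded mon command and returns the raw response buffer. The Conn interface itself is not shown in this diff; the sketch below is only what that call site implies, mirroring go-ceph's MonCommand signature, together with a hypothetical canned-response stub in the spirit of the NoopConn the tests use:

// Sketch only: the repository's actual Conn definition may differ.
package main

import "fmt"

type Conn interface {
	MonCommand(args []byte) (buffer []byte, info string, err error)
}

// stubConn is a hypothetical stand-in for the tests' NoopConn: it ignores the
// command and returns a canned JSON payload, so a collector can be exercised
// without a live cluster.
type stubConn struct{ payload string }

func (s stubConn) MonCommand(_ []byte) ([]byte, string, error) {
	return []byte(s.payload), "", nil
}

func main() {
	var c Conn = stubConn{payload: `{"pools": []}`}
	buf, _, err := c.MonCommand([]byte(`{"prefix":"df","format":"json"}`))
	fmt.Println(string(buf), err) // {"pools": []} <nil>
}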


@@ -1,112 +0,0 @@
// Copyright 2016 DigitalOcean
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors

import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"regexp"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

func TestDfCollector(t *testing.T) {
	for _, tt := range []struct {
		input   string
		regexes []*regexp.Regexp
	}{
		{
			`
{
	"stats": {
		"total_bytes": 255205081620480,
		"total_used_bytes": 17850383970304,
		"total_avail_bytes": 237354697650176
	},
	"pools": [
		{
			"name": "rbd",
			"id": 1,
			"stats": {
				"kb_used": 4715700422,
				"bytes_used": 4828877231187,
				"max_avail": 65868981633781,
				"objects": 1360283
			}
		},
		{
			"name": "ssd",
			"id": 2,
			"stats": {
				"kb_used": 362463233,
				"bytes_used": 371162350522,
				"max_avail": 4621426774653,
				"objects": 89647
			}
		},
		{
			"name": "bench",
			"id": 3,
			"stats": {
				"kb_used": 0,
				"bytes_used": 0,
				"max_avail": 4621426774653,
				"objects": 0
			}
		}
	]
}
`,
			[]*regexp.Regexp{
				regexp.MustCompile(`ceph_df_pool_bytes_used{pool="bench"} 0`),
				regexp.MustCompile(`ceph_df_pool_bytes_used{pool="rbd"} 4.828877231187e\+12`),
				regexp.MustCompile(`ceph_df_pool_bytes_used{pool="ssd"} 3.71162350522e\+11`),
				regexp.MustCompile(`ceph_df_pool_bytes_available{pool="bench"} 4.621426774653e\+12`),
				regexp.MustCompile(`ceph_df_pool_bytes_available{pool="rbd"} 6.5868981633781e\+13`),
				regexp.MustCompile(`ceph_df_pool_bytes_available{pool="ssd"} 4.621426774653e\+12`),
			},
		},
	} {
		func() {
			collector := NewDfCollector(NewNoopConn(tt.input))
			if err := prometheus.Register(collector); err != nil {
				t.Fatalf("collector failed to register: %s", err)
			}
			defer prometheus.Unregister(collector)

			server := httptest.NewServer(prometheus.Handler())
			defer server.Close()

			resp, err := http.Get(server.URL)
			if err != nil {
				t.Fatalf("unexpected failed response from prometheus: %s", err)
			}
			defer resp.Body.Close()

			buf, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				t.Fatalf("failed reading server response: %s", err)
			}

			for _, re := range tt.regexes {
				if !re.Match(buf) {
					t.Errorf("failed matching: %q", re)
				}
			}
		}()
	}
}
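The expected values in these regexes are written in scientific notation, with the plus sign escaped, because the Prometheus text exposition prints these gauges the way Go's shortest 'g' float formatting does, which switches to an exponent for numbers of this size. A standalone illustration (not repository code):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// bytes_used for the "rbd" pool in the fixture above:
	fmt.Println(strconv.FormatFloat(4828877231187, 'g', -1, 64)) // 4.828877231187e+12

	// max_avail for the "ssd" pool:
	fmt.Println(strconv.FormatFloat(4621426774653, 'g', -1, 64)) // 4.621426774653e+12
}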


@@ -31,6 +31,10 @@ type PoolUsageCollector struct {
 	// does not factor in the overcommitment made for individual images.
 	UsedBytes *prometheus.GaugeVec
 
+	// MaxAvail tracks the amount of bytes currently free for the pool,
+	// which depends on the replication settings for the pool in question.
+	MaxAvail *prometheus.GaugeVec
+
 	// Objects shows the no. of RADOS objects created within the pool.
 	Objects *prometheus.GaugeVec
@@ -55,6 +59,15 @@ func NewPoolUsageCollector(conn Conn) *PoolUsageCollector {
 			},
 			[]string{"pool"},
 		),
+		MaxAvail: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: cephNamespace,
+				Subsystem: "df",
+				Name:      "pool_available_bytes",
+				Help:      "Free space for this ceph pool",
+			},
+			[]string{"pool"},
+		),
 		Objects: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
 				Namespace: cephNamespace,
@@ -85,6 +98,7 @@ func NewPoolUsageCollector(conn Conn) *PoolUsageCollector {
 func (p *PoolUsageCollector) collectorList() []prometheus.Collector {
 	return []prometheus.Collector{
 		p.UsedBytes,
+		p.MaxAvail,
 		p.Objects,
 		p.ReadIO,
 		p.WriteIO,
@@ -97,6 +111,7 @@ type cephPoolStats struct {
 		ID    int    `json:"id"`
 		Stats struct {
 			BytesUsed json.Number `json:"bytes_used"`
+			MaxAvail  float64     `json:"max_avail"`
 			Objects   json.Number `json:"objects"`
 			Read      json.Number `json:"rd"`
 			Write     json.Number `json:"wr"`
@@ -144,6 +159,7 @@ func (p *PoolUsageCollector) collect() error {
 		}
 
 		p.UsedBytes.WithLabelValues(pool.Name).Set(bytesUsed)
+		p.MaxAvail.WithLabelValues(pool.Name).Set(pool.Stats.MaxAvail)
 		p.Objects.WithLabelValues(pool.Name).Set(objects)
 		p.ReadIO.WithLabelValues(pool.Name).Set(read)
 		p.WriteIO.WithLabelValues(pool.Name).Set(write)
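One decoding detail worth noting: the removed DfCollector read max_avail as a json.Number and converted it with Float64(), while the field added to cephPoolStats above unmarshals straight into a float64. For the integer byte counts ceph df returns, the two approaches agree; a minimal standalone comparison (illustrative names, not repository code):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"max_avail": 4618201748262}`)

	var viaNumber struct {
		MaxAvail json.Number `json:"max_avail"`
	}
	var viaFloat struct {
		MaxAvail float64 `json:"max_avail"`
	}

	if err := json.Unmarshal(payload, &viaNumber); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(payload, &viaFloat); err != nil {
		panic(err)
	}

	n, err := viaNumber.MaxAvail.Float64() // explicit conversion, with an error to check
	if err != nil {
		panic(err)
	}
	fmt.Println(n == viaFloat.MaxAvail) // true for a value like this one
}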


@@ -128,6 +128,15 @@ func TestPoolUsageCollector(t *testing.T) {
 			},
 			reUnmatch: []*regexp.Regexp{},
 		},
+		{
+			input: `
+{"pools": [
+{"name": "ssd", "id": 11, "stats": {"max_avail": 4618201748262, "objects": 5, "rd": 4, "wr": 6}}
+]}`,
+			reMatch: []*regexp.Regexp{
+				regexp.MustCompile(`pool_available_bytes{pool="ssd"} 4.618201748262e\+12`),
+			},
+		},
 	} {
 		func() {
 			collector := NewPoolUsageCollector(NewNoopConn(tt.input))