package collectors

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// OSDCollector displays statistics about each OSD in the Ceph cluster.
// An important aspect of monitoring OSDs is to ensure that when the cluster
// is up and running, all OSDs that are in the cluster are up and running, too.
type OSDCollector struct {
	conn Conn

	// CrushWeight is a persistent setting, and it affects how CRUSH assigns data to OSDs.
	// It displays the CRUSH weight for the OSD.
	CrushWeight *prometheus.GaugeVec

	// Depth displays the OSD's level of hierarchy in the CRUSH map.
	Depth *prometheus.GaugeVec

	// Reweight sets an override weight on the OSD.
	// It displays a value in the range 0 to 1.
	Reweight *prometheus.GaugeVec

	// Bytes displays the total bytes in the OSD.
	Bytes *prometheus.GaugeVec

	// UsedBytes displays the total used bytes in the OSD.
	UsedBytes *prometheus.GaugeVec

	// AvailBytes displays the total available bytes in the OSD.
	AvailBytes *prometheus.GaugeVec

	// Utilization displays the current utilization of the OSD.
	Utilization *prometheus.GaugeVec

	// Variance displays the current variance of the OSD from the standard utilization.
	Variance *prometheus.GaugeVec

	// Pgs displays the total number of placement groups in the OSD.
	// Available from the Ceph Jewel release.
	Pgs *prometheus.GaugeVec

	// CommitLatency displays, in seconds, how long it takes for an operation to be committed to disk.
	CommitLatency *prometheus.GaugeVec

	// ApplyLatency displays, in seconds, how long it takes for an operation to be applied to the backing filesystem.
	ApplyLatency *prometheus.GaugeVec

	// OSDIn displays the In state of the OSD.
	OSDIn *prometheus.GaugeVec

	// OSDUp displays the Up state of the OSD.
	OSDUp *prometheus.GaugeVec

	// TotalBytes displays the total bytes across all OSDs.
	TotalBytes prometheus.Gauge

	// TotalUsedBytes displays the total used bytes across all OSDs.
	TotalUsedBytes prometheus.Gauge

	// TotalAvailBytes displays the total available bytes across all OSDs.
	TotalAvailBytes prometheus.Gauge

	// AverageUtil displays the average utilization across all OSDs.
	AverageUtil prometheus.Gauge
}

// NewOSDCollector creates an instance of the OSDCollector and instantiates
// the individual metrics that show information about the OSD.
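//
// A minimal usage sketch (assuming a Conn implementation, for example a rados
// connection wrapper, is available as "conn"):
//
//	collector := NewOSDCollector(conn)
//	prometheus.MustRegister(collector)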
func NewOSDCollector(conn Conn) *OSDCollector {
	return &OSDCollector{
		conn: conn,

		CrushWeight: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_crush_weight",
				Help:      "OSD Crush Weight",
			},
			[]string{"osd"},
		),

		Depth: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_depth",
				Help:      "OSD Depth",
			},
			[]string{"osd"},
		),

		Reweight: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_reweight",
				Help:      "OSD Reweight",
			},
			[]string{"osd"},
		),

		Bytes: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_bytes",
				Help:      "OSD Total Bytes",
			},
			[]string{"osd"},
		),

		UsedBytes: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_used_bytes",
				Help:      "OSD Used Storage in Bytes",
			},
			[]string{"osd"},
		),

		AvailBytes: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_avail_bytes",
				Help:      "OSD Available Storage in Bytes",
			},
			[]string{"osd"},
		),

		Utilization: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_utilization",
				Help:      "OSD Utilization",
			},
			[]string{"osd"},
		),

		Variance: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_variance",
				Help:      "OSD Variance",
			},
			[]string{"osd"},
		),

		Pgs: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_pgs",
				Help:      "OSD Placement Group Count",
			},
			[]string{"osd"},
		),

		TotalBytes: prometheus.NewGauge(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_total_bytes",
				Help:      "OSD Total Storage Bytes",
			},
		),

		TotalUsedBytes: prometheus.NewGauge(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_total_used_bytes",
				Help:      "OSD Total Used Storage Bytes",
			},
		),

		TotalAvailBytes: prometheus.NewGauge(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_total_avail_bytes",
				Help:      "OSD Total Available Storage Bytes",
			},
		),

		AverageUtil: prometheus.NewGauge(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_average_utilization",
				Help:      "OSD Average Utilization",
			},
		),

		CommitLatency: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_perf_commit_latency_seconds",
				Help:      "OSD Perf Commit Latency",
			},
			[]string{"osd"},
		),

		ApplyLatency: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_perf_apply_latency_seconds",
				Help:      "OSD Perf Apply Latency",
			},
			[]string{"osd"},
		),

		OSDIn: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_in",
				Help:      "OSD In Status",
			},
			[]string{"osd"},
		),

		OSDUp: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: cephNamespace,
				Name:      "osd_up",
				Help:      "OSD Up Status",
			},
			[]string{"osd"},
		),
	}
}

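// collectorList returns the metrics backing the OSDCollector so that they can
// be described and collected in one place.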
func (o *OSDCollector) collectorList() []prometheus.Collector {
	return []prometheus.Collector{
		o.CrushWeight,
		o.Depth,
		o.Reweight,
		o.Bytes,
		o.UsedBytes,
		o.AvailBytes,
		o.Utilization,
		o.Variance,
		o.Pgs,
		o.TotalBytes,
		o.TotalUsedBytes,
		o.TotalAvailBytes,
		o.AverageUtil,
		o.CommitLatency,
		o.ApplyLatency,
		o.OSDIn,
		o.OSDUp,
	}
}

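// cephOSDDF models the JSON output of the "osd df" mon command: per-OSD usage
// nodes plus a cluster-wide summary.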
type cephOSDDF struct {
	OSDNodes []struct {
		Name        string      `json:"name"`
		CrushWeight json.Number `json:"crush_weight"`
		Depth       json.Number `json:"depth"`
		Reweight    json.Number `json:"reweight"`
		KB          json.Number `json:"kb"`
		UsedKB      json.Number `json:"kb_used"`
		AvailKB     json.Number `json:"kb_avail"`
		Utilization json.Number `json:"utilization"`
		Variance    json.Number `json:"var"`
		Pgs         json.Number `json:"pgs"`
	} `json:"nodes"`

	Summary struct {
		TotalKB      json.Number `json:"total_kb"`
		TotalUsedKB  json.Number `json:"total_kb_used"`
		TotalAvailKB json.Number `json:"total_kb_avail"`
		AverageUtil  json.Number `json:"average_utilization"`
	} `json:"summary"`
}

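// cephPerfStat models the JSON output of the "osd perf" mon command: commit
// and apply latency, reported in milliseconds, for each OSD.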
type cephPerfStat struct {
	PerfInfo []struct {
		ID    json.Number `json:"id"`
		Stats struct {
			CommitLatency json.Number `json:"commit_latency_ms"`
			ApplyLatency  json.Number `json:"apply_latency_ms"`
		} `json:"perf_stats"`
	} `json:"osd_perf_infos"`
}

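// cephOSDDump models the JSON output of the "osd dump" mon command, of which
// only the per-OSD up/in flags are used here.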
type cephOSDDump struct {
	OSDs []struct {
		OSD json.Number `json:"osd"`
		Up  json.Number `json:"up"`
		In  json.Number `json:"in"`
	} `json:"osds"`
}

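// collect runs "ceph osd df" via MonCommand and updates the per-OSD usage,
// weight, and placement group gauges as well as the cluster-wide totals.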
func (o *OSDCollector) collect() error {
	cmd := o.cephOSDDFCommand()

	buf, _, err := o.conn.MonCommand(cmd)
	if err != nil {
		log.Println("[ERROR] Unable to collect data from ceph osd df", err)
		return err
	}

	osdDF := &cephOSDDF{}
	if err := json.Unmarshal(buf, osdDF); err != nil {
		return err
	}

	for _, node := range osdDF.OSDNodes {
		crushWeight, err := node.CrushWeight.Float64()
		if err != nil {
			return err
		}

		o.CrushWeight.WithLabelValues(node.Name).Set(crushWeight)

		depth, err := node.Depth.Float64()
		if err != nil {
			return err
		}

		o.Depth.WithLabelValues(node.Name).Set(depth)

		reweight, err := node.Reweight.Float64()
		if err != nil {
			return err
		}

		o.Reweight.WithLabelValues(node.Name).Set(reweight)

		osdKB, err := node.KB.Float64()
		if err != nil {
			return err
		}

		o.Bytes.WithLabelValues(node.Name).Set(osdKB * 1e3)

		usedKB, err := node.UsedKB.Float64()
		if err != nil {
			return err
		}

		o.UsedBytes.WithLabelValues(node.Name).Set(usedKB * 1e3)

		availKB, err := node.AvailKB.Float64()
		if err != nil {
			return err
		}

		o.AvailBytes.WithLabelValues(node.Name).Set(availKB * 1e3)

		util, err := node.Utilization.Float64()
		if err != nil {
			return err
		}

		o.Utilization.WithLabelValues(node.Name).Set(util)

		variance, err := node.Variance.Float64()
		if err != nil {
			return err
		}

		o.Variance.WithLabelValues(node.Name).Set(variance)

		pgs, err := node.Pgs.Float64()
		if err != nil {
			// Skip the pgs metric for this OSD instead of failing the whole
			// run; the field is not reported by all Ceph releases.
			continue
		}

		o.Pgs.WithLabelValues(node.Name).Set(pgs)
	}

	totalKB, err := osdDF.Summary.TotalKB.Float64()
	if err != nil {
		return err
	}

	o.TotalBytes.Set(totalKB * 1e3)

	totalUsedKB, err := osdDF.Summary.TotalUsedKB.Float64()
	if err != nil {
		return err
	}

	o.TotalUsedBytes.Set(totalUsedKB * 1e3)

	totalAvailKB, err := osdDF.Summary.TotalAvailKB.Float64()
	if err != nil {
		return err
	}

	o.TotalAvailBytes.Set(totalAvailKB * 1e3)

	averageUtil, err := osdDF.Summary.AverageUtil.Float64()
	if err != nil {
		return err
	}

	o.AverageUtil.Set(averageUtil)

	return nil
}

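// collectOSDPerf runs "ceph osd perf" via MonCommand and updates the commit
// and apply latency gauges, converting the reported milliseconds to seconds.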
func (o *OSDCollector) collectOSDPerf() error {
	osdPerfCmd := o.cephOSDPerfCommand()
	buf, _, err := o.conn.MonCommand(osdPerfCmd)
	if err != nil {
		log.Println("[ERROR] Unable to collect data from ceph osd perf", err)
		return err
	}

	osdPerf := &cephPerfStat{}
	if err := json.Unmarshal(buf, osdPerf); err != nil {
		return err
	}

	for _, perfStat := range osdPerf.PerfInfo {
		osdID, err := perfStat.ID.Int64()
		if err != nil {
			return err
		}
		osdName := fmt.Sprintf("osd.%v", osdID)

		commitLatency, err := perfStat.Stats.CommitLatency.Float64()
		if err != nil {
			return err
		}
		o.CommitLatency.WithLabelValues(osdName).Set(commitLatency / 1e3)

		applyLatency, err := perfStat.Stats.ApplyLatency.Float64()
		if err != nil {
			return err
		}
		o.ApplyLatency.WithLabelValues(osdName).Set(applyLatency / 1e3)
	}

	return nil
}

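// collectOSDDump runs "ceph osd dump" via MonCommand and updates the per-OSD
// up/in status gauges.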
func (o *OSDCollector) collectOSDDump() error {
	osdDumpCmd := o.cephOSDDump()
	buff, _, err := o.conn.MonCommand(osdDumpCmd)
	if err != nil {
		log.Println("[ERROR] Unable to collect data from ceph osd dump", err)
		return err
	}

	osdDump := &cephOSDDump{}
	if err := json.Unmarshal(buff, osdDump); err != nil {
		return err
	}

	for _, dumpInfo := range osdDump.OSDs {
		osdID, err := dumpInfo.OSD.Int64()
		if err != nil {
			return err
		}
		osdName := fmt.Sprintf("osd.%v", osdID)

		in, err := dumpInfo.In.Float64()
		if err != nil {
			return err
		}

		o.OSDIn.WithLabelValues(osdName).Set(in)

		up, err := dumpInfo.Up.Float64()
		if err != nil {
			return err
		}

		o.OSDUp.WithLabelValues(osdName).Set(up)
	}

	return nil
}

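// cephOSDDump returns the JSON-marshalled mon command payload requesting
// "osd dump" output in JSON format.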
func (o *OSDCollector) cephOSDDump() []byte {
	cmd, err := json.Marshal(map[string]interface{}{
		"prefix": "osd dump",
		"format": "json",
	})
	if err != nil {
		panic(err)
	}
	return cmd
}

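// cephOSDDFCommand returns the JSON-marshalled mon command payload requesting
// "osd df" output in JSON format.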
func (o *OSDCollector) cephOSDDFCommand() []byte {
	cmd, err := json.Marshal(map[string]interface{}{
		"prefix": "osd df",
		"format": "json",
	})
	if err != nil {
		panic(err)
	}
	return cmd
}

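// cephOSDPerfCommand returns the JSON-marshalled mon command payload requesting
// "osd perf" output in JSON format.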
func (o *OSDCollector) cephOSDPerfCommand() []byte {
	cmd, err := json.Marshal(map[string]interface{}{
		"prefix": "osd perf",
		"format": "json",
	})
	if err != nil {
		panic(err)
	}
	return cmd
}

// Describe sends the descriptors of each metric the OSDCollector defines
// to the provided Prometheus channel.
func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, metric := range o.collectorList() {
		metric.Describe(ch)
	}
}

// Collect sends all the collected metrics to the provided Prometheus channel.
// It requires the caller to handle synchronization.
func (o *OSDCollector) Collect(ch chan<- prometheus.Metric) {
	if err := o.collectOSDPerf(); err != nil {
		log.Println("failed collecting osd perf stats:", err)
	}

	if err := o.collectOSDDump(); err != nil {
		log.Println("failed collecting osd dump:", err)
	}

	if err := o.collect(); err != nil {
		log.Println("failed collecting osd metrics:", err)
	}

	for _, metric := range o.collectorList() {
		metric.Collect(ch)
	}
}