// ceph_exporter/collectors/osd.go

package collectors
import (
"bytes"
"encoding/json"
"fmt"
"log"
"strings"
"github.com/prometheus/client_golang/prometheus"
)
const (
osdLabelFormat = "osd.%v"
)
const (
scrubStateIdle = 0
scrubStateScrubbing = 1
scrubStateDeepScrubbing = 2
)
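
// cephPGDumpBriefResponse models the per-PG entries returned by
// "ceph pg dump pgs_brief".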
type cephPGDumpBriefResponse []struct {
PGID string `json:"pgid"`
ActingPrimary int64 `json:"acting_primary"`
Acting []int `json:"acting"`
State string `json:"state"`
}
// OSDCollector displays statistics about OSDs in the Ceph cluster.
// An important aspect of monitoring OSDs is to ensure that when the cluster
// is up and running, all OSDs that are in the cluster are up and running, too.
type OSDCollector struct {
conn Conn
// osdScrubCache holds the cache of previous PG scrubs
osdScrubCache map[int]int
// CrushWeight is a persistent setting, and it affects how CRUSH assigns data to OSDs.
// It displays the CRUSH weight for the OSD
CrushWeight *prometheus.GaugeVec
// Depth displays the OSD's level of hierarchy in the CRUSH map
Depth *prometheus.GaugeVec
// Reweight sets an override weight on the OSD.
// It displays a value in the range 0 to 1.
Reweight *prometheus.GaugeVec
// Bytes displays the total capacity of the OSD in bytes
Bytes *prometheus.GaugeVec
// UsedBytes displays the total used bytes in the OSD
UsedBytes *prometheus.GaugeVec
// AvailBytes displays the total available bytes in the OSD
AvailBytes *prometheus.GaugeVec
// Utilization displays current utilization of the OSD
2016-07-14 05:01:19 +00:00
Utilization *prometheus.GaugeVec
// Variance displays current variance of the OSD from the standard utilization
Variance *prometheus.GaugeVec
// Pgs displays the total number of placement groups on the OSD.
// Available since the Ceph Jewel release.
Pgs *prometheus.GaugeVec
// CommitLatency displays in seconds how long it takes for an operation to be committed to disk
CommitLatency *prometheus.GaugeVec
// ApplyLatency displays in seconds how long it takes for an operation to be applied to the backing filesystem
ApplyLatency *prometheus.GaugeVec
// OSDIn displays the In state of the OSD
OSDIn *prometheus.GaugeVec
// OSDUp displays the Up state of the OSD
OSDUp *prometheus.GaugeVec
// OSDFull flags if an osd is full
OSDFull *prometheus.GaugeVec
// OSDNearFull flags if an osd is near full
OSDNearFull *prometheus.GaugeVec
// OSDBackfillFull flags if an osd is backfill full
OSDBackfillFull *prometheus.GaugeVec
// OSDDownDesc displays OSDs present in the cluster in "down" state
OSDDownDesc *prometheus.Desc
// TotalBytes displays total bytes in all OSDs
TotalBytes prometheus.Gauge
// TotalUsedBytes displays total used bytes in all OSDs
TotalUsedBytes prometheus.Gauge
// TotalAvailBytes displays total available bytes in all OSDs
TotalAvailBytes prometheus.Gauge
// AverageUtil displays average utilization in all OSDs
AverageUtil prometheus.Gauge
// ScrubbingStateDesc depicts whether an OSD is being scrubbed,
// labelled by OSD
ScrubbingStateDesc *prometheus.Desc
}
// NewOSDCollector creates an instance of the OSDCollector and instantiates
// the individual metrics that show information about the OSD.
func NewOSDCollector(conn Conn, cluster string) *OSDCollector {
labels := make(prometheus.Labels)
labels["cluster"] = cluster
return &OSDCollector{
conn: conn,
osdScrubCache: make(map[int]int),
CrushWeight: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_crush_weight",
Help: "OSD Crush Weight",
ConstLabels: labels,
},
[]string{"osd"},
),
Depth: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_depth",
Help: "OSD Depth",
ConstLabels: labels,
},
[]string{"osd"},
),
Reweight: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_reweight",
Help: "OSD Reweight",
ConstLabels: labels,
},
[]string{"osd"},
),
Bytes: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_bytes",
Help: "OSD Total Bytes",
ConstLabels: labels,
},
[]string{"osd"},
),
UsedBytes: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_used_bytes",
Help: "OSD Used Storage in Bytes",
ConstLabels: labels,
},
[]string{"osd"},
),
AvailBytes: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_avail_bytes",
Help: "OSD Available Storage in Bytes",
ConstLabels: labels,
},
[]string{"osd"},
),
Utilization: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_utilization",
Help: "OSD Utilization",
ConstLabels: labels,
},
[]string{"osd"},
),
Variance: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_variance",
Help: "OSD Variance",
ConstLabels: labels,
},
[]string{"osd"},
),
Pgs: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_pgs",
Help: "OSD Placement Group Count",
ConstLabels: labels,
},
[]string{"osd"},
),
TotalBytes: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_total_bytes",
Help: "OSD Total Storage Bytes",
ConstLabels: labels,
},
),
TotalUsedBytes: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_total_used_bytes",
Help: "OSD Total Used Storage Bytes",
ConstLabels: labels,
},
),
TotalAvailBytes: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_total_avail_bytes",
Help: "OSD Total Available Storage Bytes",
ConstLabels: labels,
},
),
AverageUtil: prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_average_utilization",
Help: "OSD Average Utilization",
ConstLabels: labels,
},
),
CommitLatency: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_perf_commit_latency_seconds",
Help: "OSD Perf Commit Latency",
ConstLabels: labels,
},
[]string{"osd"},
),
ApplyLatency: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_perf_apply_latency_seconds",
Help: "OSD Perf Apply Latency",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDIn: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_in",
Help: "OSD In Status",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDUp: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_up",
Help: "OSD Up Status",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDFull: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_full",
Help: "OSD Full Status",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDNearFull: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_near_full",
Help: "OSD Near Full Status",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDBackfillFull: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: cephNamespace,
Name: "osd_backfill_full",
Help: "OSD Backfill Full Status",
ConstLabels: labels,
},
[]string{"osd"},
),
OSDDownDesc: prometheus.NewDesc(
fmt.Sprintf("%s_osd_down", cephNamespace),
"No. of OSDs down in the cluster",
[]string{"osd", "status"},
labels,
),
ScrubbingStateDesc: prometheus.NewDesc(
fmt.Sprintf("%s_osd_scrub_state", cephNamespace),
"State of OSDs involved in a scrub",
[]string{"osd"},
labels,
),
}
}
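
// collectorList returns the Prometheus collectors that are gathered on every
// scrape.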
func (o *OSDCollector) collectorList() []prometheus.Collector {
return []prometheus.Collector{
o.CrushWeight,
o.Depth,
o.Reweight,
o.Bytes,
o.UsedBytes,
o.AvailBytes,
o.Utilization,
o.Variance,
o.Pgs,
o.TotalBytes,
o.TotalUsedBytes,
o.TotalAvailBytes,
o.AverageUtil,
o.CommitLatency,
o.ApplyLatency,
o.OSDIn,
o.OSDUp,
o.OSDFull,
o.OSDNearFull,
o.OSDBackfillFull,
}
}
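
// cephOSDDF models the JSON output of the "osd df" mon command.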
type cephOSDDF struct {
OSDNodes []struct {
Name string `json:"name"`
CrushWeight json.Number `json:"crush_weight"`
Depth json.Number `json:"depth"`
Reweight json.Number `json:"reweight"`
KB json.Number `json:"kb"`
UsedKB json.Number `json:"kb_used"`
AvailKB json.Number `json:"kb_avail"`
Utilization json.Number `json:"utilization"`
Variance json.Number `json:"var"`
Pgs json.Number `json:"pgs"`
} `json:"nodes"`
Summary struct {
TotalKB json.Number `json:"total_kb"`
TotalUsedKB json.Number `json:"total_kb_used"`
TotalAvailKB json.Number `json:"total_kb_avail"`
AverageUtil json.Number `json:"average_utilization"`
} `json:"summary"`
}
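
// cephPerfStat models the per-OSD entries of the "osd perf" mon command.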
type cephPerfStat struct {
PerfInfo []struct {
ID json.Number `json:"id"`
Stats struct {
CommitLatency json.Number `json:"commit_latency_ms"`
ApplyLatency json.Number `json:"apply_latency_ms"`
} `json:"perf_stats"`
} `json:"osd_perf_infos"`
}
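
// cephOSDPerfStat wraps cephPerfStat under the top-level "osdstats" key.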
type cephOSDPerfStat struct {
cephPerfStat `json:"osdstats"`
}
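
// cephOSDDump models the per-OSD status section of the "osd dump" mon command.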
type cephOSDDump struct {
OSDs []struct {
OSD json.Number `json:"osd"`
Up json.Number `json:"up"`
In json.Number `json:"in"`
State []string `json:"state"`
} `json:"osds"`
}
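
// cephOSDTreeDown models the JSON output of the "osd tree" mon command when
// filtered to down OSDs.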
type cephOSDTreeDown struct {
Nodes []struct {
ID int64 `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
Status string `json:"status"`
} `json:"nodes"`
Stray []struct {
ID int64 `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
Status string `json:"status"`
} `json:"stray"`
}
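
// collectOSDDF gathers the output of "ceph osd df" and updates the per-OSD
// capacity, utilization, and placement-group gauges along with the
// cluster-wide totals.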
func (o *OSDCollector) collectOSDDF() error {
cmd := o.cephOSDDFCommand()
buf, _, err := o.conn.MonCommand(cmd)
if err != nil {
log.Println("[ERROR] Unable to collect data from ceph osd df", err)
return err
}
// Workaround: Ceph Jewel releases after 10.2.5 produce invalid JSON ("-nan") when an OSD is out
buf = bytes.Replace(buf, []byte("-nan"), []byte("0"), -1)
osdDF := &cephOSDDF{}
if err := json.Unmarshal(buf, osdDF); err != nil {
return err
}
for _, node := range osdDF.OSDNodes {
crushWeight, err := node.CrushWeight.Float64()
if err != nil {
return err
}
o.CrushWeight.WithLabelValues(node.Name).Set(crushWeight)
depth, err := node.Depth.Float64()
if err != nil {
return err
}
o.Depth.WithLabelValues(node.Name).Set(depth)
reweight, err := node.Reweight.Float64()
if err != nil {
return err
}
o.Reweight.WithLabelValues(node.Name).Set(reweight)
osdKB, err := node.KB.Float64()
if err != nil {
return err
}
o.Bytes.WithLabelValues(node.Name).Set(osdKB * 1e3)
usedKB, err := node.UsedKB.Float64()
if err != nil {
return err
}
o.UsedBytes.WithLabelValues(node.Name).Set(usedKB * 1e3)
availKB, err := node.AvailKB.Float64()
if err != nil {
return err
}
o.AvailBytes.WithLabelValues(node.Name).Set(availKB * 1e3)
util, err := node.Utilization.Float64()
if err != nil {
return err
}
o.Utilization.WithLabelValues(node.Name).Set(util)
variance, err := node.Variance.Float64()
if err != nil {
return err
}
o.Variance.WithLabelValues(node.Name).Set(variance)
pgs, err := node.Pgs.Float64()
if err != nil {
continue
}
o.Pgs.WithLabelValues(node.Name).Set(pgs)
}
totalKB, err := osdDF.Summary.TotalKB.Float64()
if err != nil {
return err
}
o.TotalBytes.Set(totalKB * 1e3)
totalUsedKB, err := osdDF.Summary.TotalUsedKB.Float64()
if err != nil {
return err
}
o.TotalUsedBytes.Set(totalUsedKB * 1e3)
totalAvailKB, err := osdDF.Summary.TotalAvailKB.Float64()
if err != nil {
return err
}
o.TotalAvailBytes.Set(totalAvailKB * 1e3)
averageUtil, err := osdDF.Summary.AverageUtil.Float64()
if err != nil {
return err
}
o.AverageUtil.Set(averageUtil)
return nil
}
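
// collectOSDPerf gathers the output of "ceph osd perf" and updates the
// per-OSD commit and apply latency gauges, converting milliseconds to seconds.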
func (o *OSDCollector) collectOSDPerf() error {
osdPerfCmd := o.cephOSDPerfCommand()
buf, _, err := o.conn.MonCommand(osdPerfCmd)
if err != nil {
log.Println("[ERROR] Unable to collect data from ceph osd perf", err)
return err
}
osdPerf := &cephOSDPerfStat{}
if err := json.Unmarshal(buf, osdPerf); err != nil {
return err
}
for _, perfStat := range osdPerf.PerfInfo {
osdID, err := perfStat.ID.Int64()
if err != nil {
return err
}
osdName := fmt.Sprintf(osdLabelFormat, osdID)
commitLatency, err := perfStat.Stats.CommitLatency.Float64()
if err != nil {
return err
}
o.CommitLatency.WithLabelValues(osdName).Set(commitLatency / 1e3)
applyLatency, err := perfStat.Stats.ApplyLatency.Float64()
if err != nil {
return err
}
o.ApplyLatency.WithLabelValues(osdName).Set(applyLatency / 1e3)
}
return nil
}
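
// collectOSDTreeDown gathers the output of "ceph osd tree down" and emits one
// metric per down OSD, labelled with its status.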
func (o *OSDCollector) collectOSDTreeDown(ch chan<- prometheus.Metric) error {
osdDownCmd := o.cephOSDTreeCommand("down")
buff, _, err := o.conn.MonCommand(osdDownCmd)
if err != nil {
log.Println("[ERROR] Unable to collect data from ceph osd tree down", err)
return err
}
osdDown := &cephOSDTreeDown{}
if err := json.Unmarshal(buff, osdDown); err != nil {
return err
}
downItems := append(osdDown.Nodes, osdDown.Stray...)
for _, downItem := range downItems {
if downItem.Type != "osd" {
continue
}
osdName := downItem.Name
ch <- prometheus.MustNewConstMetric(o.OSDDownDesc, prometheus.GaugeValue, 1, osdName, downItem.Status)
}
return nil
}
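
// collectOSDDump gathers the output of "ceph osd dump" and updates the up/in
// gauges and the full, nearfull, and backfillfull state gauges for every OSD.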
func (o *OSDCollector) collectOSDDump() error {
osdDumpCmd := o.cephOSDDump()
buff, _, err := o.conn.MonCommand(osdDumpCmd)
if err != nil {
log.Println("[ERROR] Unable to collect data from ceph osd dump", err)
return err
}
osdDump := &cephOSDDump{}
if err := json.Unmarshal(buff, osdDump); err != nil {
return err
}
for _, dumpInfo := range osdDump.OSDs {
osdID, err := dumpInfo.OSD.Int64()
if err != nil {
return err
}
osdName := fmt.Sprintf(osdLabelFormat, osdID)
in, err := dumpInfo.In.Float64()
if err != nil {
return err
}
o.OSDIn.WithLabelValues(osdName).Set(in)
up, err := dumpInfo.Up.Float64()
if err != nil {
return err
}
o.OSDUp.WithLabelValues(osdName).Set(up)
o.OSDFull.WithLabelValues(osdName).Set(0)
o.OSDNearFull.WithLabelValues(osdName).Set(0)
o.OSDBackfillFull.WithLabelValues(osdName).Set(0)
for _, state := range dumpInfo.State {
switch state {
case "full":
o.OSDFull.WithLabelValues(osdName).Set(1)
case "nearfull":
o.OSDNearFull.WithLabelValues(osdName).Set(1)
case "backfillfull":
o.OSDBackfillFull.WithLabelValues(osdName).Set(1)
}
}
}
return nil
}
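
// collectOSDScrubState gathers the output of "ceph pg dump pgs_brief" and
// reports, for every OSD seen so far, whether it is idle, scrubbing, or
// deep-scrubbing.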
func (o *OSDCollector) collectOSDScrubState(ch chan<- prometheus.Metric) error {
cmd := o.cephPGDumpCommand()
buf, _, err := o.conn.MonCommand(cmd)
if err != nil {
return err
}
stats := cephPGDumpBriefResponse{}
if err := json.Unmarshal(buf, &stats); err != nil {
return err
}
// Reset the PG scrub state, since a scrub might have ended within the last
// Prometheus scrape interval. This forces us to report scrub state on all
// previously discovered OSDs. We may be able to remove the "cache" when using
// Prometheus 2.0 if we can tune how unreported/abandoned gauges are treated
// (i.e. set to 0).
for i := range o.osdScrubCache {
o.osdScrubCache[i] = scrubStateIdle
}
for _, pg := range stats {
if strings.Contains(pg.State, "scrubbing") {
scrubState := scrubStateScrubbing
if strings.Contains(pg.State, "deep") {
scrubState = scrubStateDeepScrubbing
}
for _, osd := range pg.Acting {
o.osdScrubCache[osd] = scrubState
}
}
}
for i, v := range o.osdScrubCache {
ch <- prometheus.MustNewConstMetric(
o.ScrubbingStateDesc,
prometheus.GaugeValue,
float64(v),
fmt.Sprintf(osdLabelFormat, i))
}
return nil
}
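
// cephOSDDump builds the JSON payload for the "osd dump" mon command.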
func (o *OSDCollector) cephOSDDump() []byte {
cmd, err := json.Marshal(map[string]interface{}{
"prefix": "osd dump",
"format": "json",
})
if err != nil {
panic(err)
}
return cmd
}
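
// cephOSDDFCommand builds the JSON payload for the "osd df" mon command.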
func (o *OSDCollector) cephOSDDFCommand() []byte {
cmd, err := json.Marshal(map[string]interface{}{
"prefix": "osd df",
"format": "json",
})
if err != nil {
panic(err)
}
return cmd
}
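
// cephOSDPerfCommand builds the JSON payload for the "osd perf" mon command.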
func (o *OSDCollector) cephOSDPerfCommand() []byte {
cmd, err := json.Marshal(map[string]interface{}{
"prefix": "osd perf",
"format": "json",
})
if err != nil {
panic(err)
}
return cmd
}
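
// cephOSDTreeCommand builds the JSON payload for the "osd tree" mon command,
// restricted to the given states.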
func (o *OSDCollector) cephOSDTreeCommand(states ...string) []byte {
cmd, err := json.Marshal(map[string]interface{}{
"prefix": "osd tree",
"states": states,
"format": "json",
})
if err != nil {
panic(err)
}
return cmd
}
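
// cephPGDumpCommand builds the JSON payload for the "pg dump pgs_brief" mon
// command.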
func (o *OSDCollector) cephPGDumpCommand() []byte {
cmd, err := json.Marshal(map[string]interface{}{
"prefix": "pg dump",
"dumpcontents": []string{"pgs_brief"},
"format": jsonFormat,
})
if err != nil {
// panic! because this hard-coded input should never fail to marshal.
panic(err)
}
return cmd
}
// Describe sends the descriptors of each metric the OSDCollector defines
// to the provided Prometheus channel.
func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {
for _, metric := range o.collectorList() {
metric.Describe(ch)
}
ch <- o.ScrubbingStateDesc
ch <- o.OSDDownDesc
}
// Collect sends all the collected metrics to the provided prometheus channel.
// It requires the caller to handle synchronization.
func (o *OSDCollector) Collect(ch chan<- prometheus.Metric) {
// Reset daemon-specific metrics; daemons can leave the cluster
o.CrushWeight.Reset()
o.Depth.Reset()
o.Reweight.Reset()
o.Bytes.Reset()
o.UsedBytes.Reset()
o.AvailBytes.Reset()
o.Utilization.Reset()
o.Variance.Reset()
o.Pgs.Reset()
o.CommitLatency.Reset()
o.ApplyLatency.Reset()
o.OSDIn.Reset()
o.OSDUp.Reset()
if err := o.collectOSDPerf(); err != nil {
log.Println("failed collecting osd perf stats:", err)
}
if err := o.collectOSDDump(); err != nil {
log.Println("failed collecting osd dump:", err)
}
if err := o.collectOSDDF(); err != nil {
log.Println("failed collecting osd metrics:", err)
}
if err := o.collectOSDTreeDown(ch); err != nil {
log.Println("failed collecting osd tree down metrics:", err)
}
for _, metric := range o.collectorList() {
metric.Collect(ch)
}
if err := o.collectOSDScrubState(ch); err != nil {
log.Println("failed collecting osd scrub state:", err)
}
}