mirror of
https://github.com/digitalocean/ceph_exporter
synced 2025-02-13 09:57:36 +00:00
Implement new gauge counting crash reports
New metric: `ceph_crash_reports` which counts the entries returned by `ceph crash ls` by daemon name and archival status. This is not the same as `ceph_new_crash_reports`, which is the value of the `RECENT_CRASH` health check and only counts the non-archived errors of the past two weeks. The new metric counts errors as long as they are not purged (which is done after 1 year by default).
This commit is contained in:
parent
56bd79f4be
commit
c3a228c221
152
ceph/crashes.go
Normal file
152
ceph/crashes.go
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
// Copyright 2022 DigitalOcean
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ceph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// crashLsLineRegex matches one line of `ceph crash ls` plain output:
	// a crash ID ending in a 36-character UUID, then the entity name
	// (capture group 1), then an optional trailing '*' (capture group 2)
	// that marks the crash report as new (i.e. not archived).
	crashLsLineRegex = regexp.MustCompile(`.*_[0-9a-f-]{36}\s+(\S+)\s*(\*)?`)

	// statusNames maps crashEntry.isNew to the value used for the
	// metric's "status" label.
	statusNames = map[bool]string{true: "new", false: "archived"}
)
|
||||||
|
|
||||||
|
// CrashesCollector collects information on how many crash reports are currently open.
// These reports are counted by daemon/client name, and by status (new or archived).
// This is NOT the same as new_crash_reports, that only counts new reports in the past
// two weeks as reported by 'ceph health'.
type CrashesCollector struct {
	conn    Conn           // connection used to issue mon commands
	logger  *logrus.Logger // destination for collection errors
	version *Version       // Ceph version of the target cluster

	// We keep track of which daemons we've seen so that their error count
	// can be reset to zero if the errors get purged.
	knownEntities map[string]bool

	// CrashReports counts crash reports per ("daemon", "status") label pair.
	CrashReports prometheus.GaugeVec
}
|
||||||
|
|
||||||
|
// NewCrashesCollector creates a new CrashesCollector instance
|
||||||
|
func NewCrashesCollector(exporter *Exporter) *CrashesCollector {
|
||||||
|
labels := make(prometheus.Labels)
|
||||||
|
labels["cluster"] = exporter.Cluster
|
||||||
|
|
||||||
|
collector := &CrashesCollector{
|
||||||
|
conn: exporter.Conn,
|
||||||
|
logger: exporter.Logger,
|
||||||
|
version: exporter.Version,
|
||||||
|
|
||||||
|
knownEntities: map[string]bool{},
|
||||||
|
|
||||||
|
CrashReports: *prometheus.NewGaugeVec(
|
||||||
|
prometheus.GaugeOpts{
|
||||||
|
Namespace: cephNamespace,
|
||||||
|
Name: "crash_reports",
|
||||||
|
Help: "Count of crashes reports per daemon, according to `ceph crash ls`",
|
||||||
|
ConstLabels: labels,
|
||||||
|
},
|
||||||
|
[]string{"daemon", "status"},
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
return collector
|
||||||
|
}
|
||||||
|
|
||||||
|
// crashEntry identifies one crash-count bucket: the daemon/client entity
// that crashed, and whether the report is still new (true) or has been
// archived (false).
type crashEntry struct {
	entity string
	isNew  bool
}
|
||||||
|
|
||||||
|
// getCrashLs runs the 'crash ls' command and parses its results
|
||||||
|
func (c *CrashesCollector) getCrashLs() ([]crashEntry, error) {
|
||||||
|
crashes := make([]crashEntry, 0)
|
||||||
|
|
||||||
|
// We parse the plain format because it is quite compact.
|
||||||
|
// The JSON output of this command is very verbose and might be too slow
|
||||||
|
// to process in an outage storm.
|
||||||
|
cmd, err := json.Marshal(map[string]interface{}{
|
||||||
|
"prefix": "crash ls",
|
||||||
|
"format": "plain",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return crashes, err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, _, err := c.conn.MonCommand(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return crashes, err
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bytes.NewBuffer(buf))
|
||||||
|
for scanner.Scan() {
|
||||||
|
matched := crashLsLineRegex.FindStringSubmatch(scanner.Text())
|
||||||
|
if len(matched) == 3 {
|
||||||
|
crashes = append(crashes, crashEntry{matched[1], matched[2] == "*"})
|
||||||
|
} else if len(matched) == 2 {
|
||||||
|
// Just in case the line-end spaces were stripped
|
||||||
|
crashes = append(crashes, crashEntry{matched[1], false})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return crashes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// processCrashLs takes the parsed results from getCrashLs and counts them
|
||||||
|
// in a map. It also keeps track of which daemons we've see in the past, and
|
||||||
|
// initializes all counts to zero where needed.
|
||||||
|
func (c *CrashesCollector) processCrashLs(crashes []crashEntry) map[crashEntry]int {
|
||||||
|
crashMap := make(map[crashEntry]int)
|
||||||
|
|
||||||
|
for _, crash := range crashes {
|
||||||
|
c.knownEntities[crash.entity] = true
|
||||||
|
}
|
||||||
|
for entity := range c.knownEntities {
|
||||||
|
crashMap[crashEntry{entity, true}] = 0
|
||||||
|
crashMap[crashEntry{entity, false}] = 0
|
||||||
|
}
|
||||||
|
for _, crash := range crashes {
|
||||||
|
crashMap[crash]++
|
||||||
|
}
|
||||||
|
|
||||||
|
return crashMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// Describe provides the metrics descriptions to Prometheus
func (c *CrashesCollector) Describe(ch chan<- *prometheus.Desc) {
	// Delegate to the GaugeVec, which owns the single metric description.
	c.CrashReports.Describe(ch)
}
|
||||||
|
|
||||||
|
// Collect sends all the collected metrics Prometheus.
|
||||||
|
func (c *CrashesCollector) Collect(ch chan<- prometheus.Metric) {
|
||||||
|
crashes, err := c.getCrashLs()
|
||||||
|
if err != nil {
|
||||||
|
c.logger.WithError(err).Error("failed to run 'ceph crash ls'")
|
||||||
|
}
|
||||||
|
crashMap := c.processCrashLs(crashes)
|
||||||
|
|
||||||
|
for crash, count := range crashMap {
|
||||||
|
c.CrashReports.WithLabelValues(crash.entity, statusNames[crash.isNew]).Set(float64(count))
|
||||||
|
}
|
||||||
|
|
||||||
|
c.CrashReports.Collect(ch)
|
||||||
|
}
|
159
ceph/crashes_test.go
Normal file
159
ceph/crashes_test.go
Normal file
@ -0,0 +1,159 @@
|
|||||||
|
// Copyright 2022 DigitalOcean
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package ceph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCrashesCollector exercises the CrashesCollector: a full scrape through
// a registered prometheus handler, plus unit tests of the getCrashLs parsing
// and the processCrashLs counting/zeroing logic.
func TestCrashesCollector(t *testing.T) {

	// Canned plain-format `ceph crash ls` output: a header line (ignored by
	// the parser), two archived crashes, and three marked new ('*' suffix).
	const outputCephCrashLs string = `
ID ENTITY NEW
2022-01-01_18:57:51.184156Z_02d9b659-69d1-4dd6-8495-ee2345208568 client.admin
2022-01-01_19:02:01.401852Z_9100163b-4cd1-479f-b3a8-0dc2d288eaea mgr.mgr-node-01
2022-02-01_21:02:46.687015Z_0de8b741-b323-4f63-828a-e460294e28b9 client.admin *
2022-02-03_04:03:38.371403Z_bd756324-27c0-494e-adfb-9f5f6e3db000 osd.3 *
2022-02-03_04:05:45.419226Z_11c639af-5eb2-4a29-91aa-20120218891a osd.3 *
`

	t.Run(
		"full test",
		func(t *testing.T) {
			// Mock the mon connection so any MonCommand returns the fixture.
			conn := &MockConn{}
			conn.On("MonCommand", mock.Anything).Return(
				[]byte(outputCephCrashLs), "", nil,
			)

			collector := NewCrashesCollector(&Exporter{Conn: conn, Cluster: "ceph", Logger: logrus.New(), Version: Pacific})
			err := prometheus.Register(collector)
			require.NoError(t, err)
			// Unregister so other tests can reuse the default registry.
			defer prometheus.Unregister(collector)

			// Scrape through a real HTTP handler, end to end.
			server := httptest.NewServer(promhttp.Handler())
			defer server.Close()

			resp, err := http.Get(server.URL)
			require.NoError(t, err)
			defer resp.Body.Close()

			buf, err := ioutil.ReadAll(resp.Body)
			require.NoError(t, err)

			// Each seen daemon must expose BOTH statuses, zero-filled where
			// no crash of that status exists.
			reMatches := []*regexp.Regexp{
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="client.admin",status="new"} 1`),
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="client.admin",status="archived"} 1`),
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="mgr.mgr-node-01",status="new"} 0`),
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="mgr.mgr-node-01",status="archived"} 1`),
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="osd.3",status="new"} 2`),
				regexp.MustCompile(`crash_reports{cluster="ceph",daemon="osd.3",status="archived"} 0`),
			}

			// t.Log(string(buf))
			for _, re := range reMatches {
				if !re.Match(buf) {
					t.Errorf("expected %s to match\n", re.String())
				}
			}
		},
	)

	t.Run(
		"getCrashLs unit test",
		func(t *testing.T) {
			conn := &MockConn{}
			conn.On("MonCommand", mock.Anything).Return(
				[]byte(outputCephCrashLs), "", nil,
			)

			log := logrus.New()
			log.Level = logrus.DebugLevel
			collector := NewCrashesCollector(&Exporter{Conn: conn, Cluster: "ceph", Logger: log, Version: Pacific})

			// Expected entries in fixture order; isNew mirrors the '*' flag.
			expected := []crashEntry{
				{"client.admin", false},
				{"mgr.mgr-node-01", false},
				{"client.admin", true},
				{"osd.3", true},
				{"osd.3", true},
			}
			crashes, _ := collector.getCrashLs()

			if !reflect.DeepEqual(crashes, expected) {
				t.Errorf("incorrect getCrashLs result: expected %v, got %v\n", expected, crashes)
			}
		},
	)

	t.Run(
		"getCrashLs empty crash list unit test",
		func(t *testing.T) {
			// No output at all (e.g. a cluster with no crashes) must parse
			// to an empty list, not an error or bogus entries.
			conn := &MockConn{}
			conn.On("MonCommand", mock.Anything).Return(
				[]byte(""), "", nil,
			)

			collector := NewCrashesCollector(&Exporter{Conn: conn, Cluster: "ceph", Logger: logrus.New(), Version: Pacific})

			crashes, _ := collector.getCrashLs()
			if len(crashes) != 0 {
				t.Errorf("expected empty result from getCrashLs, got %v\n", crashes)
			}
		},
	)

	t.Run(
		"processCrashLs test",
		func(t *testing.T) {
			// conn is nil: processCrashLs never touches the connection.
			collector := NewCrashesCollector(&Exporter{Conn: nil, Cluster: "ceph", Logger: logrus.New(), Version: Pacific})

			newCrash := crashEntry{"daemon", true}
			archivedCrash := crashEntry{"daemon", false}

			// New crash
			crashMap := collector.processCrashLs([]crashEntry{newCrash})
			expected := map[crashEntry]int{newCrash: 1, archivedCrash: 0}
			if !reflect.DeepEqual(crashMap, expected) {
				t.Errorf("incorrect processCrashLs result: expected %v, got %v\n", expected, crashMap)
			}

			// Archived crash
			crashMap = collector.processCrashLs([]crashEntry{archivedCrash})
			expected = map[crashEntry]int{newCrash: 0, archivedCrash: 1}
			if !reflect.DeepEqual(crashMap, expected) {
				t.Errorf("incorrect processCrashLs result: expected %v, got %v\n", expected, crashMap)
			}

			// Crash was memorized, check that we reset count to zero
			crashMap = collector.processCrashLs([]crashEntry{})
			expected = map[crashEntry]int{newCrash: 0, archivedCrash: 0}
			if !reflect.DeepEqual(crashMap, expected) {
				t.Errorf("incorrect processCrashLs result: expected %v, got %v\n", expected, crashMap)
			}

		},
	)
}
|
@ -16,9 +16,10 @@ package ceph
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"sync"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Exporter wraps all the ceph collectors and provides a single global
|
// Exporter wraps all the ceph collectors and provides a single global
|
||||||
@ -56,6 +57,7 @@ func (exporter *Exporter) getCollectors() []prometheus.Collector {
|
|||||||
NewClusterHealthCollector(exporter),
|
NewClusterHealthCollector(exporter),
|
||||||
NewMonitorCollector(exporter),
|
NewMonitorCollector(exporter),
|
||||||
NewOSDCollector(exporter),
|
NewOSDCollector(exporter),
|
||||||
|
NewCrashesCollector(exporter),
|
||||||
}
|
}
|
||||||
|
|
||||||
switch exporter.RgwMode {
|
switch exporter.RgwMode {
|
||||||
|
Loading…
Reference in New Issue
Block a user