Merge pull request #47633 from bosc0/alerts_multicluster

prometheus: add multicluster support to alerts
Arthur Outhenin-Chalandre 2022-08-18 13:35:24 +02:00 committed by GitHub
commit 50f4580686
12 changed files with 1312 additions and 1143 deletions


@@ -367,6 +367,7 @@ BuildRequires: rdma-core-devel
BuildRequires: liblz4-devel >= 1.7
# for prometheus-alerts
BuildRequires: golang-github-prometheus-prometheus
BuildRequires: jsonnet
%endif
%if 0%{?fedora} || 0%{?rhel}
Requires: systemd
@@ -408,6 +409,7 @@ BuildRequires: python%{python3_pkgversion}-pyOpenSSL
%endif
%if 0%{?suse_version}
BuildRequires: golang-github-prometheus-prometheus
BuildRequires: jsonnet
BuildRequires: libxmlsec1-1
BuildRequires: libxmlsec1-nss1
BuildRequires: libxmlsec1-openssl1

debian/control

@@ -24,6 +24,7 @@ Build-Depends: automake,
hostname <pkg.ceph.check>,
javahelper,
jq <pkg.ceph.check>,
jsonnet <pkg.ceph.check>,
junit4,
libarrow-dev <pkg.ceph.arrow>,
libparquet-dev <pkg.ceph.arrow>,


@@ -11,17 +11,6 @@ if(WITH_GRAFANA)
include(AddCephTest)
set(CEPH_BUILD_VIRTUALENV ${CMAKE_BINARY_DIR})
add_test(NAME jsonnet-build
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/jsonnet-build.sh ${CMAKE_CURRENT_BINARY_DIR})
set_property(TEST jsonnet-build PROPERTY
FIXTURES_SETUP jsonnet)
add_test(NAME jsonnet-cleanup
COMMAND rm -rf go-jsonnet
${CMAKE_CURRENT_BINARY_DIR}/jsonnet
${CMAKE_CURRENT_BINARY_DIR}/jsonnetfmt)
set_property(TEST jsonnet-cleanup PROPERTY
FIXTURES_CLEANUP jsonnet)
add_test(NAME jsonnet-bundler-build
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/jsonnet-bundler-build.sh ${CMAKE_CURRENT_BINARY_DIR})
set_property(TEST jsonnet-bundler-build PROPERTY
@@ -34,11 +23,10 @@ if(WITH_GRAFANA)
add_tox_test(grafana-lint TOX_ENVS lint)
add_tox_test(jsonnet-lint TOX_ENVS jsonnet-lint)
set_property(TEST run-tox-jsonnet-lint PROPERTY
FIXTURES_REQUIRED venv-for-jsonnet-lint jsonnet
ENVIRONMENT "PATH=${CMAKE_CURRENT_BINARY_DIR}:$ENV{PATH}")
FIXTURES_REQUIRED venv-for-jsonnet-lint)
add_tox_test(jsonnet-check TOX_ENVS jsonnet-check)
set_property(TEST run-tox-jsonnet-check PROPERTY
FIXTURES_REQUIRED venv-for-jsonnet-check jsonnet jsonnet-bundler
FIXTURES_REQUIRED venv-for-jsonnet-check jsonnet-bundler
ENVIRONMENT "PATH=${CMAKE_CURRENT_BINARY_DIR}:$ENV{PATH}")
add_tox_test(alerts-check TOX_ENVS alerts-check)


@@ -5,7 +5,7 @@ All the Grafana dashboards are already generated in the `dashboards_out`
directory and alerts in the `prometheus_alerts.yml` file.
You can use the Grafana dashboards and alerts with Jsonnet like any other
prometheus mixin. You can find more ressources about mixins in general on
prometheus mixin. You can find more resources about mixins in general on
[monitoring.mixins.dev](https://monitoring.mixins.dev/).
### Grafana dashboards for Ceph
@@ -26,11 +26,20 @@ plugin](http://docs.ceph.com/en/latest/mgr/prometheus/) and the
### Prometheus alerts
In `prometheus_alerts.yml` you'll find a set of Prometheus
In `prometheus_alerts.libsonnet` you'll find a set of Prometheus
alert rules that should provide a decent set of default alerts for a
Ceph cluster. Just put this file in a place according to your Prometheus
Ceph cluster. After building it with jsonnet, put the generated file in a place according to your Prometheus
configuration (wherever the `rules` configuration stanza points).
### Multi-cluster support
Ceph-mixin supports dashboards and alerts across multiple clusters.
To enable this feature you need to configure the following in `config.libsonnet`:
```
showMultiCluster: true,
clusterLabel: '<your cluster label>',
```
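
As a sketch, enabling it might look like the following override (the `_config+::` wrapper is an assumption about how `config.libsonnet` is structured, so adapt it to the actual file):

```
// Hypothetical override: turn on multi-cluster mode and name the label
// that distinguishes clusters in your Prometheus setup.
{
  _config+:: {
    showMultiCluster: true,
    clusterLabel: 'cluster',
  },
}
```

After changing the configuration, regenerate the alerts with `jsonnet -J vendor -S alerts.jsonnet -o prometheus_alerts.yml` (the same invocation the check script in this change uses).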
#### SNMP
Ceph provides a MIB (CEPH-PROMETHEUS-ALERT-MIB.txt) to support sending Prometheus
alerts through to an SNMP management platform. The translation from Prometheus


@@ -0,0 +1 @@
std.manifestYamlDoc((import 'alerts.libsonnet').prometheusAlerts, indent_array_in_object=true, quote_keys=false)
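// indent_array_in_object=true indents list items under their parent key, and
// quote_keys=false emits bare (unquoted) YAML keys, which keeps the generated
// prometheus_alerts.yml close to the previous hand-written layout.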


@@ -1,3 +1,3 @@
{
prometheusAlerts+:: std.parseYaml(importstr 'prometheus_alerts.yml'),
prometheusAlerts+:: (import 'prometheus_alerts.libsonnet'),
}


@@ -1,10 +0,0 @@
#!/bin/sh -ex
JSONNET_VERSION="v0.18.0"
OUTPUT_DIR=${1:-$(pwd)}
git clone -b ${JSONNET_VERSION} --depth 1 https://github.com/google/go-jsonnet.git
cd go-jsonnet
go build ./cmd/jsonnet
go build ./cmd/jsonnetfmt
mv jsonnet jsonnetfmt ${OUTPUT_DIR}


@@ -0,0 +1,695 @@
(import 'config.libsonnet')
{
MultiClusterQuery()::
if $._config.showMultiCluster
then 'cluster,'
else '',
MultiClusterSummary()::
if $._config.showMultiCluster
then ' on cluster {{ $labels.cluster }}'
else '',
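// Helper expansion: with showMultiCluster set, MultiClusterQuery() yields
// 'cluster,' for use inside on()/by() groupings and MultiClusterSummary()
// yields ' on cluster {{ $labels.cluster }}' for alert text; otherwise both
// yield ''. std.format fills the %(cluster)s placeholders below positionally
// when the right-hand side of '%' is a plain string or an array, which is
// what the rules here rely on.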
groups+: [
{
name: 'cluster health',
rules: [
{
alert: 'CephHealthError',
'for': '5m',
expr: 'ceph_health_status == 2',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.2.1' },
annotations: {
summary: 'Ceph is in the ERROR state%(cluster)s' % $.MultiClusterSummary(),
description: "The cluster state has been HEALTH_ERROR for more than 5 minutes%(cluster)s. Please check 'ceph health detail' for more information." % $.MultiClusterSummary(),
},
},
{
alert: 'CephHealthWarning',
'for': '15m',
expr: 'ceph_health_status == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'Ceph is in the WARNING state%(cluster)s' % $.MultiClusterSummary(),
description: "The cluster state has been HEALTH_WARN for more than 15 minutes%(cluster)s. Please check 'ceph health detail' for more information." % $.MultiClusterSummary(),
},
},
],
},
{
name: 'mon',
rules: [
{
alert: 'CephMonDownQuorumAtRisk',
'for': '30s',
expr: |||
(
(ceph_health_detail{name="MON_DOWN"} == 1) * on() (
count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1)
)
) == 1
|||,
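// Fires only when quorum is already at the bare majority: the '== bool'
// comparison yields 1 when the count of in-quorum mons equals
// floor(count(mons) / 2) + 1, i.e. losing one more mon would break quorum.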
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.3.1' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down',
summary: 'Monitor quorum is at risk%(cluster)s' % $.MultiClusterSummary(),
description: '{{ $min := query "floor(count(ceph_mon_metadata) / 2) + 1" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}',
},
},
{
alert: 'CephMonDown',
'for': '30s',
expr: |||
count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1)
|||,
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down',
summary: 'One or more monitors down%(cluster)s' % $.MultiClusterSummary(),
description: |||
{{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
|||,
},
},
{
alert: 'CephMonDiskspaceCritical',
'for': '1m',
expr: 'ceph_health_detail{name="MON_DISK_CRIT"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.3.2' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit',
summary: 'Filesystem space on at least one monitor is critically low%(cluster)s' % $.MultiClusterSummary(),
description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}",
},
},
{
alert: 'CephMonDiskspaceLow',
'for': '5m',
expr: 'ceph_health_detail{name="MON_DISK_LOW"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low',
summary: 'Drive space on at least one monitor is approaching full%(cluster)s' % $.MultiClusterSummary(),
description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}",
},
},
{
alert: 'CephMonClockSkew',
'for': '1m',
expr: 'ceph_health_detail{name="MON_CLOCK_SKEW"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew',
summary: 'Clock skew detected among monitors%(cluster)s' % $.MultiClusterSummary(),
description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon.",
},
},
],
},
{
name: 'osd',
rules: [
{
alert: 'CephOSDDownHigh',
expr: 'count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.1' },
annotations: {
summary: 'More than 10%% of OSDs are down%(cluster)s' % $.MultiClusterSummary(),
description: '{{ $value | humanize }}% or {{ with query "count(ceph_osd_up == 0)" }}{{ . | first | value }}{{ end }} of {{ with query "count(ceph_osd_up)" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%). The following OSDs are down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}',
},
},
{
alert: 'CephOSDHostDown',
'for': '5m',
expr: 'ceph_health_detail{name="OSD_HOST_DOWN"} == 1',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.8' },
annotations: {
summary: 'An OSD host is offline%(cluster)s' % $.MultiClusterSummary(),
description: 'The following OSDs are down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0" }} - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} {{- end }}',
},
},
{
alert: 'CephOSDDown',
'for': '5m',
expr: 'ceph_health_detail{name="OSD_DOWN"} == 1',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.2' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down',
summary: 'An OSD has been marked down%(cluster)s' % $.MultiClusterSummary(),
description: |||
{{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
|||,
},
},
{
alert: 'CephOSDNearFull',
'for': '5m',
expr: 'ceph_health_detail{name="OSD_NEARFULL"} == 1',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.3' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull',
summary: 'OSD(s) running low on free space (NEARFULL)%(cluster)s' % $.MultiClusterSummary(),
description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.",
},
},
{
alert: 'CephOSDFull',
'for': '1m',
expr: 'ceph_health_detail{name="OSD_FULL"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.6' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full',
summary: 'OSD full, writes blocked%(cluster)s' % $.MultiClusterSummary(),
description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.",
},
},
{
alert: 'CephOSDBackfillFull',
'for': '1m',
expr: 'ceph_health_detail{name="OSD_BACKFILLFULL"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull',
summary: 'OSD(s) too full for backfill operations%(cluster)s' % $.MultiClusterSummary(),
description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.",
},
},
{
alert: 'CephOSDTooManyRepairs',
'for': '30s',
expr: 'ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs',
summary: 'OSD reports a high number of read errors%(cluster)s' % $.MultiClusterSummary(),
description: 'Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive.',
},
},
{
alert: 'CephOSDTimeoutsPublicNetwork',
'for': '1m',
expr: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'Network issues delaying OSD heartbeats (public network)%(cluster)s' % $.MultiClusterSummary(),
description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs.",
},
},
{
alert: 'CephOSDTimeoutsClusterNetwork',
'for': '1m',
expr: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'Network issues delaying OSD heartbeats (cluster network)%(cluster)s' % $.MultiClusterSummary(),
description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs.",
},
},
{
alert: 'CephOSDInternalDiskSizeMismatch',
'for': '1m',
expr: 'ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch',
summary: 'OSD size inconsistency error%(cluster)s' % $.MultiClusterSummary(),
description: 'One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs.',
},
},
{
alert: 'CephDeviceFailurePredicted',
'for': '1m',
expr: 'ceph_health_detail{name="DEVICE_HEALTH"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#id2',
summary: 'Device(s) predicted to fail soon%(cluster)s' % $.MultiClusterSummary(),
description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info <dev id>'. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD.",
},
},
{
alert: 'CephDeviceFailurePredictionTooHigh',
'for': '1m',
expr: 'ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.7' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany',
summary: 'Too many devices are predicted to fail, unable to resolve%(cluster)s' % $.MultiClusterSummary(),
description: 'The device health module has determined that devices predicted to fail cannot be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated.',
},
},
{
alert: 'CephDeviceFailureRelocationIncomplete',
'for': '1m',
expr: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use',
summary: 'Device failure is predicted, but unable to relocate data%(cluster)s' % $.MultiClusterSummary(),
description: 'The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer.',
},
},
{
alert: 'CephOSDFlapping',
expr: '(rate(ceph_osd_up[5m]) * on(%(cluster)sceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1' % $.MultiClusterQuery(),
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.4' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds',
summary: 'Network issues are causing OSDs to flap (mark each other down)%(cluster)s' % $.MultiClusterSummary(),
description: 'OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up {{ $value | humanize }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s).',
},
},
{
alert: 'CephOSDReadErrors',
'for': '30s',
expr: 'ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors',
summary: 'Device read errors detected%(cluster)s' % $.MultiClusterSummary(),
description: 'An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel.',
},
},
{
alert: 'CephPGImbalance',
'for': '5m',
expr: |||
abs(
((ceph_osd_numpg > 0) - on (%(cluster)sjob) group_left avg(ceph_osd_numpg > 0) by (%(cluster)sjob)) /
on (job) group_left avg(ceph_osd_numpg > 0) by (job)
) * on (%(cluster)sceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
||| % [$.MultiClusterQuery(), $.MultiClusterQuery(), $.MultiClusterQuery()],
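// Three MultiClusterQuery() values are passed because the format string
// above contains three %(cluster)s placeholders, each consumed in order.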
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.4.5' },
annotations: {
summary: 'PGs are not balanced across OSDs%(cluster)s' % $.MultiClusterSummary(),
description: 'OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count.',
},
},
],
},
{
name: 'mds',
rules: [
{
alert: 'CephFilesystemDamaged',
'for': '1m',
expr: 'ceph_health_detail{name="MDS_DAMAGE"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.1' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages',
summary: 'CephFS filesystem is damaged%(cluster)s.' % $.MultiClusterSummary(),
description: 'Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support.',
},
},
{
alert: 'CephFilesystemOffline',
'for': '1m',
expr: 'ceph_health_detail{name="MDS_ALL_DOWN"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.3' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down',
summary: 'CephFS filesystem is offline%(cluster)s' % $.MultiClusterSummary(),
description: 'All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline.',
},
},
{
alert: 'CephFilesystemDegraded',
'for': '1m',
expr: 'ceph_health_detail{name="FS_DEGRADED"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.4' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded',
summary: 'CephFS filesystem is degraded%(cluster)s' % $.MultiClusterSummary(),
description: 'One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable.',
},
},
{
alert: 'CephFilesystemMDSRanksLow',
'for': '1m',
expr: 'ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max',
summary: 'Ceph MDS daemon count is lower than configured%(cluster)s' % $.MultiClusterSummary(),
description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value.",
},
},
{
alert: 'CephFilesystemInsufficientStandby',
'for': '1m',
expr: 'ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby',
summary: 'Ceph filesystem standby daemons too few%(cluster)s' % $.MultiClusterSummary(),
description: 'The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons.',
},
},
{
alert: 'CephFilesystemFailureNoStandby',
'for': '1m',
expr: 'ceph_health_detail{name="FS_WITH_FAILED_MDS"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.5' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds',
summary: 'MDS daemon failed, no further standby available%(cluster)s' % $.MultiClusterSummary(),
description: 'An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS.',
},
},
{
alert: 'CephFilesystemReadOnly',
'for': '1m',
expr: 'ceph_health_detail{name="MDS_HEALTH_READ_ONLY"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.5.2' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages',
summary: 'CephFS filesystem in read only mode due to write error(s)%(cluster)s' % $.MultiClusterSummary(),
description: 'The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support.',
},
},
],
},
{
name: 'mgr',
rules: [
{
alert: 'CephMgrModuleCrash',
'for': '5m',
expr: 'ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.6.1' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash',
summary: 'A manager module has recently crashed%(cluster)s' % $.MultiClusterSummary(),
description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure.",
},
},
{
alert: 'CephMgrPrometheusModuleInactive',
'for': '1m',
expr: 'up{job="ceph"} == 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.6.2' },
annotations: {
summary: 'The mgr/prometheus module is not available%(cluster)s' % $.MultiClusterSummary(),
description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'.",
},
},
],
},
{
name: 'pgs',
rules: [
{
alert: 'CephPGsInactive',
'for': '5m',
expr: 'ceph_pool_metadata * on(%(cluster)spool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0' % $.MultiClusterQuery(),
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.1' },
annotations: {
summary: 'One or more placement groups are inactive%(cluster)s' % $.MultiClusterSummary(),
description: '{{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. Inactive placement groups are not able to serve read/write requests.',
},
},
{
alert: 'CephPGsUnclean',
'for': '15m',
expr: 'ceph_pool_metadata * on(%(cluster)spool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0' % $.MultiClusterQuery(),
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.2' },
annotations: {
summary: 'One or more placement groups are marked unclean%(cluster)s' % $.MultiClusterSummary(),
description: '{{ $value }} PGs have been unclean for more than 15 minutes in pool {{ $labels.name }}. Unclean PGs have not recovered from a previous failure.',
},
},
{
alert: 'CephPGsDamaged',
'for': '5m',
expr: 'ceph_health_detail{name=~"PG_DAMAGED|OSD_SCRUB_ERRORS"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.4' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged',
summary: 'Placement group damaged, manual intervention needed%(cluster)s' % $.MultiClusterSummary(),
description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use the 'ceph pg repair <pg_num>' command.",
},
},
{
alert: 'CephPGRecoveryAtRisk',
'for': '1m',
expr: 'ceph_health_detail{name="PG_RECOVERY_FULL"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.5' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full',
summary: 'OSDs are too full for recovery%(cluster)s' % $.MultiClusterSummary(),
description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data.",
},
},
{
alert: 'CephPGUnavailableBlockingIO',
'for': '1m',
expr: '((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"})) == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.3' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability',
summary: 'PG is unavailable%(cluster)s, blocking I/O' % $.MultiClusterSummary(),
description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O.",
},
},
{
alert: 'CephPGBackfillAtRisk',
'for': '1m',
expr: 'ceph_health_detail{name="PG_BACKFILL_FULL"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.7.6' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full',
summary: 'Backfill operations are blocked due to lack of free space%(cluster)s' % $.MultiClusterSummary(),
description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data.",
},
},
{
alert: 'CephPGNotScrubbed',
'for': '5m',
expr: 'ceph_health_detail{name="PG_NOT_SCRUBBED"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed',
summary: 'Placement group(s) have not been scrubbed%(cluster)s' % $.MultiClusterSummary(),
description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub <pgid>",
},
},
{
alert: 'CephPGsHighPerOSD',
'for': '1m',
expr: 'ceph_health_detail{name="TOO_MANY_PGS"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs',
summary: 'Placement groups per OSD is too high%(cluster)s' % $.MultiClusterSummary(),
description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools.",
},
},
{
alert: 'CephPGNotDeepScrubbed',
'for': '5m',
expr: 'ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"} == 1',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed',
summary: 'Placement group(s) have not been deep scrubbed%(cluster)s' % $.MultiClusterSummary(),
description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window.",
},
},
],
},
{
name: 'nodes',
rules: [
{
alert: 'CephNodeRootFilesystemFull',
'for': '5m',
expr: 'node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 5',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.1' },
annotations: {
summary: 'Root filesystem is dangerously full%(cluster)s' % $.MultiClusterSummary(),
description: 'Root volume is dangerously full: {{ $value | humanize }}% free.',
},
},
{
alert: 'CephNodeNetworkPacketDrops',
expr: '( increase(node_network_receive_drop_total{device!="lo"}[1m]) + increase(node_network_transmit_drop_total{device!="lo"}[1m])) / ( increase(node_network_receive_packets_total{device!="lo"}[1m]) + increase(node_network_transmit_packets_total{device!="lo"}[1m])) >= 0.0001 or ( increase(node_network_receive_drop_total{device!="lo"}[1m]) + increase(node_network_transmit_drop_total{device!="lo"}[1m])) >= 10',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.2' },
annotations: {
summary: 'One or more NICs reports packet drops%(cluster)s' % $.MultiClusterSummary(),
description: 'Node {{ $labels.instance }} experiences packet drop > 0.01% or > 10 packets/s on interface {{ $labels.device }}.',
},
},
{
alert: 'CephNodeNetworkPacketErrors',
expr: '( increase(node_network_receive_errs_total{device!="lo"}[1m]) + increase(node_network_transmit_errs_total{device!="lo"}[1m])) / ( increase(node_network_receive_packets_total{device!="lo"}[1m]) + increase(node_network_transmit_packets_total{device!="lo"}[1m])) >= 0.0001 or ( increase(node_network_receive_errs_total{device!="lo"}[1m]) + increase(node_network_transmit_errs_total{device!="lo"}[1m])) >= 10',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.3' },
annotations: {
summary: 'One or more NICs reports packet errors%(cluster)s' % $.MultiClusterSummary(),
description: 'Node {{ $labels.instance }} experiences packet errors > 0.01% or > 10 packets/s on interface {{ $labels.device }}.',
},
},
{
alert: 'CephNodeDiskspaceWarning',
expr: 'predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5) *on(instance) group_left(nodename) node_uname_info < 0',
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.8.4' },
annotations: {
summary: 'Host filesystem free space is getting low%(cluster)s' % $.MultiClusterSummary(),
description: 'Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate.',
},
},
{
alert: 'CephNodeInconsistentMTU',
expr: 'node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == scalar( max by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) )or node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) == scalar( min by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) )',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'MTU settings across Ceph hosts are inconsistent%(cluster)s' % $.MultiClusterSummary(),
description: 'Node {{ $labels.instance }} has a different MTU size ({{ $value }}) than the median of devices named {{ $labels.device }}.',
},
},
],
},
{
name: 'pools',
rules: [
{
alert: 'CephPoolGrowthWarning',
expr: '(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(%(cluster)spool_id) group_right ceph_pool_metadata) >= 95' % $.MultiClusterQuery(),
labels: { severity: 'warning', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.9.2' },
annotations: {
summary: 'Pool growth rate may soon exceed capacity%(cluster)s' % $.MultiClusterSummary(),
description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.",
},
},
{
alert: 'CephPoolBackfillFull',
expr: 'ceph_health_detail{name="POOL_BACKFILLFULL"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'Free space in a pool is too low for recovery/backfill%(cluster)s' % $.MultiClusterSummary(),
description: 'A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity.',
},
},
{
alert: 'CephPoolFull',
'for': '1m',
expr: 'ceph_health_detail{name="POOL_FULL"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.9.1' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full',
summary: 'Pool is full - writes are blocked%(cluster)s' % $.MultiClusterSummary(),
description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" }} - {{ .Labels.name }} at {{ .Value }}% {{- end }} Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)",
},
},
{
alert: 'CephPoolNearFull',
'for': '5m',
expr: 'ceph_health_detail{name="POOL_NEAR_FULL"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
summary: 'One or more Ceph pools are nearly full%(cluster)s' % $.MultiClusterSummary(),
description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>). Also ensure that the balancer is active.",
},
},
],
},
{
name: 'healthchecks',
rules: [
{
alert: 'CephSlowOps',
'for': '30s',
expr: 'ceph_healthcheck_slow_ops > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops',
summary: 'OSD operations are slow to complete%(cluster)s' % $.MultiClusterSummary(),
description: '{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)',
},
},
],
},
{
name: 'cephadm',
rules: [
{
alert: 'CephadmUpgradeFailed',
'for': '30s',
expr: 'ceph_health_detail{name="UPGRADE_EXCEPTION"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.11.2' },
annotations: {
summary: 'Ceph version upgrade has failed%(cluster)s' % $.MultiClusterSummary(),
description: 'The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs to understand the nature of the issue.',
},
},
{
alert: 'CephadmDaemonFailed',
'for': '30s',
expr: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"} > 0',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.11.1' },
annotations: {
summary: 'A Ceph daemon managed by cephadm is down%(cluster)s' % $.MultiClusterSummary(),
description: "A daemon managed by cephadm is no longer active. Determine which daemon is down with 'ceph health detail'. You may start daemons with 'ceph orch daemon start <daemon_id>'.",
},
},
{
alert: 'CephadmPaused',
'for': '1m',
expr: 'ceph_health_detail{name="CEPHADM_PAUSED"} > 0',
labels: { severity: 'warning', type: 'ceph_default' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused',
summary: 'Orchestration tasks via cephadm are PAUSED%(cluster)s' % $.MultiClusterSummary(),
description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'",
},
},
],
},
{
name: 'PrometheusServer',
rules: [
{
alert: 'PrometheusJobMissing',
'for': '30s',
expr: 'absent(up{job="ceph"})',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.12.1' },
annotations: {
summary: 'The scrape job for Ceph is missing from Prometheus%(cluster)s' % $.MultiClusterSummary(),
description: "The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance.",
},
},
],
},
{
name: 'rados',
rules: [
{
alert: 'CephObjectMissing',
'for': '30s',
expr: '(ceph_health_detail{name="OBJECT_UNFOUND"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.10.1' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound',
summary: 'Object(s) marked UNFOUND%(cluster)s' % $.MultiClusterSummary(),
description: 'The latest version of a RADOS object cannot be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified.',
},
},
],
},
{
name: 'generic',
rules: [
{
alert: 'CephDaemonCrash',
'for': '1m',
expr: 'ceph_health_detail{name="RECENT_CRASH"} == 1',
labels: { severity: 'critical', type: 'ceph_default', oid: '1.3.6.1.4.1.50495.1.2.1.1.2' },
annotations: {
documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash',
summary: 'One or more Ceph daemons have crashed, and are pending acknowledgement%(cluster)s' % $.MultiClusterSummary(),
description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive <id>' command.",
},
},
],
},
],
}

File diff suppressed because it is too large


@@ -19,6 +19,10 @@ do
done
done
jsonnet -J vendor -S alerts.jsonnet -o ${TEMPDIR}/prometheus_alerts.yml
jsondiff --indent 2 "prometheus_alerts.yml" "${TEMPDIR}/prometheus_alerts.yml" \
| tee -a ${TEMPDIR}/json_difference.log
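# An empty json_difference.log means the freshly generated prometheus_alerts.yml
# matches the committed copy, so the check below passes.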
err=0
if [ $(wc -l < ${TEMPDIR}/json_difference.log) -eq 0 ]
then


@@ -26,10 +26,8 @@ tests:
type: ceph_default
severity: critical
exp_annotations:
summary: Cluster is in the ERROR state
description: >
The cluster state has been HEALTH_ERROR for more than 5 minutes.
Please check "ceph health detail" for more information.
summary: Ceph is in the ERROR state
description: The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information.
# health warning
- interval: 5m
@@ -54,10 +52,8 @@ tests:
type: ceph_default
severity: warning
exp_annotations:
summary: Cluster is in the WARNING state
description: >
The cluster state has been HEALTH_WARN for more than 15 minutes.
Please check "ceph health detail" for more information.
summary: Ceph is in the WARNING state
description: The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information.
# 10% OSDs down
- interval: 1m
@@ -105,11 +101,7 @@ tests:
severity: critical
exp_annotations:
summary: More than 10% of OSDs are down
description: |
33.33% or 1 of 3 OSDs are down (>= 10%).
The following OSDs are down:
- osd.1 on ceph
description: "33.33% or 1 of 3 OSDs are down (>= 10%). The following OSDs are down: - osd.1 on ceph"
# flapping OSD
- interval: 1s
@@ -167,12 +159,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds
summary: Network issues are causing OSDs to flap (mark each other down)
description: >
OSD osd.0 on ceph was
marked down and back up 20.1 times once a minute for 5 minutes.
This may indicate a network issue (latency, packet loss, MTU mismatch)
on the cluster network, or the public network if no cluster network
is deployed. Check the network stats on the listed host(s).
description: "OSD osd.0 on ceph was marked down and back up 20.1 times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)."
# high pg count deviation
- interval: 1m
@@ -245,9 +232,7 @@ tests:
type: ceph_default
exp_annotations:
summary: PGs are not balanced across OSDs
description: >
OSD osd.1 on ceph deviates
by more than 30% from average PG count.
description: "OSD osd.1 on ceph deviates by more than 30% from average PG count."
# pgs inactive
- interval: 1m
@@ -296,11 +281,7 @@ tests:
type: ceph_default
exp_annotations:
summary: One or more placement groups are inactive
description: >
1 PGs have been inactive for more than 5 minutes in pool
device_health_metrics.
Inactive placement groups are not able to serve read/write
requests.
description: "1 PGs have been inactive for more than 5 minutes in pool device_health_metrics. Inactive placement groups are not able to serve read/write requests."
#pgs unclean
- interval: 1m
@@ -352,10 +333,7 @@ tests:
type: ceph_default
exp_annotations:
summary: One or more placement groups are marked unclean
description: >
1 PGs have been unclean for more than 15 minutes in pool
device_health_metrics.
Unclean PGs have not recovered from a previous failure.
description: "1 PGs have been unclean for more than 15 minutes in pool device_health_metrics. Unclean PGs have not recovered from a previous failure."
# root volume full
- interval: 1m
@@ -394,8 +372,7 @@ tests:
type: ceph_default
exp_annotations:
summary: Root filesystem is dangerously full
description: >
Root volume is dangerously full: 4.811% free.
description: "Root volume is dangerously full: 4.811% free."
# network packets dropped
- interval: 1s
@@ -437,9 +414,7 @@ tests:
type: ceph_default
exp_annotations:
summary: One or more NICs reports packet drops
description: >
Node node-exporter experiences packet drop > 0.01% or >
10 packets/s on interface eth0.
description: "Node node-exporter experiences packet drop > 0.01% or > 10 packets/s on interface eth0."
# network packets errors
- interval: 1s
@@ -481,9 +456,7 @@ tests:
type: ceph_default
exp_annotations:
summary: One or more NICs reports packet errors
description: >
Node node-exporter experiences packet errors > 0.01% or > 10
packets/s on interface eth0.
description: "Node node-exporter experiences packet errors > 0.01% or > 10 packets/s on interface eth0."
# Node Storage disk space filling up
- interval: 1m
@@ -523,10 +496,7 @@ tests:
nodename: node-1.unittests.com
exp_annotations:
summary: Host filesystem free space is getting low
description: >
Mountpoint /rootfs on node-1.unittests.com
will be full in less than 5 days based on the 48 hour trailing
fill rate.
description: "Mountpoint /rootfs on node-1.unittests.com will be full in less than 5 days based on the 48 hour trailing fill rate."
# MTU Mismatch
- interval: 1m
input_series:
@@ -603,9 +573,7 @@ tests:
type: ceph_default
exp_annotations:
summary: MTU settings across Ceph hosts are inconsistent
description: >
Node hostname1 has a different MTU size (2200)
than the median of devices named eth4.
description: "Node hostname1 has a different MTU size (2200) than the median of devices named eth4."
- exp_labels:
device: eth4
instance: node-exporter
@@ -614,9 +582,7 @@ tests:
type: ceph_default
exp_annotations:
summary: MTU settings across Ceph hosts are inconsistent
description: >
Node node-exporter has a different MTU size (9000)
than the median of devices named eth4.
description: "Node node-exporter has a different MTU size (9000) than the median of devices named eth4."
# pool full, data series has 6 but using topk(5) so to ensure the
# results are working as expected
@@ -674,18 +640,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full
summary: Pool is full - writes are blocked
description: |
A pool has reached its MAX quota, or OSDs supporting the pool
have reached the FULL threshold. Until this is resolved, writes to
the pool will be blocked.
Pool Breakdown (top 5)
- rbd at 96%
- iscsi at 90%
- default.rgw.index at 72%
- cephfs_data at 32%
- default.rgw.log at 19%
Increase the pool's quota, or add capacity to the cluster first
then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)
description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) - rbd at 96% - iscsi at 90% - default.rgw.index at 72% - cephfs_data at 32% - default.rgw.log at 19% Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)"
# slow OSD ops
- interval : 1m
input_series:
@@ -710,8 +665,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
summary: OSD operations are slow to complete
description: >
1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)
description: "1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
# CEPHADM orchestrator alert triggers
- interval: 30s
@@ -737,11 +691,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.11.2
exp_annotations:
summary: Ceph version upgrade has failed
description: >
The cephadm cluster upgrade process has failed. The cluster remains in
an undetermined state.
Please review the cephadm logs, to understand the nature of the issue
description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs, to understand the nature of the issue"
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"}'
@@ -765,10 +715,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.11.1
exp_annotations:
summary: A Ceph daemon managed by cephadm is down
description: >
A daemon managed by cephadm is no longer active. Determine, which
daemon is down with 'ceph health detail'. you may start daemons with
the 'ceph orch daemon start <daemon_id>'
description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start <daemon_id>'"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="CEPHADM_PAUSED"}'
@@ -792,10 +739,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused
summary: Orchestration tasks via cephadm are PAUSED
description: >
Cluster management has been paused manually. This will prevent the
orchestrator from service management and reconciliation. If this is
not intentional, resume cephadm operations with 'ceph orch resume'
description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'"
# MDS
- interval: 1m
input_series:
@@ -821,10 +765,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
summary: CephFS filesystem is damaged.
description: >
Filesystem metadata has been corrupted. Data may be inaccessible.
Analyze metrics from the MDS daemon admin socket, or
escalate to support.
description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_HEALTH_READ_ONLY"}'
@@ -849,12 +790,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
summary: CephFS filesystem in read only mode due to write error(s)
description: >
The filesystem has switched to READ ONLY due to an unexpected
error when writing to the metadata pool.
Either analyze the output from the MDS daemon admin socket, or
escalate to support.
description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_ALL_DOWN"}'
@@ -879,9 +815,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down
summary: CephFS filesystem is offline
description: >
All MDS ranks are unavailable. The MDS daemons managing metadata
are down, rendering the filesystem offline.
description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="FS_DEGRADED"}'
@@ -906,10 +840,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded
summary: CephFS filesystem is degraded
description: >
One or more metadata daemons (MDS ranks) are failed or in a
damaged state. At best the filesystem is partially available,
at worst the filesystem is completely unusable.
description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"}'
@@ -933,10 +864,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby
summary: Ceph filesystem standby daemons too few
description: >
The minimum number of standby daemons required by standby_count_wanted
is less than the current number of standby daemons. Adjust the standby count
or increase the number of MDS daemons.
description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="FS_WITH_FAILED_MDS"}'
@@ -961,10 +889,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds
summary: MDS daemon failed, no further standby available
description: >
An MDS daemon has failed, leaving only one active rank and no
available standby. Investigate the cause of the failure or add a
standby MDS.
description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"}'
@@ -988,10 +913,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max
summary: Ceph MDS daemon count is lower than configured
description: >
The filesystem's "max_mds" setting defines the number of MDS ranks in
the filesystem. The current number of active MDS daemons is less than
this value.
description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value."
# MGR
- interval: 1m
input_series:
@@ -1017,15 +939,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.6.2
exp_annotations:
summary: The mgr/prometheus module is not available
description: >
The mgr/prometheus module at ceph-mgr:9283 is unreachable. This
could mean that the module has been disabled or the mgr daemon itself is down.
Without the mgr/prometheus module metrics and alerts will no longer
function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to to determine whether the
mgr is active. If the mgr is not active, restart it, otherwise you can determine
module status with 'ceph mgr module ls'. If it is
not listed as enabled, enable it with 'ceph mgr module enable prometheus'.
description: "The mgr/prometheus module at ceph-mgr:9283 is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"}'
@ -1050,10 +964,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash
summary: A manager module has recently crashed
description: >
One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A
crashed module may impact functionality within the cluster. Use the 'ceph crash' command to
determine which module has failed, and archive it to acknowledge the failure.
description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure."
# MON
- interval: 1m
input_series:
@ -1081,15 +992,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit
summary: Filesystem space on at least one monitor is critically low
description: |
The free space available to a monitor's store is critically low.
You should increase the space available to the monitor(s). The default directory
is /var/lib/ceph/mon-*/data/store.db on traditional deployments,
and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook.
Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files.
Also check any other directories under /var/lib/rook and other directories on the
same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are;
- ceph-mon-a
description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_DISK_LOW"}'
@ -1114,16 +1017,8 @@ tests:
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low
summary: Disk space on at least one monitor is approaching full
description: |
The space available to a monitor's store is approaching full (>70% is the default).
You should increase the space available to the monitor(s). The default directory
is /var/lib/ceph/mon-*/data/store.db on traditional deployments,
and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook.
Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files.
Also check any other directories under /var/lib/rook and other directories on the
same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are;
- ceph-mon-a
summary: Drive space on at least one monitor is approaching full
description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_CLOCK_SKEW"}'
@ -1147,14 +1042,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew
summary: Clock skew detected among monitors
description: |
Ceph monitors rely on closely synchronized time to maintain
quorum and cluster consistency. This event indicates that the time on at least
one mon has drifted too far from the lead mon.
Review cluster status with ceph -s. This will show which monitors
are affected. Check the time sync status on each monitor host with
"ceph time-sync-status" and the state and peers of your ntpd or chrony daemon.
description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon."
# Check 3 mons one down, quorum at risk
- interval: 1m
@ -1193,12 +1081,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
summary: Monitor quorum is at risk
description: |
Quorum requires a majority of monitors (x 2) to be active
Without quorum the cluster will become inoperable, affecting all services and connected clients.
The following monitors are down:
- mon.c on ceph-mon-3
description: "Quorum requires a majority of monitors (x 2) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: - mon.c on ceph-mon-3"
# check 5 mons, 1 down - warning only
- interval: 1m
input_series:
@ -1240,12 +1123,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
summary: One or more monitors down
description: |
You have 1 monitor down.
Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable.
The following monitors are down:
- mon.e on ceph-mon-5
description: "You have 1 monitor down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: - mon.e on ceph-mon-5\n"
# Device Health
- interval: 1m
input_series:
@ -1270,13 +1148,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#id2
summary: Device(s) predicted to fail soon
description: |
The device health module has determined that one or more devices will fail
soon. To review device status use 'ceph device ls'. To show a specific
device use 'ceph device info <dev id>'.
Mark the OSD out so that data may migrate to other OSDs. Once
the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD.
description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info <dev id>'. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"}'
@ -1301,11 +1173,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany
summary: Too many devices are predicted to fail, unable to resolve
description: |
The device health module has determined that devices predicted to
fail can not be remediated automatically, since too many OSDs would be removed from
the cluster to ensure performance and availabililty. Prevent data
integrity issues by adding new OSDs so that data may be relocated.
description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"}'
@ -1329,14 +1197,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use
summary: Device failure is predicted, but unable to relocate data
description: |
The device health module has determined that one or more devices will fail
soon, but the normal process of relocating the data on the device to other
OSDs in the cluster is blocked.
Ensure that the cluster has available free space. It may be necessary to add
capacity to the cluster to allow data from the failing device to
successfully migrate, or to enable the balancer.
description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer."
# OSD
- interval: 1m
input_series:
@ -1365,9 +1226,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.4.8
exp_annotations:
summary: An OSD host is offline
description: |
The following OSDs are down:
- ceph-osd-1 : osd.0
description: "The following OSDs are down: - ceph-osd-1 : osd.0"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"}'
@ -1390,9 +1249,7 @@ tests:
type: ceph_default
exp_annotations:
summary: Network issues delaying OSD heartbeats (public network)
description: |
OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network
for latency or loss issues. Use 'ceph health detail' to show the affected OSDs.
description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"}'
@ -1415,9 +1272,7 @@ tests:
type: ceph_default
exp_annotations:
summary: Network issues delaying OSD heartbeats (cluster network)
description: |
OSD heartbeats on the cluster's 'cluster' network (backend) are running slow. Investigate the network
for latency or loss issues. Use 'ceph health detail' to show the affected OSDs.
description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"}'
@ -1441,9 +1296,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch
summary: OSD size inconsistency error
description: |
One or more OSDs have an internal inconsistency between metadata and the size of the device.
This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs.
description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs."
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"}'
@ -1467,9 +1320,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors
summary: Device read errors detected
description: >
An OSD has encountered read errors, but the OSD has recovered by retrying
the reads. This may indicate an issue with hardware or the kernel.
description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_DOWN"}'
@ -1506,11 +1357,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down
summary: An OSD has been marked down
description: |
1 OSD down for over 5mins.
The following OSD is down:
- osd.1 on ceph-osd-2
description: "1 OSD down for over 5mins. The following OSD is down: - osd.1 on ceph-osd-2\n"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_NEARFULL"}'
@ -1535,11 +1382,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull
summary: OSD(s) running low on free space (NEARFULL)
description: |
One or more OSDs have reached the NEARFULL threshold
Use 'ceph health detail' and 'ceph osd df' to identify the problem.
To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
description: One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_FULL"}'
@ -1564,12 +1407,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full
summary: OSD full, writes blocked
description: |
An OSD has reached the FULL threshold. Writes to pools that share the
affected OSD will be blocked.
Use 'ceph health detail' and 'ceph osd df' to identify the problem.
To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
description: An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_BACKFILLFULL"}'
@ -1593,12 +1431,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull
summary: OSD(s) too full for backfill operations
description: |
An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations
from completing.
Use 'ceph health detail' and 'ceph osd df' to identify the problem.
To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"}'
@ -1622,9 +1455,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs
summary: OSD reports a high number of read errors
description: |
Reads from an OSD have used a secondary PG to return data to the client, indicating
a potential failing disk.
description: Reads from an OSD have used a secondary PG to return data to the client, indicating a potentially failing drive.
# Pools
# trigger percent full prediction on pools 1 and 2 only
- interval: 12h
@ -1657,9 +1488,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.9.2
exp_annotations:
summary: Pool growth rate may soon exceed capacity
description: >
Pool 'rbd' will be full in less than 5 days
assuming the average fill-up rate of the past 48 hours.
description: Pool 'rbd' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="POOL_BACKFILLFULL"}'
@ -1682,10 +1511,7 @@ tests:
type: ceph_default
exp_annotations:
summary: Free space in a pool is too low for recovery/backfill
description: >
A pool is approaching the near full threshold, which will
prevent recovery/backfill operations from completing.
Consider adding more capacity.
description: A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity.
- interval: 1m
input_series:
@ -1709,17 +1535,7 @@ tests:
type: ceph_default
exp_annotations:
summary: One or more Ceph pools are nearly full
description: |
A pool has exceeeded the warning (percent full) threshold, or OSDs
supporting the pool have reached the NEARFULL threshold. Writes may
continue, but you are at risk of the pool going read-only if more capacity
isn't made available.
Determine the affected pool with 'ceph df detail', looking
at QUOTA BYTES and STORED. Increase the pool's quota, or add
capacity to the cluster first then increase the pool's quota
(e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>).
Also ensure that the balancer is active.
description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>). Also ensure that the balancer is active."
# PGs
- interval: 1m
@ -1745,14 +1561,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed
summary: Placement group(s) have not been scrubbed
description: |
One or more PGs have not been scrubbed recently. Scrubs check metadata integrity,
protecting against bit-rot. They check that metadata
is consistent across data replicas. When PGs miss their scrub interval, it may
indicate that the scrub window is too small, or PGs were not in a 'clean' state during the
scrub window.
You can manually initiate a scrub with: ceph pg scrub <pgid>
description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub <pgid>"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_DAMAGED"}'
@ -1777,13 +1586,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged
summary: Placement group damaged, manual intervention needed
description: >
During data consistency checks (scrub), at least one PG has been flagged as being
damaged or inconsistent.
Check to see which PG is affected, and attempt a manual repair if necessary. To list
problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use
the 'ceph pg repair <pg_num>' command.
description: During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use the 'ceph pg repair <pg_num>' command.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="TOO_MANY_PGS"}'
@ -1807,14 +1610,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs
summary: Placement groups per OSD is too high
description: |
The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).
Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status',
and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide
the autoscaler based on the expected relative size of the pool
('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler
mode to "warn" and adjust pg_num appropriately for one or more pools.
description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_RECOVERY_FULL"}'
@ -1839,9 +1635,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full
summary: OSDs are too full for recovery
description: >
Data redundancy is at risk since one or more OSDs are at or above the
'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data.
description: Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_BACKFILL_FULL"}'
@ -1866,9 +1660,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full
summary: Backfill operations are blocked due to lack of free space
description: >
Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs
have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data.
description: Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_AVAILABILITY"}'
@ -1901,9 +1693,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability
summary: PG is unavailable, blocking I/O
description: >
Data availability is reduced, impacting the cluster's ability to service I/O. One or
more placement groups (PGs) are in a state that blocks I/O.
description: Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O.
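# A sketch of how the blocking PGs might be identified, using standard
# PG query commands:
#   ceph health detail
#   ceph pg dump_stuck inactive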
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"}'
@ -1927,14 +1717,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed
summary: Placement group(s) have not been deep scrubbed
description: |
One or more PGs have not been deep scrubbed recently. Deep scrubs
protect against bit-rot. They compare data
replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate
that the window is too small or PGs were not in a 'clean' state during the deep-scrub
window.
You can manually initiate a deep scrub with: ceph pg deep-scrub <pgid>
description: One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window.
# Prometheus
- interval: 1m
@ -1958,12 +1741,7 @@ tests:
oid: 1.3.6.1.4.1.50495.1.2.1.12.1
exp_annotations:
summary: The scrape job for Ceph is missing from Prometheus
description: |
The prometheus job that scrapes from Ceph is no longer defined, this
will effectively mean you'll have no metrics or alerts for the cluster.
Please review the job definitions in the prometheus.yml file of the prometheus
instance.
description: The prometheus job that scrapes from Ceph is no longer defined; this effectively means you will have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance.
# RADOS
- interval: 1m
input_series:
@ -2001,10 +1779,7 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound
summary: Object(s) marked UNFOUND
description: |
The latest version of a RADOS object can not be found, even though all OSDs are up. I/O
requests for this object from clients will block (hang). Resolving this issue may
require the object to be rolled back to a prior version manually, and manually verified.
description: The latest version of a RADOS object cannot be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually and then verified.
# Generic Alerts
- interval: 1m
input_series:
@ -2031,7 +1806,4 @@ tests:
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash
summary: One or more Ceph daemons have crashed, and are pending acknowledgement
description: |
One or more daemons have crashed recently, and need to be acknowledged. This notification
ensures that software crashes do not go unseen. To acknowledge a crash, use the
'ceph crash archive <id>' command.
description: One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive <id>' command.
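# The cases above follow promtool's rule unit-test format (input_series,
# eval steps, exp_annotations). Assuming the tests live in a file such as
# tests_alerts/test_alerts.yml (name assumed; the repo drives them through
# pytest, per the tox env below), promtool can also run them directly:
#   promtool test rules tests_alerts/test_alerts.yml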


@ -22,7 +22,7 @@ whitelist_externals =
sh
description =
check: Ensure that auto-generated files match the current version
fix: Update generated files from jsonnet filse with latest changes
fix: Update generated files from jsonnet files with latest changes
lint: Test if jsonnet files are linted (without any update)
deps =
-rrequirements-grafonnet.txt
@ -56,7 +56,7 @@ whitelist_externals =
commands =
behave tests_dashboards/features
[testenv:alerts-{check,lint}]
[testenv:alerts-{fix,check,lint}]
deps =
-rrequirements-alerts.txt
pytest
@ -64,6 +64,7 @@ depends = grafonnet-check
whitelist_externals =
promtool
commands =
fix: jsonnet -J vendor -S alerts.jsonnet -o prometheus_alerts.yml
lint: promtool check rules prometheus_alerts.yml
test: pytest -rA tests_alerts/test_syntax.py tests_alerts/test_unittests.py
python3 ./tests_alerts/validate_rules.py
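# A sketch of the resulting workflow; the env names follow the
# alerts-{fix,check,lint} factors above, run from the directory holding
# this tox.ini:
#   tox -e alerts-fix    # regenerate prometheus_alerts.yml via jsonnet
#   tox -e alerts-lint   # promtool check rules on the generated file
#   tox -e alerts-check  # run the validate_rules.py validation suite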