Refactor collector descriptors

Use an individual descriptor var for each collector metric, rather than
a map keyed by string, to help avoid mis-mapped or unused metrics.
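
For illustration, a minimal standalone sketch of the difference (not code
from this repo; the package, metric, and variable names below are made up):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative sketch only, not code from this repository.
//
// Map-keyed style: a misspelled key such as descByName["sizes_bytes"]
// still compiles, returns a nil *prometheus.Desc, and only fails at
// scrape time. Unused entries in the map are never flagged.
var descByName = map[string]*prometheus.Desc{
	"size_bytes": prometheus.NewDesc(
		"example_database_size_bytes",
		"Disk space used by the database",
		[]string{"datname"}, nil,
	),
}

// Individual-var style: a misspelled variable name is a compile error,
// and an unused descriptor var can be caught by static analysis.
var exampleDatabaseSizeDesc = prometheus.NewDesc(
	"example_database_size_bytes",
	"Disk space used by the database",
	[]string{"datname"}, nil,
)

func main() {
	m := prometheus.MustNewConstMetric(
		exampleDatabaseSizeDesc, // a typo here would not compile
		prometheus.GaugeValue, 42, "postgres",
	)
	fmt.Println(m.Desc())
}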

Signed-off-by: SuperQ <superq@gmail.com>
SuperQ 2023-06-01 10:04:35 +02:00
parent dde6e6e52a
commit 425c4938ef
GPG Key ID: C646B23C9E3245F1
3 changed files with 52 additions and 54 deletions


@@ -41,13 +41,11 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) {
}, nil
}
-var pgDatabase = map[string]*prometheus.Desc{
-"size_bytes": prometheus.NewDesc(
-"pg_database_size_bytes",
-"Disk space used by the database",
-[]string{"datname"}, nil,
-),
-}
+var pgDatabaseSizeDesc = prometheus.NewDesc(
+"pg_database_size_bytes",
+"Disk space used by the database",
+[]string{"datname"}, nil,
+)
// Update implements Collector and exposes database size.
// It is called by the Prometheus registry when collecting metrics.
@@ -96,7 +94,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p
}
ch <- prometheus.MustNewConstMetric(
pgDatabase["size_bytes"],
pgDatabaseSizeDesc,
prometheus.GaugeValue, float64(size), datname,
)
}


@@ -34,74 +34,74 @@ func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) {
const bgWriterSubsystem = "stat_bgwriter"
-var statBGWriter = map[string]*prometheus.Desc{
-"checkpoints_timed": prometheus.NewDesc(
+var (
+statBGWriterCheckpointsTimedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"),
"Number of scheduled checkpoints that have been performed",
[]string{},
prometheus.Labels{},
-),
-"checkpoints_req": prometheus.NewDesc(
+)
+statBGWriterCheckpointsReqDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"),
"Number of requested checkpoints that have been performed",
[]string{},
prometheus.Labels{},
-),
-"checkpoint_write_time": prometheus.NewDesc(
+)
+statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"),
"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds",
[]string{},
prometheus.Labels{},
-),
-"checkpoint_sync_time": prometheus.NewDesc(
+)
+statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"),
"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds",
[]string{},
prometheus.Labels{},
-),
-"buffers_checkpoint": prometheus.NewDesc(
+)
+statBGWriterBuffersCheckpointDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"),
"Number of buffers written during checkpoints",
[]string{},
prometheus.Labels{},
-),
-"buffers_clean": prometheus.NewDesc(
+)
+statBGWriterBuffersCleanDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"),
"Number of buffers written by the background writer",
[]string{},
prometheus.Labels{},
-),
-"maxwritten_clean": prometheus.NewDesc(
+)
+statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"),
"Number of times the background writer stopped a cleaning scan because it had written too many buffers",
[]string{},
prometheus.Labels{},
-),
-"buffers_backend": prometheus.NewDesc(
+)
+statBGWriterBuffersBackendDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"),
"Number of buffers written directly by a backend",
[]string{},
prometheus.Labels{},
-),
-"buffers_backend_fsync": prometheus.NewDesc(
+)
+statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"),
"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)",
[]string{},
prometheus.Labels{},
-),
-"buffers_alloc": prometheus.NewDesc(
+)
+statBGWriterBuffersAllocDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"),
"Number of buffers allocated",
[]string{},
prometheus.Labels{},
-),
-"stats_reset": prometheus.NewDesc(
+)
+statBGWriterStatsResetDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"),
"Time at which these statistics were last reset",
[]string{},
prometheus.Labels{},
-),
-}
+)
+)
func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
row := db.QueryRowContext(ctx,
@@ -137,57 +137,57 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<-
}
ch <- prometheus.MustNewConstMetric(
statBGWriter["checkpoints_timed"],
statBGWriterCheckpointsTimedDesc,
prometheus.CounterValue,
float64(cpt),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["checkpoints_req"],
statBGWriterCheckpointsReqDesc,
prometheus.CounterValue,
float64(cpr),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["checkpoint_write_time"],
statBGWriterCheckpointsReqTimeDesc,
prometheus.CounterValue,
float64(cpwt),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["checkpoint_sync_time"],
statBGWriterCheckpointsSyncTimeDesc,
prometheus.CounterValue,
float64(cpst),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["buffers_checkpoint"],
statBGWriterBuffersCheckpointDesc,
prometheus.CounterValue,
float64(bcp),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["buffers_clean"],
statBGWriterBuffersCleanDesc,
prometheus.CounterValue,
float64(bc),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["maxwritten_clean"],
statBGWriterMaxwrittenCleanDesc,
prometheus.CounterValue,
float64(mwc),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["buffers_backend"],
statBGWriterBuffersBackendDesc,
prometheus.CounterValue,
float64(bb),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["buffers_backend_fsync"],
statBGWriterBuffersBackendFsyncDesc,
prometheus.CounterValue,
float64(bbf),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["buffers_alloc"],
statBGWriterBuffersAllocDesc,
prometheus.CounterValue,
float64(ba),
)
ch <- prometheus.MustNewConstMetric(
statBGWriter["stats_reset"],
statBGWriterStatsResetDesc,
prometheus.CounterValue,
float64(sr.Unix()),
)


@@ -33,23 +33,23 @@ func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) {
return &PGReplicationSlotCollector{log: config.logger}, nil
}
-var pgReplicationSlot = map[string]*prometheus.Desc{
-"current_wal_lsn": prometheus.NewDesc(
+var (
+pgReplicationSlotCurrentWalDesc = prometheus.NewDesc(
"pg_replication_slot_current_wal_lsn",
"current wal lsn value",
[]string{"slot_name"}, nil,
-),
-"confirmed_flush_lsn": prometheus.NewDesc(
+)
+pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc(
"pg_replication_slot_confirmed_flush_lsn",
"last lsn confirmed flushed to the replication slot",
[]string{"slot_name"}, nil,
-),
-"is_active": prometheus.NewDesc(
+)
+pgReplicationSlotIsActiveDesc = prometheus.NewDesc(
"pg_replication_slot_is_active",
"last lsn confirmed flushed to the replication slot",
[]string{"slot_name"}, nil,
-),
-}
+)
+)
func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
rows, err := db.QueryContext(ctx,
@@ -75,17 +75,17 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch cha
}
ch <- prometheus.MustNewConstMetric(
pgReplicationSlot["current_wal_lsn"],
pgReplicationSlotCurrentWalDesc,
prometheus.GaugeValue, float64(wal_lsn), slot_name,
)
if is_active {
ch <- prometheus.MustNewConstMetric(
pgReplicationSlot["confirmed_flush_lsn"],
pgReplicationSlotCurrentFlushDesc,
prometheus.GaugeValue, float64(flush_lsn), slot_name,
)
}
ch <- prometheus.MustNewConstMetric(
pgReplicationSlot["is_active"],
pgReplicationSlotIsActiveDesc,
prometheus.GaugeValue, float64(flush_lsn), slot_name,
)
}