pg_stat_database: added support for active_time counter (#961)

* feat(pg_stat_database): active time metric

---------

Signed-off-by: Jiri Sveceny <jiri.sveceny@icloud.com>
Jiri Sveceny 2023-11-28 15:12:07 +01:00 committed by GitHub
parent 5ceae7f414
commit f5b613aba7
2 changed files with 40 additions and 1 deletion
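For background on the counter being added: pg_stat_database.active_time was introduced in PostgreSQL 14 and reports the cumulative time, in milliseconds, spent executing SQL statements in each database. A minimal sketch of reading the raw column, assuming PostgreSQL 14+ and the github.com/lib/pq driver with a placeholder DSN (an illustrative choice, not a statement about how the exporter connects):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // illustrative driver choice; DSN below is a placeholder
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(`SELECT datid, datname, active_time FROM pg_stat_database`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var datid, datname sql.NullString
		var activeTime sql.NullFloat64 // cumulative milliseconds; can be NULL for some rows
		if err := rows.Scan(&datid, &datname, &activeTime); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s/%s: %.0f ms active\n", datid.String, datname.String, activeTime.Float64)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```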

collector/pg_stat_database.go

@@ -206,6 +206,15 @@ var (
[]string{"datid", "datname"},
prometheus.Labels{},
)
statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName(
namespace,
statDatabaseSubsystem,
"active_time_seconds_total",
),
"Time spent executing SQL statements in this database, in seconds",
[]string{"datid", "datname"},
prometheus.Labels{},
)
statDatabaseQuery = `
SELECT
@@ -227,6 +236,7 @@ var (
,deadlocks
,blk_read_time
,blk_write_time
,active_time
,stats_reset
FROM pg_stat_database;
`
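As a side note on the naming above: prometheus.BuildFQName joins namespace, subsystem, and name with underscores. Assuming the collector package's existing constants (namespace = "pg", statDatabaseSubsystem = "stat_database"), the new series is exposed as pg_stat_database_active_time_seconds_total. A minimal standalone sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Assumed constants: postgres_exporter's collector package defines
// namespace = "pg" and statDatabaseSubsystem = "stat_database".
const (
	namespace             = "pg"
	statDatabaseSubsystem = "stat_database"
)

func main() {
	// BuildFQName joins its parts with underscores, so the counter is
	// exported as pg_stat_database_active_time_seconds_total.
	fqName := prometheus.BuildFQName(namespace, statDatabaseSubsystem, "active_time_seconds_total")
	fmt.Println(fqName)

	desc := prometheus.NewDesc(
		fqName,
		"Time spent executing SQL statements in this database, in seconds",
		[]string{"datid", "datname"}, // variable labels, filled per database row
		prometheus.Labels{},          // no constant labels
	)
	fmt.Println(desc.String())
}
```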
@@ -244,7 +254,7 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
for rows.Next() {
var datid, datname sql.NullString
var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64
var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime, activeTime sql.NullFloat64
var statsReset sql.NullTime
err := rows.Scan(
@@ -266,6 +276,7 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
&deadlocks,
&blkReadTime,
&blkWriteTime,
&activeTime,
&statsReset,
)
if err != nil {
@@ -344,6 +355,10 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_write_time")
continue
}
if !activeTime.Valid {
level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no active_time")
continue
}
statsResetMetric := 0.0
if !statsReset.Valid {
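Note that rows.Scan is positional, which is why ,active_time is spliced into statDatabaseQuery and into the Scan argument list at the same place (just before stats_reset), and why a NULL value has to be caught through sql.NullFloat64 before the row is used. A runnable sketch of both points, using the same DATA-DOG/go-sqlmock package the tests rely on; the two-column query here is illustrative, not the real statDatabaseQuery:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Column order must match the scan order below, just as active_time is
	// added at the same position in statDatabaseQuery and in rows.Scan.
	mock.ExpectQuery("SELECT blk_write_time, active_time FROM pg_stat_database").
		WillReturnRows(sqlmock.NewRows([]string{"blk_write_time", "active_time"}).
			AddRow(823.0, 33.0).
			AddRow(823.0, nil)) // active_time can come back NULL

	rows, err := db.Query("SELECT blk_write_time, active_time FROM pg_stat_database")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var blkWriteTime, activeTime sql.NullFloat64
		if err := rows.Scan(&blkWriteTime, &activeTime); err != nil {
			log.Fatal(err)
		}
		if !activeTime.Valid {
			fmt.Println("skipping row with NULL active_time") // mirrors the collector's guard
			continue
		}
		fmt.Println("active_time ms:", activeTime.Float64)
	}
}
```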
@@ -467,6 +482,13 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
labels...,
)
ch <- prometheus.MustNewConstMetric(
statDatabaseActiveTime,
prometheus.CounterValue,
activeTime.Float64/1000.0,
labels...,
)
ch <- prometheus.MustNewConstMetric(
statDatabaseStatsReset,
prometheus.CounterValue,

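One detail worth calling out in the emission above: pg_stat_database.active_time is reported in milliseconds, while Prometheus conventions call for base units, so the value is divided by 1000 to match the metric's _seconds_total suffix. A standalone sketch of the conversion; the label values "pid" and "postgres" are sample data mirroring the tests:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc(
		"pg_stat_database_active_time_seconds_total",
		"Time spent executing SQL statements in this database, in seconds",
		[]string{"datid", "datname"},
		nil,
	)

	activeTimeMS := 33.0 // raw pg_stat_database.active_time, in milliseconds

	// Divide by 1000 so the exported counter is in seconds, matching the
	// _seconds_total suffix; "pid" and "postgres" are sample label values.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, activeTimeMS/1000.0, "pid", "postgres")

	var out dto.Metric
	if err := m.Write(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetCounter().GetValue()) // 0.033
}
```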
collector/pg_stat_database_test.go

@@ -52,6 +52,7 @@ func TestPGStatDatabaseCollector(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}
@@ -80,6 +81,7 @@ func TestPGStatDatabaseCollector(t *testing.T) {
925,
16,
823,
33,
srT)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -113,6 +115,7 @@ func TestPGStatDatabaseCollector(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.033},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}
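The expected value 0.033 above is the mocked 33 ms row after the collector's /1000 conversion. A hedged sketch of the comparison pattern these tests use, with labelMap and expectation as stand-ins for the real test helpers rather than their exact definitions:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// Stand-ins for the test file's helpers; the real definitions may differ.
type labelMap map[string]string

type expectation struct {
	labels     labelMap
	metricType dto.MetricType
	value      float64
}

func main() {
	desc := prometheus.NewDesc("pg_stat_database_active_time_seconds_total",
		"help text", []string{"datid", "datname"}, nil)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 0.033, "pid", "postgres")

	// Decode the collected metric into the dto model, as the tests do.
	var got dto.Metric
	if err := m.Write(&got); err != nil {
		panic(err)
	}

	want := expectation{
		labels:     labelMap{"datid": "pid", "datname": "postgres"},
		metricType: dto.MetricType_COUNTER,
		value:      0.033,
	}

	// Rebuild a labelMap from the decoded metric and compare field by field.
	gotLabels := labelMap{}
	for _, lp := range got.GetLabel() {
		gotLabels[lp.GetName()] = lp.GetValue()
	}
	match := (got.GetCounter() != nil) == (want.metricType == dto.MetricType_COUNTER) &&
		got.GetCounter().GetValue() == want.value &&
		gotLabels["datid"] == want.labels["datid"] &&
		gotLabels["datname"] == want.labels["datname"]
	fmt.Println("match:", match) // true
}
```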
@@ -159,6 +162,7 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}
@@ -182,6 +186,7 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
925,
16,
823,
32,
srT).
AddRow(
"pid",
@@ -202,6 +207,7 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
925,
16,
823,
32,
srT)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -234,6 +240,7 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.032},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}
@@ -275,6 +282,7 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}
@@ -303,6 +311,7 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
925,
16,
823,
14,
srT).
AddRow(
nil,
@@ -324,6 +333,7 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
nil,
nil,
nil,
nil,
).
AddRow(
"pid",
@@ -344,6 +354,7 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
926,
17,
824,
15,
srT)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -376,7 +387,9 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.014},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 355},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4946},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097745},
@@ -393,6 +406,7 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 926},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 17},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 824},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.015},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}
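The row-leak test above feeds a valid row (active_time 14), an all-NULL row, and another valid row (15), and asserts that the NULL row is skipped without aborting iteration, so both 0.014 and 0.015 surface. A dependency-free sketch of that control flow:

```go
package main

import (
	"database/sql"
	"fmt"
)

func main() {
	// Rows as the mock supplies them: a valid row, an all-NULL row, and
	// another valid row. The collector must skip the NULL row but keep
	// consuming subsequent rows, which is what the row-leak test asserts.
	rows := []sql.NullFloat64{
		{Float64: 14, Valid: true},
		{}, // NULL active_time
		{Float64: 15, Valid: true},
	}

	var emitted []float64
	for _, activeTime := range rows {
		if !activeTime.Valid {
			continue // skip this row, but the loop moves on to later rows
		}
		emitted = append(emitted, activeTime.Float64/1000.0)
	}
	fmt.Println(emitted) // [0.014 0.015]
}
```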
@@ -435,6 +449,7 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}
@@ -458,6 +473,7 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
925,
16,
823,
7,
nil)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -491,6 +507,7 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.007},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0},
}
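Unlike a NULL active_time, a NULL stats_reset does not skip the row: as the collector code earlier in the diff shows, statsResetMetric falls back to 0.0, which is why the final expected value in this test is 0. A small sketch of that fallback, assuming the epoch conversion works roughly as in the collector:

```go
package main

import (
	"database/sql"
	"fmt"
	"time"
)

func main() {
	// stats_reset is NULL when statistics were never reset; the collector
	// exports 0 in that case instead of dropping the row.
	var statsReset sql.NullTime

	statsResetMetric := 0.0
	if statsReset.Valid {
		statsResetMetric = float64(statsReset.Time.Unix())
	}
	fmt.Println(statsResetMetric) // 0

	// With a value present, the counter carries the reset time as an epoch,
	// matching the 1685059842 expected by the other tests.
	statsReset = sql.NullTime{Time: time.Unix(1685059842, 0), Valid: true}
	if statsReset.Valid {
		statsResetMetric = float64(statsReset.Time.Unix())
	}
	fmt.Println(statsResetMetric) // 1.685059842e+09
}
```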