Add pg_database collector

Converts the pg_database metrics from queries.yaml to a built-in collector. This is enabled by default because it is unlikely to be a performance problem, and the data is very useful.

Signed-off-by: Joe Adams <github@joeadams.io>
This commit is contained in:
Joe Adams 2022-02-09 21:28:40 -05:00
parent 3880df4f64
commit 21a19ed252
No known key found for this signature in database
GPG Key ID: 2A21CFFDE8B588C6
4 changed files with 81 additions and 12 deletions

View File

@ -1,3 +1,7 @@
## master / unreleased
* [ENHANCEMENT] Add pg_database_size_bytes metric #613
## 0.10.1 / 2022-01-14
* [BUGFIX] Fix broken log-level for values other than debug. #560

View File

@ -14,13 +14,16 @@
package main
import (
"context"
"database/sql"
"fmt"
"log"
"sync"
"time"
"github.com/blang/semver"
"github.com/go-kit/log/level"
"github.com/prometheus-community/postgres_exporter/collector"
"github.com/prometheus/client_golang/prometheus"
)
@ -128,6 +131,17 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool
err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
}
{
pgdb := collector.NewPGDatabaseCollector()
metrics, err := pgdb.Update(context.TODO(), s.db, s.String())
if err != nil {
log.Printf("Failed to scrape pg_database metrics: %s", err)
}
for _, m := range metrics {
ch <- m
}
}
return err
}

63
collector/pg_database.go Normal file
View File

@ -0,0 +1,63 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"database/sql"
"github.com/prometheus/client_golang/prometheus"
)
// PGDatabaseCollector collects per-database metrics (currently only
// pg_database_size_bytes) from the pg_database catalog. The zero value
// is ready to use; see NewPGDatabaseCollector.
type PGDatabaseCollector struct{}
// NewPGDatabaseCollector returns a collector that exposes metrics
// derived from the pg_database catalog.
func NewPGDatabaseCollector() *PGDatabaseCollector {
	return new(PGDatabaseCollector)
}
// pgDatabase maps a metric suffix to its Prometheus descriptor; used by
// PGDatabaseCollector.Update when emitting const metrics.
var pgDatabase = map[string]*prometheus.Desc{
"size_bytes": prometheus.NewDesc(
"pg_database_size_bytes",
"Disk space used by the database",
// Single variable label: the database name from pg_database.datname.
[]string{"datname"}, nil,
),
}
// Update queries pg_database and returns one pg_database_size_bytes metric
// per database. On error it returns the metrics collected so far together
// with the error, so callers may still emit the partial result.
//
// The server argument is accepted for interface compatibility and is
// currently unused.
func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, server string) ([]prometheus.Metric, error) {
	// nil slice is idiomatic for "empty"; append handles it transparently.
	var metrics []prometheus.Metric
	rows, err := db.QueryContext(ctx,
		`SELECT pg_database.datname
		,pg_database_size(pg_database.datname)
		FROM pg_database;`)
	if err != nil {
		return metrics, err
	}
	defer rows.Close()

	for rows.Next() {
		var datname string
		var size int64
		if err := rows.Scan(&datname, &size); err != nil {
			return metrics, err
		}
		metrics = append(metrics, prometheus.MustNewConstMetric(
			pgDatabase["size_bytes"],
			prometheus.GaugeValue, float64(size), datname,
		))
	}
	// rows.Err surfaces any iteration error; nil on clean completion,
	// which collapses the original check-then-return-nil pair.
	return metrics, rows.Err()
}

View File

@ -146,18 +146,6 @@ pg_statio_user_tables:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
pg_database:
query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database"
master: true
cache_seconds: 30
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
# WARNING: This set of metrics can be very expensive on a busy server as every unique query executed will create an additional time series
pg_stat_statements:
query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'"