Add pg_database collector
Converts the pg_database metrics from queries.yaml to a built-in collector. This is enabled by default because it is unlikely to cause performance problems and the data is very useful.

Signed-off-by: Joe Adams <github@joeadams.io>
parent 3880df4f64
commit 21a19ed252
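With the collector enabled, database sizes appear on the exporter's /metrics endpoint roughly as below. The metric name, HELP text, and datname label come from the collector added in this commit; the database names and values are only illustrative:

# HELP pg_database_size_bytes Disk space used by the database
# TYPE pg_database_size_bytes gauge
pg_database_size_bytes{datname="postgres"} 8.529455e+06
pg_database_size_bytes{datname="app"} 1.34873088e+08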
CHANGELOG.md
@@ -1,3 +1,7 @@
+## master / unreleased
+
+* [ENHANCEMENT] Add pg_database_size_bytes metric #613
+
 ## 0.10.1 / 2022-01-14
 
 * [BUGFIX] Fix broken log-level for values other than debug. #560
@@ -14,13 +14,16 @@
 package main
 
 import (
+	"context"
 	"database/sql"
 	"fmt"
+	"log"
 	"sync"
 	"time"
 
 	"github.com/blang/semver"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus-community/postgres_exporter/collector"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
@@ -128,6 +131,17 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool
 		err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
 	}
 
+	{
+		pgdb := collector.NewPGDatabaseCollector()
+		metrics, err := pgdb.Update(context.TODO(), s.db, s.String())
+		if err != nil {
+			log.Printf("Failed to scrape pg_database metrics: %s", err)
+		}
+		for _, m := range metrics {
+			ch <- m
+		}
+	}
+
 	return err
 }
 
collector/pg_database.go (new file)
@@ -0,0 +1,63 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collector
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type PGDatabaseCollector struct{}
+
+func NewPGDatabaseCollector() *PGDatabaseCollector {
+	return &PGDatabaseCollector{}
+}
+
+var pgDatabase = map[string]*prometheus.Desc{
+	"size_bytes": prometheus.NewDesc(
+		"pg_database_size_bytes",
+		"Disk space used by the database",
+		[]string{"datname"}, nil,
+	),
+}
+
+func (PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, server string) ([]prometheus.Metric, error) {
+	metrics := []prometheus.Metric{}
+	rows, err := db.QueryContext(ctx,
+		`SELECT pg_database.datname
+		,pg_database_size(pg_database.datname)
+		FROM pg_database;`)
+	if err != nil {
+		return metrics, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var datname string
+		var size int64
+		if err := rows.Scan(&datname, &size); err != nil {
+			return metrics, err
+		}
+		metrics = append(metrics, prometheus.MustNewConstMetric(
+			pgDatabase["size_bytes"],
+			prometheus.GaugeValue, float64(size), datname,
+		))
+	}
+	if err := rows.Err(); err != nil {
+		return metrics, err
+	}
+	return metrics, nil
+}
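For context, here is a minimal standalone sketch of exercising the new collector's Update method directly, outside the exporter's Scrape path. The DSN, the server string, and the use of the lib/pq driver are assumptions for illustration and are not part of this commit:

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq" // assumed driver; any database/sql Postgres driver works
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus-community/postgres_exporter/collector"
)

func main() {
	// Placeholder DSN; point this at a real server.
	db, err := sql.Open("postgres", "postgresql://postgres:password@localhost:5432/postgres?sslmode=disable")
	if err != nil {
		log.Fatalf("open: %v", err)
	}
	defer db.Close()

	// Update runs the collector's query and returns one metric per database.
	metrics, err := collector.NewPGDatabaseCollector().Update(context.Background(), db, "localhost:5432")
	if err != nil {
		log.Fatalf("update: %v", err)
	}

	// Dump each metric's descriptor and label/value pairs for inspection.
	for _, m := range metrics {
		var pb dto.Metric
		if err := m.Write(&pb); err != nil {
			log.Fatalf("write: %v", err)
		}
		log.Printf("%s => %s", m.Desc(), pb.String())
	}
}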
queries.yaml
@@ -146,18 +146,6 @@ pg_statio_user_tables:
         usage: "COUNTER"
         description: "Number of buffer hits in this table's TOAST table indexes (if any)"
 
-pg_database:
-  query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database"
-  master: true
-  cache_seconds: 30
-  metrics:
-    - datname:
-        usage: "LABEL"
-        description: "Name of the database"
-    - size_bytes:
-        usage: "GAUGE"
-        description: "Disk space used by the database"
-
 # WARNING: This set of metrics can be very expensive on a busy server as every unique query executed will create an additional time series
 pg_stat_statements:
   query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'"