Mirror of https://github.com/prometheus-community/postgres_exporter (synced 2025-04-28 22:18:05 +00:00)
Merge pull request #697 from sysadmind/exclude-database-fix
Fix exclude-databases for collector package
Commit: c86f549c83
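The change in a nutshell: the --exclude-databases flag stays a single comma-separated string, but main.go now splits it once into a []string and hands that slice both to the exporter options (ExcludeDatabases) and to the collector package, which filters database names against it. The sketch below is illustrative only and is not part of the commit; filterDatabases is a hypothetical stand-in for the sliceContains-based filtering added to the pg_database collector.

// Illustrative sketch only -- not part of the commit. filterDatabases is a
// hypothetical helper mirroring the sliceContains-based filtering the
// collector package now performs.
package main

import (
    "fmt"
    "strings"
)

func filterDatabases(all, excludeDatabases []string) []string {
    var kept []string
    for _, db := range all {
        excluded := false
        for _, ex := range excludeDatabases {
            if ex == db {
                excluded = true
                break
            }
        }
        if !excluded {
            kept = append(kept, db)
        }
    }
    return kept
}

func main() {
    // After this change, main.go splits the flag value exactly once:
    excludedDatabases := strings.Split("template0,template1", ",")
    fmt.Println(filterDatabases([]string{"postgres", "template0", "app"}, excludedDatabases))
    // Output: [postgres app]
}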
@@ -14,8 +14,10 @@
 package main
 
 import (
+    "fmt"
     "net/http"
     "os"
+    "strings"
 
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
@@ -101,13 +103,16 @@ func main() {
         os.Exit(1)
     }
 
+    excludedDatabases := strings.Split(*excludeDatabases, ",")
+    logger.Log("msg", "Excluded databases", "databases", fmt.Sprintf("%v", excludedDatabases))
+
     opts := []ExporterOpt{
         DisableDefaultMetrics(*disableDefaultMetrics),
         DisableSettingsMetrics(*disableSettingsMetrics),
         AutoDiscoverDatabases(*autoDiscoverDatabases),
         WithUserQueriesPath(*queriesPath),
         WithConstantLabels(*constantLabelsList),
-        ExcludeDatabases(*excludeDatabases),
+        ExcludeDatabases(excludedDatabases),
         IncludeDatabases(*includeDatabases),
     }
 
@@ -128,6 +133,7 @@ func main() {
 
     pe, err := collector.NewPostgresCollector(
         logger,
+        excludedDatabases,
         dsn,
         []string{},
     )
@@ -143,7 +149,7 @@ func main() {
         w.Write(landingPage) // nolint: errcheck
     })
 
-    http.HandleFunc("/probe", handleProbe(logger))
+    http.HandleFunc("/probe", handleProbe(logger, excludedDatabases))
 
     srv := &http.Server{}
     if err := web.ListenAndServe(srv, webConfig, logger); err != nil {
@@ -484,9 +484,9 @@ func AutoDiscoverDatabases(b bool) ExporterOpt {
 }
 
 // ExcludeDatabases allows to filter out result from AutoDiscoverDatabases
-func ExcludeDatabases(s string) ExporterOpt {
+func ExcludeDatabases(s []string) ExporterOpt {
     return func(e *Exporter) {
-        e.excludeDatabases = strings.Split(s, ",")
+        e.excludeDatabases = s
     }
 }
 
@@ -16,7 +16,6 @@ package main
 import (
     "fmt"
     "net/http"
-    "time"
 
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
@@ -26,7 +25,7 @@ import (
     "github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
-func handleProbe(logger log.Logger) http.HandlerFunc {
+func handleProbe(logger log.Logger, excludeDatabases []string) http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
         ctx := r.Context()
         conf := c.GetConfig()
@@ -62,21 +61,9 @@ func handleProbe(logger log.Logger) http.HandlerFunc
 
         // TODO(@sysadmind): Timeout
 
-        probeSuccessGauge := prometheus.NewGauge(prometheus.GaugeOpts{
-            Name: "probe_success",
-            Help: "Displays whether or not the probe was a success",
-        })
-        probeDurationGauge := prometheus.NewGauge(prometheus.GaugeOpts{
-            Name: "probe_duration_seconds",
-            Help: "Returns how long the probe took to complete in seconds",
-        })
-
         tl := log.With(logger, "target", target)
 
-        start := time.Now()
         registry := prometheus.NewRegistry()
-        registry.MustRegister(probeSuccessGauge)
-        registry.MustRegister(probeDurationGauge)
 
         opts := []ExporterOpt{
             DisableDefaultMetrics(*disableDefaultMetrics),
@@ -84,7 +71,7 @@ func handleProbe(logger log.Logger) http.HandlerFunc {
             AutoDiscoverDatabases(*autoDiscoverDatabases),
             WithUserQueriesPath(*queriesPath),
             WithConstantLabels(*constantLabelsList),
-            ExcludeDatabases(*excludeDatabases),
+            ExcludeDatabases(excludeDatabases),
             IncludeDatabases(*includeDatabases),
         }
 
@@ -96,10 +83,8 @@ func handleProbe(logger log.Logger) http.HandlerFunc {
         registry.MustRegister(exporter)
 
         // Run the probe
-        pc, err := collector.NewProbeCollector(tl, registry, dsn)
+        pc, err := collector.NewProbeCollector(tl, excludeDatabases, registry, dsn)
         if err != nil {
-            probeSuccessGauge.Set(0)
-            probeDurationGauge.Set(time.Since(start).Seconds())
             http.Error(w, err.Error(), http.StatusInternalServerError)
             return
         }
@@ -115,10 +100,6 @@ func handleProbe(logger log.Logger) http.HandlerFunc {
 
         registry.MustRegister(pc)
 
-        duration := time.Since(start).Seconds()
-        probeDurationGauge.Set(duration)
-        probeSuccessGauge.Set(1)
-
         // TODO check success, etc
         h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
         h.ServeHTTP(w, r)
@@ -28,7 +28,7 @@ import (
 )
 
 var (
-    factories              = make(map[string]func(logger log.Logger) (Collector, error))
+    factories              = make(map[string]func(collectorConfig) (Collector, error))
     initiatedCollectorsMtx = sync.Mutex{}
     initiatedCollectors    = make(map[string]Collector)
     collectorState         = make(map[string]*bool)
@@ -62,7 +62,12 @@ type Collector interface {
     Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error
 }
 
-func registerCollector(name string, isDefaultEnabled bool, createFunc func(logger log.Logger) (Collector, error)) {
+type collectorConfig struct {
+    logger           log.Logger
+    excludeDatabases []string
+}
+
+func registerCollector(name string, isDefaultEnabled bool, createFunc func(collectorConfig) (Collector, error)) {
     var helpDefaultState string
     if isDefaultEnabled {
         helpDefaultState = "enabled"
@@ -93,7 +98,7 @@ type PostgresCollector struct {
 type Option func(*PostgresCollector) error
 
 // NewPostgresCollector creates a new PostgresCollector.
-func NewPostgresCollector(logger log.Logger, dsn string, filters []string, options ...Option) (*PostgresCollector, error) {
+func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn string, filters []string, options ...Option) (*PostgresCollector, error) {
     p := &PostgresCollector{
         logger: logger,
     }
@@ -126,7 +131,10 @@ func NewPostgresCollector(logger log.Logger, dsn string, filters []string, optio
         if collector, ok := initiatedCollectors[key]; ok {
             collectors[key] = collector
         } else {
-            collector, err := factories[key](log.With(logger, "collector", key))
+            collector, err := factories[key](collectorConfig{
+                logger:           log.With(logger, "collector", key),
+                excludeDatabases: excludeDatabases,
+            })
             if err != nil {
                 return nil, err
             }
@@ -26,11 +26,19 @@ func init() {
 }
 
 type PGDatabaseCollector struct {
     log log.Logger
+    excludedDatabases []string
 }
 
-func NewPGDatabaseCollector(logger log.Logger) (Collector, error) {
-    return &PGDatabaseCollector{log: logger}, nil
+func NewPGDatabaseCollector(config collectorConfig) (Collector, error) {
+    exclude := config.excludeDatabases
+    if exclude == nil {
+        exclude = []string{}
+    }
+    return &PGDatabaseCollector{
+        log:               config.logger,
+        excludedDatabases: exclude,
+    }, nil
 }
 
 var pgDatabase = map[string]*prometheus.Desc{
@@ -41,20 +49,49 @@ var pgDatabase = map[string]*prometheus.Desc{
     ),
 }
 
-func (PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
+// Update implements Collector and exposes database size.
+// It is called by the Prometheus registry when collecting metrics.
+// The list of databases is retrieved from pg_database and filtered
+// by the excludeDatabase config parameter. The tradeoff here is that
+// we have to query the list of databases and then query the size of
+// each database individually. This is because we can't filter the
+// list of databases in the query because the list of excluded
+// databases is dynamic.
+func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
+    // Query the list of databases
     rows, err := db.QueryContext(ctx,
         `SELECT pg_database.datname
-        ,pg_database_size(pg_database.datname)
-        FROM pg_database;`)
+        FROM pg_database;
+        `,
+    )
     if err != nil {
         return err
     }
     defer rows.Close()
 
+    var databases []string
+
     for rows.Next() {
         var datname string
+        if err := rows.Scan(&datname); err != nil {
+            return err
+        }
+
+        // Ignore excluded databases
+        // Filtering is done here instead of in the query to avoid
+        // a complicated NOT IN query with a variable number of parameters
+        if sliceContains(c.excludedDatabases, datname) {
+            continue
+        }
+
+        databases = append(databases, datname)
+    }
+
+    // Query the size of the databases
+    for _, datname := range databases {
         var size int64
-        if err := rows.Scan(&datname, &size); err != nil {
+        err = db.QueryRowContext(ctx, "SELECT pg_database_size($1)", datname).Scan(&size)
+        if err != nil {
             return err
         }
 
@@ -68,3 +105,12 @@ func (PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- pro
     }
     return nil
 }
+
+func sliceContains(slice []string, s string) bool {
+    for _, item := range slice {
+        if item == s {
+            return true
+        }
+    }
+    return false
+}
@@ -18,7 +18,6 @@ import (
     "database/sql"
     "time"
 
-    "github.com/go-kit/log"
     "github.com/prometheus/client_golang/prometheus"
 )
 
@@ -29,7 +28,7 @@ func init() {
 type PGStatBGWriterCollector struct {
 }
 
-func NewPGStatBGWriterCollector(logger log.Logger) (Collector, error) {
+func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) {
     return &PGStatBGWriterCollector{}, nil
 }
 
@@ -30,7 +30,7 @@ type ProbeCollector struct {
     db *sql.DB
 }
 
-func NewProbeCollector(logger log.Logger, registry *prometheus.Registry, dsn config.DSN) (*ProbeCollector, error) {
+func NewProbeCollector(logger log.Logger, excludeDatabases []string, registry *prometheus.Registry, dsn config.DSN) (*ProbeCollector, error) {
     collectors := make(map[string]Collector)
     initiatedCollectorsMtx.Lock()
     defer initiatedCollectorsMtx.Unlock()
|
|||||||
if collector, ok := initiatedCollectors[key]; ok {
|
if collector, ok := initiatedCollectors[key]; ok {
|
||||||
collectors[key] = collector
|
collectors[key] = collector
|
||||||
} else {
|
} else {
|
||||||
collector, err := factories[key](log.With(logger, "collector", key))
|
collector, err := factories[key](
|
||||||
|
collectorConfig{
|
||||||
|
logger: log.With(logger, "collector", key),
|
||||||
|
excludeDatabases: excludeDatabases,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@@ -29,8 +29,8 @@ type PGReplicationSlotCollector struct {
     log log.Logger
 }
 
-func NewPGReplicationSlotCollector(logger log.Logger) (Collector, error) {
-    return &PGReplicationSlotCollector{log: logger}, nil
+func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) {
+    return &PGReplicationSlotCollector{log: config.logger}, nil
 }
 
 var pgReplicationSlot = map[string]*prometheus.Desc{
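For reference, a self-contained sketch of the list-then-size pattern the reworked pg_database collector Update method uses: query pg_database for names, drop excluded names in Go rather than in SQL, then fetch each remaining database's size with pg_database_size($1). The databaseSizes helper, the lib/pq driver import, and the placeholder DSN below are assumptions for illustration only and do not come from the repository.

// Minimal sketch, assuming a reachable PostgreSQL server; not part of the commit.
package main

import (
    "context"
    "database/sql"
    "fmt"

    _ "github.com/lib/pq" // assumed driver for this sketch; the exporter handles DSNs itself
)

// databaseSizes lists databases, skips excluded ones in Go, then asks for each
// remaining database's size individually, mirroring the two-step approach
// described in the Update comment in the diff above.
func databaseSizes(ctx context.Context, db *sql.DB, excluded []string) (map[string]int64, error) {
    rows, err := db.QueryContext(ctx, "SELECT datname FROM pg_database")
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    var databases []string
    for rows.Next() {
        var datname string
        if err := rows.Scan(&datname); err != nil {
            return nil, err
        }
        skip := false
        for _, ex := range excluded { // same idea as the commit's sliceContains helper
            if ex == datname {
                skip = true
                break
            }
        }
        if !skip {
            databases = append(databases, datname)
        }
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }

    sizes := make(map[string]int64)
    for _, datname := range databases {
        var size int64
        if err := db.QueryRowContext(ctx, "SELECT pg_database_size($1)", datname).Scan(&size); err != nil {
            return nil, err
        }
        sizes[datname] = size
    }
    return sizes, nil
}

func main() {
    // Placeholder DSN; adjust for a real server.
    db, err := sql.Open("postgres", "postgres://localhost/postgres?sslmode=disable")
    if err != nil {
        panic(err)
    }
    defer db.Close()
    fmt.Println(databaseSizes(context.Background(), db, []string{"template0", "template1"}))
}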