Mirror of https://github.com/prometheus-community/postgres_exporter, synced 2025-04-22 06:55:41 +00:00.
Merge pull request #618 from sysadmind/multi-target
Add multi-target support
This commit is contained in commit e552a37f21.

README.md (30 lines changed)
@@ -21,6 +21,36 @@ docker run \
   quay.io/prometheuscommunity/postgres-exporter
 ```
 
+## Multi-Target Support (BETA)
+**This feature is in beta and may require changes in future releases. Feedback is welcome.**
+
+This exporter supports the [multi-target pattern](https://prometheus.io/docs/guides/multi-target-exporter/). This allows running a single instance of this exporter for multiple postgres targets. Using the multi-target functionality of this exporter is **optional** and meant for cases where it is impossible to install the exporter as a sidecar, for example SaaS-managed services.
+
+To use the multi-target functionality, send an HTTP request to the endpoint `/probe?target=foo:5432`, where `target` is set to the DSN of the postgres instance to scrape metrics from.
+
+To avoid putting sensitive information like username and password in the URL, preconfigured auth modules are supported via the [auth_modules](#auth_modules) section of the config file. auth_modules for DSNs can be used with the `/probe` endpoint by specifying the `?auth_module=foo` HTTP parameter.
+
+## Configuration File
+
+The configuration file controls the behavior of the exporter. It can be set using the `--config.file` command line flag and defaults to `postgres_exporter.yml`.
+
+### auth_modules
+
+This section defines preset authentication and connection parameters for use in the [multi-target endpoint](#multi-target-support-beta). `auth_modules` is a map of modules, with the key being the identifier which can be used in the `/probe` endpoint.
+Currently only the `userpass` type is supported.
+
+Example:
+```yaml
+auth_modules:
+  foo1: # Set this to any name you want
+    type: userpass
+    userpass:
+      username: first
+      password: firstpass
+    options:
+      # options become key=value parameters of the DSN
+      sslmode: disable
+```
 
 ## Building and running
 
     git clone https://github.com/prometheus-community/postgres_exporter.git

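For context (this note and the sketch below are editorial and not part of the commit), the `/probe` endpoint added by this README section is normally driven from Prometheus with the usual probe-style relabeling. A minimal scrape configuration might look like the following; the job name, the exporter address `postgres-exporter:9187`, the target DSNs, and the `foo1` auth module are illustrative assumptions:

```yaml
scrape_configs:
  - job_name: postgres-probe          # illustrative name
    metrics_path: /probe
    params:
      auth_module: [foo1]             # must match a module defined in postgres_exporter.yml
    static_configs:
      - targets:
          - db1.example.com:5432/postgres
          - db2.example.com:5432/postgres
    relabel_configs:
      # Pass the listed DSN to the exporter as the ?target= parameter.
      - source_labels: [__address__]
        target_label: __param_target
      # Keep the probed instance visible as the "instance" label.
      - source_labels: [__param_target]
        target_label: instance
      # Actually scrape the exporter, not the database address.
      - target_label: __address__
        replacement: postgres-exporter:9187
```

This is the same relabeling scheme described in the multi-target exporter guide linked from the README; only the `target` and `auth_module` parameters are specific to this exporter.
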
@@ -162,6 +162,12 @@ func getDataSources() ([]string, error) {
         uri = os.Getenv("DATA_SOURCE_URI")
     }
 
+    // No datasources found. This allows us to support the multi-target pattern
+    // without an explicit datasource.
+    if uri == "" {
+        return []string{}, nil
+    }
+
     dsn = "postgresql://" + ui + "@" + uri
 
     return []string{dsn}, nil

@@ -20,6 +20,7 @@ import (
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
     "github.com/prometheus-community/postgres_exporter/collector"
+    "github.com/prometheus-community/postgres_exporter/config"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
     "github.com/prometheus/common/promlog"

@@ -31,6 +32,11 @@ import (
 )
 
 var (
+    c = config.ConfigHandler{
+        Config: &config.Config{},
+    }
+
+    configFile    = kingpin.Flag("config.file", "Postgres exporter configuration file.").Default("postgres_exporter.yml").String()
     listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").Envar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
     webConfig     = webflag.AddFlags(kingpin.CommandLine)
     metricPath    = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()

@@ -85,14 +91,14 @@ func main() {
         return
     }
 
-    dsn, err := getDataSources()
-    if err != nil {
-        level.Error(logger).Log("msg", "Failed reading data sources", "err", err.Error())
-        os.Exit(1)
+    if err := c.ReloadConfig(*configFile, logger); err != nil {
+        // This is not fatal, but it means that auth must be provided for every dsn.
+        level.Error(logger).Log("msg", "Error loading config", "err", err)
     }
 
-    if len(dsn) == 0 {
-        level.Error(logger).Log("msg", "Couldn't find environment variables describing the datasource to use")
+    dsns, err := getDataSources()
+    if err != nil {
+        level.Error(logger).Log("msg", "Failed reading data sources", "err", err.Error())
         os.Exit(1)
     }
 
@@ -106,7 +112,7 @@ func main() {
         IncludeDatabases(*includeDatabases),
     }
 
-    exporter := NewExporter(dsn, opts...)
+    exporter := NewExporter(dsns, opts...)
     defer func() {
         exporter.servers.Close()
     }()

@@ -115,6 +121,12 @@ func main() {
 
     prometheus.MustRegister(exporter)
 
+    // TODO(@sysadmind): Remove this with multi-target support. We are removing multiple DSN support
+    dsn := ""
+    if len(dsns) > 0 {
+        dsn = dsns[0]
+    }
+
     pe, err := collector.NewPostgresCollector(
         logger,
         dsn,

@@ -122,9 +134,9 @@ func main() {
     )
     if err != nil {
         level.Error(logger).Log("msg", "Failed to create PostgresCollector", "err", err.Error())
-        os.Exit(1)
-    }
+    } else {
         prometheus.MustRegister(pe)
+    }
 
     http.Handle(*metricPath, promhttp.Handler())
     http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {

@@ -132,6 +144,8 @@ func main() {
         w.Write(landingPage) // nolint: errcheck
     })
 
+    http.HandleFunc("/probe", handleProbe(logger))
+
     level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress)
     srv := &http.Server{Addr: *listenAddress}
     if err := web.ListenAndServe(srv, *webConfig, logger); err != nil {

cmd/postgres_exporter/probe.go (new file, 105 lines)
@@ -0,0 +1,105 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/prometheus-community/postgres_exporter/collector"
    "github.com/prometheus-community/postgres_exporter/config"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func handleProbe(logger log.Logger) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        ctx := r.Context()
        conf := c.GetConfig()
        params := r.URL.Query()
        target := params.Get("target")
        if target == "" {
            http.Error(w, "target is required", http.StatusBadRequest)
            return
        }
        var authModule config.AuthModule
        authModuleName := params.Get("auth_module")
        if authModuleName == "" {
            level.Info(logger).Log("msg", "no auth_module specified, using default")
        } else {
            var ok bool
            authModule, ok = conf.AuthModules[authModuleName]
            if !ok {
                http.Error(w, fmt.Sprintf("auth_module %s not found", authModuleName), http.StatusBadRequest)
                return
            }
            if authModule.UserPass.Username == "" || authModule.UserPass.Password == "" {
                http.Error(w, fmt.Sprintf("auth_module %s has no username or password", authModuleName), http.StatusBadRequest)
                return
            }
        }

        dsn, err := authModule.ConfigureTarget(target)
        if err != nil {
            level.Error(logger).Log("msg", "failed to configure target", "err", err)
            http.Error(w, fmt.Sprintf("could not configure dsn for target: %v", err), http.StatusBadRequest)
            return
        }

        // TODO(@sysadmind): Timeout

        probeSuccessGauge := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "probe_success",
            Help: "Displays whether or not the probe was a success",
        })
        probeDurationGauge := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "probe_duration_seconds",
            Help: "Returns how long the probe took to complete in seconds",
        })

        tl := log.With(logger, "target", target)

        start := time.Now()
        registry := prometheus.NewRegistry()
        registry.MustRegister(probeSuccessGauge)
        registry.MustRegister(probeDurationGauge)

        // Run the probe
        pc, err := collector.NewProbeCollector(tl, registry, dsn)
        if err != nil {
            probeSuccessGauge.Set(0)
            probeDurationGauge.Set(time.Since(start).Seconds())
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }

        // TODO(@sysadmind): Remove the registry.MustRegister() call below and instead handle the collection here. That will allow
        // for the passing of context, handling of timeouts, and more control over the collection.
        // The current NewProbeCollector() implementation relies on the MustNewConstMetric() call to create the metrics which is not
        // ideal to use without the registry.MustRegister() call.
        _ = ctx

        registry.MustRegister(pc)

        duration := time.Since(start).Seconds()
        probeDurationGauge.Set(duration)

        // TODO check success, etc
        h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
        h.ServeHTTP(w, r)
    }
}

@@ -15,6 +15,7 @@ package collector
 
 import (
     "context"
+    "database/sql"
     "errors"
     "fmt"
     "sync"

@@ -58,7 +59,7 @@ var (
 )
 
 type Collector interface {
-    Update(ctx context.Context, server *server, ch chan<- prometheus.Metric) error
+    Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error
 }
 
 func registerCollector(name string, isDefaultEnabled bool, createFunc func(logger log.Logger) (Collector, error)) {

@@ -86,13 +87,13 @@ type PostgresCollector struct {
     Collectors map[string]Collector
     logger     log.Logger
 
-    servers map[string]*server
+    db *sql.DB
 }
 
 type Option func(*PostgresCollector) error
 
 // NewPostgresCollector creates a new PostgresCollector.
-func NewPostgresCollector(logger log.Logger, dsns []string, filters []string, options ...Option) (*PostgresCollector, error) {
+func NewPostgresCollector(logger log.Logger, dsn string, filters []string, options ...Option) (*PostgresCollector, error) {
     p := &PostgresCollector{
         logger: logger,
     }

@@ -136,17 +137,18 @@ func NewPostgresCollector(logger log.Logger, dsns []string, filters []string, op
 
     p.Collectors = collectors
 
-    servers := make(map[string]*server)
-    for _, dsn := range dsns {
-        s, err := makeServer(dsn)
-        if err != nil {
-            return nil, err
-        }
-
-        servers[dsn] = s
+    if dsn == "" {
+        return nil, errors.New("empty dsn")
     }
 
-    p.servers = servers
+    db, err := sql.Open("postgres", dsn)
+    if err != nil {
+        return nil, err
+    }
+    db.SetMaxOpenConns(1)
+    db.SetMaxIdleConns(1)
+
+    p.db = db
 
     return p, nil
 }

@@ -160,32 +162,20 @@ func (p PostgresCollector) Describe(ch chan<- *prometheus.Desc) {
 // Collect implements the prometheus.Collector interface.
 func (p PostgresCollector) Collect(ch chan<- prometheus.Metric) {
     ctx := context.TODO()
-    wg := sync.WaitGroup{}
-    wg.Add(len(p.servers))
-    for _, s := range p.servers {
-        go func(s *server) {
-            p.subCollect(ctx, s, ch)
-            wg.Done()
-        }(s)
-    }
-    wg.Wait()
-}
-
-func (p PostgresCollector) subCollect(ctx context.Context, server *server, ch chan<- prometheus.Metric) {
     wg := sync.WaitGroup{}
     wg.Add(len(p.Collectors))
     for name, c := range p.Collectors {
         go func(name string, c Collector) {
-            execute(ctx, name, c, server, ch, p.logger)
+            execute(ctx, name, c, p.db, ch, p.logger)
             wg.Done()
         }(name, c)
     }
     wg.Wait()
 }
 
-func execute(ctx context.Context, name string, c Collector, s *server, ch chan<- prometheus.Metric, logger log.Logger) {
+func execute(ctx context.Context, name string, c Collector, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) {
     begin := time.Now()
-    err := c.Update(ctx, s, ch)
+    err := c.Update(ctx, db, ch)
     duration := time.Since(begin)
     var success float64
 
@@ -15,6 +15,7 @@ package collector
 
 import (
     "context"
+    "database/sql"
 
     "github.com/go-kit/log"
     "github.com/prometheus/client_golang/prometheus"

@@ -36,15 +37,11 @@ var pgDatabase = map[string]*prometheus.Desc{
     "size_bytes": prometheus.NewDesc(
         "pg_database_size_bytes",
         "Disk space used by the database",
-        []string{"datname", "server"}, nil,
+        []string{"datname"}, nil,
     ),
 }
 
-func (PGDatabaseCollector) Update(ctx context.Context, server *server, ch chan<- prometheus.Metric) error {
-    db, err := server.GetDB()
-    if err != nil {
-        return err
-    }
+func (PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
     rows, err := db.QueryContext(ctx,
         `SELECT pg_database.datname
         ,pg_database_size(pg_database.datname)

@@ -63,7 +60,7 @@ func (PGDatabaseCollector) Update(ctx context.Context, server *server, ch chan<-
 
         ch <- prometheus.MustNewConstMetric(
             pgDatabase["size_bytes"],
-            prometheus.GaugeValue, float64(size), datname, server.GetName(),
+            prometheus.GaugeValue, float64(size), datname,
         )
     }
     if err := rows.Err(); err != nil {

@@ -15,6 +15,7 @@ package collector
 
 import (
     "context"
+    "database/sql"
     "time"
 
     "github.com/go-kit/log"

@@ -38,77 +39,72 @@ var statBGWriter = map[string]*prometheus.Desc{
     "checkpoints_timed": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"),
         "Number of scheduled checkpoints that have been performed",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "checkpoints_req": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"),
         "Number of requested checkpoints that have been performed",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "checkpoint_write_time": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"),
         "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "checkpoint_sync_time": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"),
         "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "buffers_checkpoint": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"),
         "Number of buffers written during checkpoints",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "buffers_clean": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"),
         "Number of buffers written by the background writer",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "maxwritten_clean": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"),
         "Number of times the background writer stopped a cleaning scan because it had written too many buffers",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "buffers_backend": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"),
         "Number of buffers written directly by a backend",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "buffers_backend_fsync": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"),
         "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "buffers_alloc": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"),
         "Number of buffers allocated",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
     "stats_reset": prometheus.NewDesc(
         prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"),
         "Time at which these statistics were last reset",
-        []string{"server"},
+        []string{},
         prometheus.Labels{},
     ),
 }
 
-func (PGStatBGWriterCollector) Update(ctx context.Context, server *server, ch chan<- prometheus.Metric) error {
-    db, err := server.GetDB()
-    if err != nil {
-        return err
-    }
-
+func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
     row := db.QueryRowContext(ctx,
         `SELECT
         checkpoints_timed

@@ -136,7 +132,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, server *server, ch ch
     var ba int
     var sr time.Time
 
-    err = row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr)
+    err := row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr)
     if err != nil {
         return err
     }

@@ -145,67 +141,56 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, server *server, ch ch
         statBGWriter["checkpoints_timed"],
         prometheus.CounterValue,
         float64(cpt),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["checkpoints_req"],
         prometheus.CounterValue,
         float64(cpr),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["checkpoint_write_time"],
         prometheus.CounterValue,
         float64(cpwt),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["checkpoint_sync_time"],
         prometheus.CounterValue,
         float64(cpst),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["buffers_checkpoint"],
         prometheus.CounterValue,
         float64(bcp),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["buffers_clean"],
         prometheus.CounterValue,
         float64(bc),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["maxwritten_clean"],
         prometheus.CounterValue,
         float64(mwc),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["buffers_backend"],
         prometheus.CounterValue,
         float64(bb),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["buffers_backend_fsync"],
         prometheus.CounterValue,
         float64(bbf),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["buffers_alloc"],
         prometheus.CounterValue,
         float64(ba),
-        server.GetName(),
     )
     ch <- prometheus.MustNewConstMetric(
         statBGWriter["stats_reset"],
         prometheus.CounterValue,
         float64(sr.Unix()),
-        server.GetName(),
     )
 
     return nil

collector/probe.go (new file, 90 lines)
@@ -0,0 +1,90 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
    "context"
    "database/sql"
    "fmt"
    "strings"
    "sync"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
)

type ProbeCollector struct {
    registry   *prometheus.Registry
    collectors map[string]Collector
    logger     log.Logger
    db         *sql.DB
}

func NewProbeCollector(logger log.Logger, registry *prometheus.Registry, dsn string) (*ProbeCollector, error) {
    collectors := make(map[string]Collector)
    initiatedCollectorsMtx.Lock()
    defer initiatedCollectorsMtx.Unlock()
    for key, enabled := range collectorState {
        // TODO: Handle filters
        // if !*enabled || (len(f) > 0 && !f[key]) {
        //     continue
        // }
        if !*enabled {
            continue
        }
        if collector, ok := initiatedCollectors[key]; ok {
            collectors[key] = collector
        } else {
            collector, err := factories[key](log.With(logger, "collector", key))
            if err != nil {
                return nil, err
            }
            collectors[key] = collector
            initiatedCollectors[key] = collector
        }
    }

    if !strings.HasPrefix(dsn, "postgres://") {
        dsn = fmt.Sprintf("postgres://%s", dsn)
    }

    db, err := sql.Open("postgres", dsn)
    if err != nil {
        return nil, err
    }
    db.SetMaxOpenConns(1)
    db.SetMaxIdleConns(1)

    return &ProbeCollector{
        registry:   registry,
        collectors: collectors,
        logger:     logger,
        db:         db,
    }, nil
}

func (pc *ProbeCollector) Describe(ch chan<- *prometheus.Desc) {
}

func (pc *ProbeCollector) Collect(ch chan<- prometheus.Metric) {
    wg := sync.WaitGroup{}
    wg.Add(len(pc.collectors))
    for name, c := range pc.collectors {
        go func(name string, c Collector) {
            execute(context.TODO(), name, c, pc.db, ch, pc.logger)
            wg.Done()
        }(name, c)
    }
    wg.Wait()
}

Deleted file (100 lines):
@@ -1,100 +0,0 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
    "database/sql"
    "fmt"
    "strings"

    "github.com/lib/pq"
)

type server struct {
    dsn  string
    name string
    db   *sql.DB
}

func makeServer(dsn string) (*server, error) {
    name, err := parseServerName(dsn)
    if err != nil {
        return nil, err
    }
    return &server{
        dsn:  dsn,
        name: name,
    }, nil
}

func (s *server) GetDB() (*sql.DB, error) {
    if s.db != nil {
        return s.db, nil
    }

    db, err := sql.Open("postgres", s.dsn)
    if err != nil {
        return nil, err
    }
    db.SetMaxOpenConns(1)
    db.SetMaxIdleConns(1)

    s.db = db

    return s.db, nil
}

func (s *server) GetName() string {
    return s.name
}

func (s *server) String() string {
    return s.name
}

func parseServerName(url string) (string, error) {
    dsn, err := pq.ParseURL(url)
    if err != nil {
        dsn = url
    }

    pairs := strings.Split(dsn, " ")
    kv := make(map[string]string, len(pairs))
    for _, pair := range pairs {
        splitted := strings.SplitN(pair, "=", 2)
        if len(splitted) != 2 {
            return "", fmt.Errorf("malformed dsn %q", dsn)
        }
        // Newer versions of pq.ParseURL quote values so trim them off if they exist
        key := strings.Trim(splitted[0], "'\"")
        value := strings.Trim(splitted[1], "'\"")
        kv[key] = value
    }

    var fingerprint string

    if host, ok := kv["host"]; ok {
        fingerprint += host
    } else {
        fingerprint += "localhost"
    }

    if port, ok := kv["port"]; ok {
        fingerprint += ":" + port
    } else {
        fingerprint += ":5432"
    }

    return fingerprint, nil
}

config/config.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
    "fmt"
    "net/url"
    "os"
    "strings"
    "sync"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "gopkg.in/yaml.v3"
)

var (
    configReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{
        Namespace: "postgres_exporter",
        Name:      "config_last_reload_successful",
        Help:      "Postgres exporter config loaded successfully.",
    })

    configReloadSeconds = promauto.NewGauge(prometheus.GaugeOpts{
        Namespace: "postgres_exporter",
        Name:      "config_last_reload_success_timestamp_seconds",
        Help:      "Timestamp of the last successful configuration reload.",
    })
)

type Config struct {
    AuthModules map[string]AuthModule `yaml:"auth_modules"`
}

type AuthModule struct {
    Type     string   `yaml:"type"`
    UserPass UserPass `yaml:"userpass,omitempty"`
    // Add alternative auth modules here
    Options map[string]string `yaml:"options"`
}

type UserPass struct {
    Username string `yaml:"username"`
    Password string `yaml:"password"`
}

type ConfigHandler struct {
    sync.RWMutex
    Config *Config
}

func (ch *ConfigHandler) GetConfig() *Config {
    ch.RLock()
    defer ch.RUnlock()
    return ch.Config
}

func (ch *ConfigHandler) ReloadConfig(f string, logger log.Logger) error {
    config := &Config{}
    var err error
    defer func() {
        if err != nil {
            configReloadSuccess.Set(0)
        } else {
            configReloadSuccess.Set(1)
            configReloadSeconds.SetToCurrentTime()
        }
    }()

    yamlReader, err := os.Open(f)
    if err != nil {
        return fmt.Errorf("Error opening config file %q: %s", f, err)
    }
    defer yamlReader.Close()
    decoder := yaml.NewDecoder(yamlReader)
    decoder.KnownFields(true)

    if err = decoder.Decode(config); err != nil {
        return fmt.Errorf("Error parsing config file %q: %s", f, err)
    }

    ch.Lock()
    ch.Config = config
    ch.Unlock()
    return nil
}

func (m AuthModule) ConfigureTarget(target string) (string, error) {
    // ip:port urls do not parse properly and that is the typical way users interact with postgres
    t := fmt.Sprintf("exporter://%s", target)
    u, err := url.Parse(t)
    if err != nil {
        return "", err
    }

    if m.Type == "userpass" {
        u.User = url.UserPassword(m.UserPass.Username, m.UserPass.Password)
    }

    query := u.Query()
    for k, v := range m.Options {
        query.Set(k, v)
    }
    u.RawQuery = query.Encode()

    parsed := u.String()
    trim := strings.TrimPrefix(parsed, "exporter://")

    return trim, nil
}

config/config_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
    "testing"
)

func TestLoadConfig(t *testing.T) {
    ch := &ConfigHandler{
        Config: &Config{},
    }

    err := ch.ReloadConfig("testdata/config-good.yaml", nil)
    if err != nil {
        t.Errorf("Error loading config: %s", err)
    }
}

func TestLoadBadConfigs(t *testing.T) {
    ch := &ConfigHandler{
        Config: &Config{},
    }

    tests := []struct {
        input string
        want  string
    }{
        {
            input: "testdata/config-bad-auth-module.yaml",
            want:  "Error parsing config file \"testdata/config-bad-auth-module.yaml\": yaml: unmarshal errors:\n line 3: field pretendauth not found in type config.AuthModule",
        },
        {
            input: "testdata/config-bad-extra-field.yaml",
            want:  "Error parsing config file \"testdata/config-bad-extra-field.yaml\": yaml: unmarshal errors:\n line 8: field doesNotExist not found in type config.AuthModule",
        },
    }

    for _, test := range tests {
        t.Run(test.input, func(t *testing.T) {
            got := ch.ReloadConfig(test.input, nil)
            if got == nil || got.Error() != test.want {
                t.Fatalf("ReloadConfig(%q) = %v, want %s", test.input, got, test.want)
            }
        })
    }
}

config/testdata/config-bad-auth-module.yaml (new file, vendored, 7 lines)
@@ -0,0 +1,7 @@
auth_modules:
  foo:
    pretendauth:
      username: test
      password: pass
    options:
      extra: "1"

config/testdata/config-bad-extra-field.yaml (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
auth_modules:
  foo:
    userpass:
      username: test
      password: pass
    options:
      extra: "1"
    doesNotExist: test

config/testdata/config-good.yaml (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
auth_modules:
  first:
    type: userpass
    userpass:
      username: first
      password: firstpass
    options:
      sslmode: disable

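As a worked example of the code path above (editorial, not part of the commit): the host, port, and database name below are hypothetical, the `first` module is the one defined in this test fixture, and the keys in the snippet are purely descriptive rather than part of any configuration schema.

```yaml
# Illustrative probe request against the exporter:
probe_request: /probe?auth_module=first&target=10.0.0.5:5432/postgres
# AuthModule.ConfigureTarget injects the module's credentials and turns each
# entry under `options` into a key=value query parameter:
configured_dsn: first:firstpass@10.0.0.5:5432/postgres?sslmode=disable
# collector.NewProbeCollector prefixes postgres:// before calling sql.Open:
final_dsn: postgres://first:firstpass@10.0.0.5:5432/postgres?sslmode=disable
```
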
go.mod (1 line changed)
@@ -13,6 +13,7 @@ require (
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 )
 
 require (

go.sum (2 lines changed)
@@ -494,6 +494,8 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=