mirror of
https://github.com/prometheus-community/postgres_exporter
synced 2025-05-16 23:08:45 +00:00
multi-server-exporter multi server exporter is introduced
This commit is contained in:
parent
72446a5b1e
commit
1d6a733ba2
34
README.md
34
README.md
@ -49,16 +49,19 @@ Package vendoring is handled with [`govendor`](https://github.com/kardianos/gove
|
||||
Path under which to expose metrics. Default is `/metrics`.
|
||||
|
||||
* `disable-default-metrics`
|
||||
Use only metrics supplied from `queries.yaml` via `--extend.query-path`
|
||||
Use only metrics supplied from `queries.yaml` via `--extend.query-path`.
|
||||
|
||||
* `disable-settings-metrics`
|
||||
Use the flag if you don't want to scrape `pg_settings`.
|
||||
|
||||
* `extend.query-path`
|
||||
Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
|
||||
for examples of the format.
|
||||
|
||||
|
||||
* `dumpmaps`
|
||||
Do not run - print the internal representation of the metric maps. Useful when debugging a custom
|
||||
queries file.
|
||||
|
||||
|
||||
* `log.level`
|
||||
Set logging level: one of `debug`, `info`, `warn`, `error`, `fatal`
|
||||
|
||||
@ -78,21 +81,23 @@ The following environment variables configure the exporter:
|
||||
URI may contain the username and password to connect with.
|
||||
|
||||
* `DATA_SOURCE_URI`
|
||||
an alternative to DATA_SOURCE_NAME which exclusively accepts the raw URI
|
||||
an alternative to `DATA_SOURCE_NAME` which exclusively accepts the raw URI
|
||||
without a username and password component.
|
||||
|
||||
* `DATA_SOURCE_USER`
|
||||
When using `DATA_SOURCE_URI`, this environment variable is used to specify
|
||||
the username.
|
||||
|
||||
* `DATA_SOURCE_USER_FILE`
|
||||
The same, but reads the username from a file.
|
||||
|
||||
* `DATA_SOURCE_PASS`
|
||||
When using `DATA_SOURCE_URI`, this environment variable is used to specify
|
||||
the password to connect with.
|
||||
|
||||
* `DATA_SOURCE_PASS_FILE`
|
||||
The same as above but reads the password from a file.
|
||||
|
||||
|
||||
* `PG_EXPORTER_WEB_LISTEN_ADDRESS`
|
||||
Address to listen on for web interface and telemetry. Default is `:9187`.
|
||||
|
||||
@ -102,13 +107,16 @@ The following environment variables configure the exporter:
|
||||
* `PG_EXPORTER_DISABLE_DEFAULT_METRICS`
|
||||
Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`.
|
||||
|
||||
* `PG_EXPORTER_DISABLE_SETTINGS_METRICS`
|
||||
Use the flag if you don't want to scrape `pg_settings`. Value can be `true` or `false`. Default is `false`.
|
||||
|
||||
* `PG_EXPORTER_EXTEND_QUERY_PATH`
|
||||
Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
|
||||
for examples of the format.
|
||||
|
||||
* `PG_EXPORTER_CONSTANT_LABELS`
|
||||
Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
|
||||
|
||||
|
||||
Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.
|
||||
|
||||
### Setting the Postgres server's data source name
|
||||
@ -120,6 +128,10 @@ For running it locally on a default Debian/Ubuntu install, this will work (trans
|
||||
|
||||
sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter
|
||||
|
||||
Also, you can set a list of sources to scrape different instances from the one exporter setup. Just define a comma separated string.
|
||||
|
||||
sudo -u postgres DATA_SOURCE_NAME="port=5432,port=6432" postgres_exporter
|
||||
|
||||
See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string.
|
||||
|
||||
### Adding new metrics
|
||||
@ -143,18 +155,18 @@ The -extend.query-path command-line argument specifies a YAML file containing ad
|
||||
Some examples are provided in [queries.yaml](queries.yaml).
|
||||
|
||||
### Disabling default metrics
|
||||
To work with non-officially-supported postgres versions you can try disabling (e.g. 8.2.15)
|
||||
To work with non-officially-supported postgres versions (e.g. 8.2.15)
|
||||
or a variant of postgres (e.g. Greenplum) you can disable the default metrics with the `--disable-default-metrics`
|
||||
flag. This removes all built-in metrics, and uses only metrics defined by queries in the `queries.yaml` file you supply
|
||||
(so you must supply one, otherwise the exporter will return nothing but internal statuses and not your database).
|
||||
|
||||
### Running as non-superuser
|
||||
|
||||
To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication`
|
||||
as non-superuser you have to create views as a superuser, and assign permissions
|
||||
separately to those.
|
||||
To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication`
|
||||
as non-superuser you have to create views as a superuser, and assign permissions
|
||||
separately to those.
|
||||
|
||||
In PostgreSQL, views run with the permissions of the user that created them so
|
||||
In PostgreSQL, views run with the permissions of the user that created them so
|
||||
they can act as security barriers.
|
||||
|
||||
```sql
|
||||
|
@ -1,8 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
@ -13,8 +11,8 @@ import (
|
||||
)
|
||||
|
||||
// Query the pg_settings view containing runtime variables
|
||||
func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
|
||||
log.Debugln("Querying pg_setting view")
|
||||
func querySettings(ch chan<- prometheus.Metric, server *Server) error {
|
||||
log.Debugf("Querying pg_setting view on %q", server)
|
||||
|
||||
// pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html
|
||||
//
|
||||
@ -22,9 +20,9 @@ func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
|
||||
// types in normaliseUnit() below
|
||||
query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');"
|
||||
|
||||
rows, err := db.Query(query)
|
||||
rows, err := server.db.Query(query)
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintln("Error running query on database: ", namespace, err))
|
||||
return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err)
|
||||
}
|
||||
defer rows.Close() // nolint: errcheck
|
||||
|
||||
@ -32,10 +30,10 @@ func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
|
||||
s := &pgSetting{}
|
||||
err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype)
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
|
||||
return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err)
|
||||
}
|
||||
|
||||
ch <- s.metric()
|
||||
ch <- s.metric(server.labels)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -47,7 +45,7 @@ type pgSetting struct {
|
||||
name, setting, unit, shortDesc, vartype string
|
||||
}
|
||||
|
||||
func (s *pgSetting) metric() prometheus.Metric {
|
||||
func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric {
|
||||
var (
|
||||
err error
|
||||
name = strings.Replace(s.name, ".", "_", -1)
|
||||
@ -78,7 +76,7 @@ func (s *pgSetting) metric() prometheus.Metric {
|
||||
panic(fmt.Sprintf("Unsupported vartype %q", s.vartype))
|
||||
}
|
||||
|
||||
desc := newDesc(subsystem, name, shortDesc)
|
||||
desc := newDesc(subsystem, name, shortDesc, labels)
|
||||
return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
|
||||
}
|
||||
|
||||
|
@ -3,6 +3,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
@ -25,7 +26,7 @@ var fixtures = []fixture{
|
||||
unit: "seconds",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_seconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 5,
|
||||
},
|
||||
{
|
||||
@ -41,7 +42,7 @@ var fixtures = []fixture{
|
||||
unit: "seconds",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_milliseconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 5,
|
||||
},
|
||||
{
|
||||
@ -57,7 +58,7 @@ var fixtures = []fixture{
|
||||
unit: "bytes",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_eight_kb_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 139264,
|
||||
},
|
||||
{
|
||||
@ -73,7 +74,7 @@ var fixtures = []fixture{
|
||||
unit: "bytes",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_16_kb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 49152,
|
||||
},
|
||||
{
|
||||
@ -89,7 +90,7 @@ var fixtures = []fixture{
|
||||
unit: "bytes",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_16_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 5.0331648e+07,
|
||||
},
|
||||
{
|
||||
@ -105,7 +106,7 @@ var fixtures = []fixture{
|
||||
unit: "bytes",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_32_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 1.00663296e+08,
|
||||
},
|
||||
{
|
||||
@ -121,7 +122,7 @@ var fixtures = []fixture{
|
||||
unit: "bytes",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_64_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_64_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
|
||||
v: 2.01326592e+08,
|
||||
},
|
||||
{
|
||||
@ -137,7 +138,7 @@ var fixtures = []fixture{
|
||||
unit: "",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_bool_on_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
|
||||
v: 1,
|
||||
},
|
||||
{
|
||||
@ -153,7 +154,7 @@ var fixtures = []fixture{
|
||||
unit: "",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_bool_off_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
|
||||
v: 0,
|
||||
},
|
||||
{
|
||||
@ -169,7 +170,7 @@ var fixtures = []fixture{
|
||||
unit: "seconds",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_special_minus_one_value_seconds\", help: \"foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
|
||||
v: -1,
|
||||
},
|
||||
{
|
||||
@ -185,7 +186,7 @@ var fixtures = []fixture{
|
||||
unit: "",
|
||||
err: "",
|
||||
},
|
||||
d: "Desc{fqName: \"pg_settings_rds_rds_superuser_reserved_connections\", help: \"Sets the number of connection slots reserved for rds_superusers.\", constLabels: {}, variableLabels: []}",
|
||||
d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Sets the number of connection slots reserved for rds_superusers.", constLabels: {}, variableLabels: []}`,
|
||||
v: 2,
|
||||
},
|
||||
{
|
||||
@ -233,7 +234,7 @@ func (s *PgSettingSuite) TestMetric(c *C) {
|
||||
|
||||
for _, f := range fixtures {
|
||||
d := &dto.Metric{}
|
||||
m := f.p.metric()
|
||||
m := f.p.metric(prometheus.Labels{})
|
||||
m.Write(d) // nolint: errcheck
|
||||
|
||||
c.Check(m.Desc().String(), Equals, f.d)
|
||||
|
@ -22,7 +22,7 @@ import (
|
||||
"crypto/sha256"
|
||||
|
||||
"github.com/blang/semver"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/lib/pq"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/prometheus/common/log"
|
||||
@ -33,12 +33,13 @@ import (
|
||||
var Version = "0.0.1"
|
||||
|
||||
var (
|
||||
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
|
||||
metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()
|
||||
disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
|
||||
queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
|
||||
onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
|
||||
constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_CONTANT_LABELS").String()
|
||||
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
|
||||
metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()
|
||||
disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
|
||||
disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
|
||||
queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
|
||||
onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
|
||||
constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_CONTANT_LABELS").String()
|
||||
)
|
||||
|
||||
// Metric name parts.
|
||||
@ -50,6 +51,8 @@ const (
|
||||
// Metric label used for static string data thats handy to send to Prometheus
|
||||
// e.g. version
|
||||
staticLabelName = "static"
|
||||
// Metric label used for server identification.
|
||||
serverLabelName = "server"
|
||||
)
|
||||
|
||||
// ColumnUsage should be one of several enum values which describe how a
|
||||
@ -386,7 +389,7 @@ func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]
|
||||
// TODO: test code for all cu.
|
||||
// TODO: use proper struct type system
|
||||
// TODO: the YAML this supports is "non-standard" - we should move away from it.
|
||||
func addQueries(content []byte, pgVersion semver.Version, exporterMap map[string]MetricMapNamespace, queryOverrideMap map[string]string) error {
|
||||
func addQueries(content []byte, pgVersion semver.Version, server *Server) error {
|
||||
var extra map[string]interface{}
|
||||
|
||||
err := yaml.Unmarshal(content, &extra)
|
||||
@ -450,35 +453,35 @@ func addQueries(content []byte, pgVersion semver.Version, exporterMap map[string
|
||||
}
|
||||
|
||||
// Convert the loaded metric map into exporter representation
|
||||
partialExporterMap := makeDescMap(pgVersion, metricMaps)
|
||||
partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps)
|
||||
|
||||
// Merge the two maps (which are now quite flattened)
|
||||
for k, v := range partialExporterMap {
|
||||
_, found := exporterMap[k]
|
||||
_, found := server.metricMap[k]
|
||||
if found {
|
||||
log.Debugln("Overriding metric", k, "from user YAML file.")
|
||||
} else {
|
||||
log.Debugln("Adding new metric", k, "from user YAML file.")
|
||||
}
|
||||
exporterMap[k] = v
|
||||
server.metricMap[k] = v
|
||||
}
|
||||
|
||||
// Merge the query override map
|
||||
for k, v := range newQueryOverrides {
|
||||
_, found := queryOverrideMap[k]
|
||||
_, found := server.queryOverrides[k]
|
||||
if found {
|
||||
log.Debugln("Overriding query override", k, "from user YAML file.")
|
||||
} else {
|
||||
log.Debugln("Adding new query override", k, "from user YAML file.")
|
||||
}
|
||||
queryOverrideMap[k] = v
|
||||
server.queryOverrides[k] = v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
|
||||
func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace {
|
||||
func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace {
|
||||
var metricMap = make(map[string]MetricMapNamespace)
|
||||
|
||||
for namespace, mappings := range metricMaps {
|
||||
@ -492,8 +495,6 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
|
||||
}
|
||||
}
|
||||
|
||||
constLabels := newConstLabels()
|
||||
|
||||
for columnName, columnMapping := range mappings {
|
||||
// Check column version compatibility for the current map
|
||||
// Force to discard if not compatible.
|
||||
@ -525,7 +526,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
|
||||
case COUNTER:
|
||||
thisMap[columnName] = MetricMap{
|
||||
vtype: prometheus.CounterValue,
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels),
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
|
||||
conversion: func(in interface{}) (float64, bool) {
|
||||
return dbToFloat64(in)
|
||||
},
|
||||
@ -533,7 +534,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
|
||||
case GAUGE:
|
||||
thisMap[columnName] = MetricMap{
|
||||
vtype: prometheus.GaugeValue,
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels),
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
|
||||
conversion: func(in interface{}) (float64, bool) {
|
||||
return dbToFloat64(in)
|
||||
},
|
||||
@ -541,7 +542,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
|
||||
case MAPPEDMETRIC:
|
||||
thisMap[columnName] = MetricMap{
|
||||
vtype: prometheus.GaugeValue,
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels),
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
|
||||
conversion: func(in interface{}) (float64, bool) {
|
||||
text, ok := in.(string)
|
||||
if !ok {
|
||||
@ -558,7 +559,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
|
||||
case DURATION:
|
||||
thisMap[columnName] = MetricMap{
|
||||
vtype: prometheus.GaugeValue,
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, constLabels),
|
||||
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
|
||||
conversion: func(in interface{}) (float64, bool) {
|
||||
var durationString string
|
||||
switch t := in.(type) {
|
||||
@ -676,25 +677,56 @@ func dbToString(t interface{}) (string, bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// Exporter collects Postgres metrics. It implements prometheus.Collector.
|
||||
type Exporter struct {
|
||||
// Holds a reference to the build in column mappings. Currently this is for testing purposes
|
||||
// only, since it just points to the global.
|
||||
builtinMetricMaps map[string]map[string]ColumnMapping
|
||||
func parseFingerprint(url string) (string, error) {
|
||||
dsn, err := pq.ParseURL(url)
|
||||
if err != nil {
|
||||
dsn = url
|
||||
}
|
||||
|
||||
dsn string
|
||||
disableDefaultMetrics bool
|
||||
userQueriesPath string
|
||||
duration prometheus.Gauge
|
||||
error prometheus.Gauge
|
||||
psqlUp prometheus.Gauge
|
||||
userQueriesError *prometheus.GaugeVec
|
||||
totalScrapes prometheus.Counter
|
||||
pairs := strings.Split(dsn, " ")
|
||||
kv := make(map[string]string, len(pairs))
|
||||
for _, pair := range pairs {
|
||||
splitted := strings.Split(pair, "=")
|
||||
if len(splitted) != 2 {
|
||||
return "", fmt.Errorf("malformed dsn %q", dsn)
|
||||
}
|
||||
kv[splitted[0]] = splitted[1]
|
||||
}
|
||||
|
||||
// dbDsn is the connection string used to establish the dbConnection
|
||||
dbDsn string
|
||||
// dbConnection is used to allow re-using the DB connection between scrapes
|
||||
dbConnection *sql.DB
|
||||
var fingerprint string
|
||||
|
||||
if host, ok := kv["host"]; ok {
|
||||
fingerprint += host
|
||||
} else {
|
||||
fingerprint += "localhost"
|
||||
}
|
||||
|
||||
if port, ok := kv["port"]; ok {
|
||||
fingerprint += ":" + port
|
||||
} else {
|
||||
fingerprint += ":5432"
|
||||
}
|
||||
|
||||
return fingerprint, nil
|
||||
}
|
||||
|
||||
func parseDSN(dsn string) (*url.URL, error) {
|
||||
pDSN, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Blank user info if not nil
|
||||
if pDSN.User != nil {
|
||||
pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED")
|
||||
}
|
||||
return pDSN, nil
|
||||
}
|
||||
|
||||
// Server describes a connection to Postgres.
|
||||
// Also it contains metrics map and query overrides.
|
||||
type Server struct {
|
||||
db *sql.DB
|
||||
labels prometheus.Labels
|
||||
|
||||
// Last version used to calculate metric map. If mismatch on scrape,
|
||||
// then maps are recalculated.
|
||||
@ -706,52 +738,271 @@ type Exporter struct {
|
||||
mappingMtx sync.RWMutex
|
||||
}
|
||||
|
||||
// NewExporter returns a new PostgreSQL exporter for the provided DSN.
|
||||
func NewExporter(dsn string, disableDefaultMetrics bool, userQueriesPath string) *Exporter {
|
||||
return &Exporter{
|
||||
builtinMetricMaps: builtinMetricMaps,
|
||||
dsn: dsn,
|
||||
disableDefaultMetrics: disableDefaultMetrics,
|
||||
userQueriesPath: userQueriesPath,
|
||||
duration: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "last_scrape_duration_seconds",
|
||||
Help: "Duration of the last scrape of metrics from PostgresSQL.",
|
||||
ConstLabels: newConstLabels(),
|
||||
}),
|
||||
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "scrapes_total",
|
||||
Help: "Total number of times PostgresSQL was scraped for metrics.",
|
||||
ConstLabels: newConstLabels(),
|
||||
}),
|
||||
error: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "last_scrape_error",
|
||||
Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
|
||||
ConstLabels: newConstLabels(),
|
||||
}),
|
||||
psqlUp: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "up",
|
||||
Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).",
|
||||
ConstLabels: newConstLabels(),
|
||||
}),
|
||||
userQueriesError: prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "user_queries_load_error",
|
||||
Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).",
|
||||
ConstLabels: newConstLabels(),
|
||||
}, []string{"filename", "hashsum"}),
|
||||
metricMap: nil,
|
||||
queryOverrides: nil,
|
||||
// ServerOpt configures a server.
|
||||
type ServerOpt func(*Server)
|
||||
|
||||
// ServerWithLabels configures a set of labels.
|
||||
func ServerWithLabels(labels prometheus.Labels) ServerOpt {
|
||||
return func(s *Server) {
|
||||
for k, v := range labels {
|
||||
s.labels[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewServer establishes a new connection using DSN.
|
||||
func NewServer(dsn string, opts ...ServerOpt) (*Server, error) {
|
||||
fingerprint, err := parseFingerprint(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("postgres", dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetMaxIdleConns(1)
|
||||
|
||||
log.Infof("Established new database connection to %q.", fingerprint)
|
||||
|
||||
s := &Server{
|
||||
db: db,
|
||||
labels: prometheus.Labels{
|
||||
serverLabelName: fingerprint,
|
||||
},
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close disconnects from Postgres.
|
||||
func (s *Server) Close() error {
|
||||
if s.db != nil {
|
||||
err := s.db.Close()
|
||||
s.db = nil
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ping checks connection availability and possibly invalidates the connection if it fails.
|
||||
func (s *Server) Ping() error {
|
||||
if err := s.db.Ping(); err != nil {
|
||||
if cerr := s.db.Close(); cerr != nil {
|
||||
log.Infof("Error while closing non-pinging DB connection to %q: %v", s, cerr)
|
||||
}
|
||||
s.db = nil
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns server's fingerprint.
|
||||
func (s *Server) String() string {
|
||||
return s.labels[serverLabelName]
|
||||
}
|
||||
|
||||
// Scrape loads metrics.
|
||||
func (s *Server) Scrape(ch chan<- prometheus.Metric, errGauge prometheus.Gauge, disableSettingsMetrics bool) {
|
||||
s.mappingMtx.RLock()
|
||||
defer s.mappingMtx.RUnlock()
|
||||
|
||||
if !disableSettingsMetrics {
|
||||
if err := querySettings(ch, s); err != nil {
|
||||
log.Infof("Error retrieving settings: %s", err)
|
||||
errGauge.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
errMap := queryNamespaceMappings(ch, s)
|
||||
if len(errMap) > 0 {
|
||||
errGauge.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// Servers contains a collection of servers to Postgres.
|
||||
type Servers struct {
|
||||
m sync.Mutex
|
||||
servers map[string]*Server
|
||||
opts []ServerOpt
|
||||
}
|
||||
|
||||
// NewServers creates a collection of servers to Postgres.
|
||||
func NewServers(opts ...ServerOpt) *Servers {
|
||||
return &Servers{
|
||||
servers: make(map[string]*Server),
|
||||
opts: opts,
|
||||
}
|
||||
}
|
||||
|
||||
// GetServer returns established connection from a collection.
|
||||
func (s *Servers) GetServer(dsn string) (*Server, error) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
var err error
|
||||
server, ok := s.servers[dsn]
|
||||
if !ok {
|
||||
server, err = NewServer(dsn, s.opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.servers[dsn] = server
|
||||
}
|
||||
if err = server.Ping(); err != nil {
|
||||
delete(s.servers, dsn)
|
||||
return nil, err
|
||||
}
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// Close disconnects from all known servers.
|
||||
func (s *Servers) Close() {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
for _, server := range s.servers {
|
||||
if err := server.Close(); err != nil {
|
||||
log.Errorf("failed to close connection to %q: %v", server, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Exporter collects Postgres metrics. It implements prometheus.Collector.
|
||||
type Exporter struct {
|
||||
// Holds a reference to the build in column mappings. Currently this is for testing purposes
|
||||
// only, since it just points to the global.
|
||||
builtinMetricMaps map[string]map[string]ColumnMapping
|
||||
|
||||
disableDefaultMetrics, disableSettingsMetrics bool
|
||||
|
||||
dsn []string
|
||||
userQueriesPath string
|
||||
constantLabels prometheus.Labels
|
||||
duration prometheus.Gauge
|
||||
error prometheus.Gauge
|
||||
psqlUp prometheus.Gauge
|
||||
userQueriesError *prometheus.GaugeVec
|
||||
totalScrapes prometheus.Counter
|
||||
|
||||
// servers are used to allow re-using the DB connection between scrapes.
|
||||
// servers contains metrics map and query overrides.
|
||||
servers *Servers
|
||||
}
|
||||
|
||||
// ExporterOpt configures Exporter.
|
||||
type ExporterOpt func(*Exporter)
|
||||
|
||||
// DisableDefaultMetrics configures default metrics export.
|
||||
func DisableDefaultMetrics(b bool) ExporterOpt {
|
||||
return func(e *Exporter) {
|
||||
e.disableDefaultMetrics = b
|
||||
}
|
||||
}
|
||||
|
||||
// DisableSettingsMetrics configures pg_settings export.
|
||||
func DisableSettingsMetrics(b bool) ExporterOpt {
|
||||
return func(e *Exporter) {
|
||||
e.disableSettingsMetrics = b
|
||||
}
|
||||
}
|
||||
|
||||
// WithUserQueriesPath configures user's queries path.
|
||||
func WithUserQueriesPath(p string) ExporterOpt {
|
||||
return func(e *Exporter) {
|
||||
e.userQueriesPath = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithConstantLabels configures constant labels.
|
||||
func WithConstantLabels(s string) ExporterOpt {
|
||||
return func(e *Exporter) {
|
||||
e.constantLabels = parseConstLabels(s)
|
||||
}
|
||||
}
|
||||
|
||||
func parseConstLabels(s string) prometheus.Labels {
|
||||
labels := make(prometheus.Labels)
|
||||
|
||||
parts := strings.Split(s, ",")
|
||||
for _, p := range parts {
|
||||
keyValue := strings.Split(strings.TrimSpace(p), "=")
|
||||
if len(keyValue) != 2 {
|
||||
log.Errorf(`Wrong constant labels format %q, should be "key=value"`, p)
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(keyValue[0])
|
||||
value := strings.TrimSpace(keyValue[1])
|
||||
if key == "" || value == "" {
|
||||
continue
|
||||
}
|
||||
labels[key] = value
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
// NewExporter returns a new PostgreSQL exporter for the provided DSN.
|
||||
func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
|
||||
e := &Exporter{
|
||||
dsn: dsn,
|
||||
builtinMetricMaps: builtinMetricMaps,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(e)
|
||||
}
|
||||
|
||||
e.setupInternalMetrics()
|
||||
e.setupServers()
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *Exporter) setupServers() {
|
||||
e.servers = NewServers(ServerWithLabels(e.constantLabels))
|
||||
}
|
||||
|
||||
func (e *Exporter) setupInternalMetrics() {
|
||||
e.duration = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "last_scrape_duration_seconds",
|
||||
Help: "Duration of the last scrape of metrics from PostgresSQL.",
|
||||
ConstLabels: e.constantLabels,
|
||||
})
|
||||
e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "scrapes_total",
|
||||
Help: "Total number of times PostgresSQL was scraped for metrics.",
|
||||
ConstLabels: e.constantLabels,
|
||||
})
|
||||
e.error = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "last_scrape_error",
|
||||
Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
|
||||
ConstLabels: e.constantLabels,
|
||||
})
|
||||
e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "up",
|
||||
Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).",
|
||||
ConstLabels: e.constantLabels,
|
||||
})
|
||||
e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: exporter,
|
||||
Name: "user_queries_load_error",
|
||||
Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).",
|
||||
ConstLabels: e.constantLabels,
|
||||
}, []string{"filename", "hashsum"})
|
||||
}
|
||||
|
||||
// Describe implements prometheus.Collector.
|
||||
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
|
||||
// We cannot know in advance what metrics the exporter will generate
|
||||
@ -764,7 +1015,6 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
|
||||
// don't detect inconsistent metrics created by this exporter
|
||||
// itself. Also, a change in the monitored Postgres instance may change the
|
||||
// exported metrics during the runtime of the exporter.
|
||||
|
||||
metricCh := make(chan prometheus.Metric)
|
||||
doneCh := make(chan struct{})
|
||||
|
||||
@ -791,40 +1041,18 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
|
||||
e.userQueriesError.Collect(ch)
|
||||
}
|
||||
|
||||
func newConstLabels() prometheus.Labels {
|
||||
if constantLabelsList == nil || *constantLabelsList == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var constLabels = make(prometheus.Labels)
|
||||
parts := strings.Split(*constantLabelsList, ",")
|
||||
for _, p := range parts {
|
||||
keyValue := strings.Split(strings.TrimSpace(p), "=")
|
||||
if len(keyValue) != 2 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(keyValue[0])
|
||||
value := strings.TrimSpace(keyValue[1])
|
||||
if key == "" || value == "" {
|
||||
continue
|
||||
}
|
||||
constLabels[key] = value
|
||||
}
|
||||
return constLabels
|
||||
}
|
||||
|
||||
func newDesc(subsystem, name, help string) *prometheus.Desc {
|
||||
func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc {
|
||||
return prometheus.NewDesc(
|
||||
prometheus.BuildFQName(namespace, subsystem, name),
|
||||
help, nil, newConstLabels(),
|
||||
help, nil, labels,
|
||||
)
|
||||
}
|
||||
|
||||
// Query within a namespace mapping and emit metrics. Returns fatal errors if
|
||||
// the scrape fails, and a slice of errors if they were non-fatal.
|
||||
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace, queryOverrides map[string]string) ([]error, error) {
|
||||
func queryNamespaceMapping(ch chan<- prometheus.Metric, server *Server, namespace string, mapping MetricMapNamespace) ([]error, error) {
|
||||
// Check for a query override for this namespace
|
||||
query, found := queryOverrides[namespace]
|
||||
query, found := server.queryOverrides[namespace]
|
||||
|
||||
// Was this query disabled (i.e. nothing sensible can be queried on cu
|
||||
// version of PostgreSQL?
|
||||
@ -840,12 +1068,12 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
|
||||
if !found {
|
||||
// I've no idea how to avoid this properly at the moment, but this is
|
||||
// an admin tool so you're not injecting SQL right?
|
||||
rows, err = db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql
|
||||
rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql
|
||||
} else {
|
||||
rows, err = db.Query(query) // nolint: safesql
|
||||
rows, err = server.db.Query(query) // nolint: safesql
|
||||
}
|
||||
if err != nil {
|
||||
return []error{}, errors.New(fmt.Sprintln("Error running query on database: ", namespace, err))
|
||||
return []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err)
|
||||
}
|
||||
defer rows.Close() // nolint: errcheck
|
||||
|
||||
@ -875,10 +1103,10 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
|
||||
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
|
||||
}
|
||||
|
||||
// Get the label values for this row
|
||||
var labels = make([]string, len(mapping.labels))
|
||||
for idx, columnName := range mapping.labels {
|
||||
labels[idx], _ = dbToString(columnData[columnIdx[columnName]])
|
||||
// Get the label values for this row.
|
||||
labels := make([]string, len(mapping.labels))
|
||||
for idx, label := range mapping.labels {
|
||||
labels[idx], _ = dbToString(columnData[columnIdx[label]])
|
||||
}
|
||||
|
||||
// Loop over column names, and match to scan data. Unknown columns
|
||||
@ -902,7 +1130,7 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
|
||||
} else {
|
||||
// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
|
||||
metricLabel := fmt.Sprintf("%s_%s", namespace, columnName)
|
||||
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, newConstLabels())
|
||||
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels)
|
||||
|
||||
// Its not an error to fail here, since the values are
|
||||
// unexpected anyway.
|
||||
@ -920,13 +1148,13 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
|
||||
|
||||
// Iterate through all the namespace mappings in the exporter and run their
|
||||
// queries.
|
||||
func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap map[string]MetricMapNamespace, queryOverrides map[string]string) map[string]error {
|
||||
func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error {
|
||||
// Return a map of namespace -> errors
|
||||
namespaceErrors := make(map[string]error)
|
||||
|
||||
for namespace, mapping := range metricMap {
|
||||
for namespace, mapping := range server.metricMap {
|
||||
log.Debugln("Querying namespace: ", namespace)
|
||||
nonFatalErrors, err := queryNamespaceMapping(ch, db, namespace, mapping, queryOverrides)
|
||||
nonFatalErrors, err := queryNamespaceMapping(ch, server, namespace, mapping)
|
||||
// Serious error - a namespace disappeared
|
||||
if err != nil {
|
||||
namespaceErrors[namespace] = err
|
||||
@ -944,40 +1172,36 @@ func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap m
|
||||
}
|
||||
|
||||
// Check and update the exporters query maps if the version has changed.
|
||||
func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) error {
|
||||
log.Debugln("Querying Postgres Version")
|
||||
versionRow := db.QueryRow("SELECT version();")
|
||||
func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error {
|
||||
log.Debugf("Querying Postgres Version on %q", server)
|
||||
versionRow := server.db.QueryRow("SELECT version();")
|
||||
var versionString string
|
||||
err := versionRow.Scan(&versionString)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error scanning version string: %v", err)
|
||||
return fmt.Errorf("Error scanning version string on %q: %v", server, err)
|
||||
}
|
||||
semanticVersion, err := parseVersion(versionString)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing version string: %v", err)
|
||||
return fmt.Errorf("Error parsing version string on %q: %v", server, err)
|
||||
}
|
||||
if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) {
|
||||
log.Warnln("PostgreSQL version is lower then our lowest supported version! Got", semanticVersion.String(), "minimum supported is", lowestSupportedVersion.String())
|
||||
log.Warnf("PostgreSQL version is lower on %q then our lowest supported version! Got %s minimum supported is %s.", server, semanticVersion, lowestSupportedVersion)
|
||||
}
|
||||
|
||||
// Check if semantic version changed and recalculate maps if needed.
|
||||
if semanticVersion.NE(e.lastMapVersion) || e.metricMap == nil {
|
||||
log.Infoln("Semantic Version Changed:", e.lastMapVersion.String(), "->", semanticVersion.String())
|
||||
e.mappingMtx.Lock()
|
||||
if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil {
|
||||
log.Infof("Semantic Version Changed on %q: %s -> %s", server, server.lastMapVersion, semanticVersion)
|
||||
server.mappingMtx.Lock()
|
||||
|
||||
if e.disableDefaultMetrics {
|
||||
e.metricMap = make(map[string]MetricMapNamespace)
|
||||
server.metricMap = make(map[string]MetricMapNamespace)
|
||||
server.queryOverrides = make(map[string]string)
|
||||
} else {
|
||||
e.metricMap = makeDescMap(semanticVersion, e.builtinMetricMaps)
|
||||
server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps)
|
||||
server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
|
||||
}
|
||||
|
||||
if e.disableDefaultMetrics {
|
||||
e.queryOverrides = make(map[string]string)
|
||||
} else {
|
||||
e.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
|
||||
}
|
||||
|
||||
e.lastMapVersion = semanticVersion
|
||||
server.lastMapVersion = semanticVersion
|
||||
|
||||
if e.userQueriesPath != "" {
|
||||
// Clear the metric while a reload is happening
|
||||
@ -991,7 +1215,7 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) err
|
||||
} else {
|
||||
hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData))
|
||||
|
||||
if err := addQueries(userQueriesData, semanticVersion, e.metricMap, e.queryOverrides); err != nil {
|
||||
if err := addQueries(userQueriesData, semanticVersion, server); err != nil {
|
||||
log.Errorln("Failed to reload user queries:", e.userQueriesPath, err)
|
||||
e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1)
|
||||
} else {
|
||||
@ -1001,98 +1225,50 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) err
|
||||
}
|
||||
}
|
||||
|
||||
e.mappingMtx.Unlock()
|
||||
server.mappingMtx.Unlock()
|
||||
}
|
||||
|
||||
// Output the version as a special metric
|
||||
versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),
|
||||
"Version string as reported by postgres", []string{"version", "short_version"}, newConstLabels())
|
||||
"Version string as reported by postgres", []string{"version", "short_version"}, server.labels)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(versionDesc,
|
||||
prometheus.UntypedValue, 1, versionString, semanticVersion.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Exporter) getDB(conn string) (*sql.DB, error) {
|
||||
// Has dsn changed?
|
||||
if (e.dbConnection != nil) && (e.dsn != e.dbDsn) {
|
||||
err := e.dbConnection.Close()
|
||||
log.Warnln("Error while closing obsolete DB connection:", err)
|
||||
e.dbConnection = nil
|
||||
e.dbDsn = ""
|
||||
}
|
||||
|
||||
if e.dbConnection == nil {
|
||||
d, err := sql.Open("postgres", conn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.SetMaxOpenConns(1)
|
||||
d.SetMaxIdleConns(1)
|
||||
e.dbConnection = d
|
||||
e.dbDsn = e.dsn
|
||||
log.Infoln("Established new database connection.")
|
||||
}
|
||||
|
||||
// Always send a ping and possibly invalidate the connection if it fails
|
||||
if err := e.dbConnection.Ping(); err != nil {
|
||||
cerr := e.dbConnection.Close()
|
||||
log.Infoln("Error while closing non-pinging DB connection:", cerr)
|
||||
e.dbConnection = nil
|
||||
e.psqlUp.Set(0)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return e.dbConnection, nil
|
||||
}
|
||||
|
||||
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
|
||||
defer func(begun time.Time) {
|
||||
e.duration.Set(time.Since(begun).Seconds())
|
||||
}(time.Now())
|
||||
|
||||
e.error.Set(0)
|
||||
e.psqlUp.Set(0)
|
||||
e.totalScrapes.Inc()
|
||||
|
||||
db, err := e.getDB(e.dsn)
|
||||
if err != nil {
|
||||
loggableDsn := "could not parse DATA_SOURCE_NAME"
|
||||
// If the DSN is parseable, log it with a blanked out password
|
||||
pDsn, pErr := url.Parse(e.dsn)
|
||||
if pErr == nil {
|
||||
// Blank user info if not nil
|
||||
if pDsn.User != nil {
|
||||
pDsn.User = url.UserPassword(pDsn.User.Username(), "PASSWORD_REMOVED")
|
||||
for _, dsn := range e.dsn {
|
||||
server, err := e.servers.GetServer(dsn)
|
||||
if err != nil {
|
||||
loggableDSN := "could not parse DATA_SOURCE_NAME"
|
||||
pDSN, pErr := parseDSN(dsn)
|
||||
if pErr == nil {
|
||||
loggableDSN = pDSN.String()
|
||||
}
|
||||
loggableDsn = pDsn.String()
|
||||
log.Infof("Error opening connection to database (%s): %v", loggableDSN, err)
|
||||
e.error.Inc()
|
||||
continue
|
||||
}
|
||||
log.Infof("Error opening connection to database (%s): %s", loggableDsn, err)
|
||||
e.psqlUp.Set(0)
|
||||
e.error.Set(1)
|
||||
return
|
||||
}
|
||||
|
||||
// Didn't fail, can mark connection as up for this scrape.
|
||||
e.psqlUp.Set(1)
|
||||
// Didn't fail, can mark connection as up for this scrape.
|
||||
e.psqlUp.Inc()
|
||||
|
||||
// Check if map versions need to be updated
|
||||
if err := e.checkMapVersions(ch, db); err != nil {
|
||||
log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err)
|
||||
e.error.Set(1)
|
||||
}
|
||||
// Check if map versions need to be updated
|
||||
if err := e.checkMapVersions(ch, server); err != nil {
|
||||
log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err)
|
||||
e.error.Inc()
|
||||
}
|
||||
|
||||
// Lock the exporter maps
|
||||
e.mappingMtx.RLock()
|
||||
defer e.mappingMtx.RUnlock()
|
||||
if err := querySettings(ch, db); err != nil {
|
||||
log.Infof("Error retrieving settings: %s", err)
|
||||
e.error.Set(1)
|
||||
}
|
||||
|
||||
errMap := queryNamespaceMappings(ch, db, e.metricMap, e.queryOverrides)
|
||||
if len(errMap) > 0 {
|
||||
e.error.Set(1)
|
||||
server.Scrape(ch, e.error, e.disableSettingsMetrics)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1100,7 +1276,7 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
|
||||
// DATA_SOURCE_NAME always wins so we do not break older versions
|
||||
// reading secrets from files wins over secrets in environment variables
|
||||
// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS}
|
||||
func getDataSource() string {
|
||||
func getDataSources() []string {
|
||||
var dsn = os.Getenv("DATA_SOURCE_NAME")
|
||||
if len(dsn) == 0 {
|
||||
var user string
|
||||
@ -1129,9 +1305,10 @@ func getDataSource() string {
|
||||
ui := url.UserPassword(user, pass).String()
|
||||
uri := os.Getenv("DATA_SOURCE_URI")
|
||||
dsn = "postgresql://" + ui + "@" + uri
|
||||
}
|
||||
|
||||
return dsn
|
||||
return []string{dsn}
|
||||
}
|
||||
return strings.Split(dsn, ",")
|
||||
}
|
||||
|
||||
func main() {
|
||||
@ -1155,16 +1332,19 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
dsn := getDataSource()
|
||||
dsn := getDataSources()
|
||||
if len(dsn) == 0 {
|
||||
log.Fatal("couldn't find environment variables describing the datasource to use")
|
||||
}
|
||||
|
||||
exporter := NewExporter(dsn, *disableDefaultMetrics, *queriesPath)
|
||||
exporter := NewExporter(dsn,
|
||||
DisableDefaultMetrics(*disableDefaultMetrics),
|
||||
DisableSettingsMetrics(*disableSettingsMetrics),
|
||||
WithUserQueriesPath(*queriesPath),
|
||||
WithConstantLabels(*constantLabelsList),
|
||||
)
|
||||
defer func() {
|
||||
if exporter.dbConnection != nil {
|
||||
exporter.dbConnection.Close() // nolint: errcheck
|
||||
}
|
||||
exporter.servers.Close()
|
||||
}()
|
||||
|
||||
prometheus.MustRegister(exporter)
|
||||
|
@ -7,11 +7,11 @@ package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
@ -31,7 +31,7 @@ func (s *IntegrationSuite) SetUpSuite(c *C) {
|
||||
dsn := os.Getenv("DATA_SOURCE_NAME")
|
||||
c.Assert(dsn, Not(Equals), "")
|
||||
|
||||
exporter := NewExporter(dsn, false, "")
|
||||
exporter := NewExporter(strings.Split(dsn, ","))
|
||||
c.Assert(exporter, NotNil)
|
||||
// Assign the exporter to the suite
|
||||
s.e = exporter
|
||||
@ -48,29 +48,31 @@ func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
|
||||
}
|
||||
}()
|
||||
|
||||
// Open a database connection
|
||||
db, err := sql.Open("postgres", s.e.dsn)
|
||||
c.Assert(db, NotNil)
|
||||
c.Assert(err, IsNil)
|
||||
defer db.Close()
|
||||
for _, dsn := range s.e.dsn {
|
||||
// Open a database connection
|
||||
server, err := NewServer(dsn)
|
||||
c.Assert(server, NotNil)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// Do a version update
|
||||
err = s.e.checkMapVersions(ch, db)
|
||||
c.Assert(err, IsNil)
|
||||
// Do a version update
|
||||
err = s.e.checkMapVersions(ch, server)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
err = querySettings(ch, db)
|
||||
if !c.Check(err, Equals, nil) {
|
||||
fmt.Println("## ERRORS FOUND")
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// This should never happen in our test cases.
|
||||
errMap := queryNamespaceMappings(ch, db, s.e.metricMap, s.e.queryOverrides)
|
||||
if !c.Check(len(errMap), Equals, 0) {
|
||||
fmt.Println("## NAMESPACE ERRORS FOUND")
|
||||
for namespace, err := range errMap {
|
||||
fmt.Println(namespace, ":", err)
|
||||
err = querySettings(ch, server)
|
||||
if !c.Check(err, Equals, nil) {
|
||||
fmt.Println("## ERRORS FOUND")
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// This should never happen in our test cases.
|
||||
errMap := queryNamespaceMappings(ch, server)
|
||||
if !c.Check(len(errMap), Equals, 0) {
|
||||
fmt.Println("## NAMESPACE ERRORS FOUND")
|
||||
for namespace, err := range errMap {
|
||||
fmt.Println(namespace, ":", err)
|
||||
}
|
||||
}
|
||||
server.Close()
|
||||
}
|
||||
}
|
||||
|
||||
@ -86,12 +88,12 @@ func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
|
||||
}()
|
||||
|
||||
// Send a bad DSN
|
||||
exporter := NewExporter("invalid dsn", false, *queriesPath)
|
||||
exporter := NewExporter([]string{"invalid dsn"})
|
||||
c.Assert(exporter, NotNil)
|
||||
exporter.scrape(ch)
|
||||
|
||||
// Send a DSN to a non-listening port.
|
||||
exporter = NewExporter("postgresql://nothing:nothing@127.0.0.1:1/nothing", false, *queriesPath)
|
||||
exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"})
|
||||
c.Assert(exporter, NotNil)
|
||||
exporter.scrape(ch)
|
||||
}
|
||||
@ -109,7 +111,7 @@ func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
|
||||
dsn := os.Getenv("DATA_SOURCE_NAME")
|
||||
c.Assert(dsn, Not(Equals), "")
|
||||
|
||||
exporter := NewExporter(dsn, false, "")
|
||||
exporter := NewExporter(strings.Split(dsn, ","))
|
||||
c.Assert(exporter, NotNil)
|
||||
|
||||
// Convert the default maps into a list of empty maps.
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
@ -34,7 +35,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
|
||||
|
||||
{
|
||||
// No metrics should be eliminated
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap)
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap)
|
||||
c.Check(
|
||||
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
|
||||
Equals,
|
||||
@ -55,7 +56,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
|
||||
testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric
|
||||
|
||||
// Discard metric should be discarded
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap)
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap)
|
||||
c.Check(
|
||||
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
|
||||
Equals,
|
||||
@ -76,7 +77,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
|
||||
testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric
|
||||
|
||||
// Discard metric should be discarded
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.2"), testMetricMap)
|
||||
resultMap := makeDescMap(semver.MustParse("0.0.2"), prometheus.Labels{}, testMetricMap)
|
||||
c.Check(
|
||||
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
|
||||
Equals,
|
||||
@ -92,7 +93,6 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
|
||||
|
||||
// test read username and password from file
|
||||
func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) {
|
||||
|
||||
err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file")
|
||||
c.Assert(err, IsNil)
|
||||
defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE")
|
||||
@ -107,29 +107,33 @@ func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) {
|
||||
|
||||
var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable"
|
||||
|
||||
dsn := getDataSource()
|
||||
if dsn != expected {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, expected)
|
||||
dsn := getDataSources()
|
||||
if len(dsn) == 0 {
|
||||
c.Errorf("Expected one data source, zero found")
|
||||
}
|
||||
if dsn[0] != expected {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], expected)
|
||||
}
|
||||
}
|
||||
|
||||
// test read DATA_SOURCE_NAME from environment
|
||||
func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) {
|
||||
|
||||
envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled"
|
||||
err := os.Setenv("DATA_SOURCE_NAME", envDsn)
|
||||
c.Assert(err, IsNil)
|
||||
defer UnsetEnvironment(c, "DATA_SOURCE_NAME")
|
||||
|
||||
dsn := getDataSource()
|
||||
if dsn != envDsn {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, envDsn)
|
||||
dsn := getDataSources()
|
||||
if len(dsn) == 0 {
|
||||
c.Errorf("Expected one data source, zero found")
|
||||
}
|
||||
if dsn[0] != envDsn {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn)
|
||||
}
|
||||
}
|
||||
|
||||
// test DATA_SOURCE_NAME is used even if username and password environment variables are set
|
||||
func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) {
|
||||
|
||||
envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled"
|
||||
err := os.Setenv("DATA_SOURCE_NAME", envDsn)
|
||||
c.Assert(err, IsNil)
|
||||
@ -143,9 +147,12 @@ func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) {
|
||||
c.Assert(err, IsNil)
|
||||
defer UnsetEnvironment(c, "DATA_SOURCE_PASS")
|
||||
|
||||
dsn := getDataSource()
|
||||
if dsn != envDsn {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, envDsn)
|
||||
dsn := getDataSources()
|
||||
if len(dsn) == 0 {
|
||||
c.Errorf("Expected one data source, zero found")
|
||||
}
|
||||
if dsn[0] != envDsn {
|
||||
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn)
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user