multi-server-exporter multi server exporter is introduced

This commit is contained in:
Anthony Regeda 2018-09-04 13:02:20 +03:00 committed by Will Rouesnel
parent 72446a5b1e
commit 1d6a733ba2
6 changed files with 487 additions and 287 deletions

View File

@ -49,16 +49,19 @@ Package vendoring is handled with [`govendor`](https://github.com/kardianos/gove
Path under which to expose metrics. Default is `/metrics`. Path under which to expose metrics. Default is `/metrics`.
* `disable-default-metrics` * `disable-default-metrics`
Use only metrics supplied from `queries.yaml` via `--extend.query-path` Use only metrics supplied from `queries.yaml` via `--extend.query-path`.
* `disable-settings-metrics`
Use the flag if you don't want to scrape `pg_settings`.
* `extend.query-path` * `extend.query-path`
Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
for examples of the format. for examples of the format.
* `dumpmaps` * `dumpmaps`
Do not run - print the internal representation of the metric maps. Useful when debugging a custom Do not run - print the internal representation of the metric maps. Useful when debugging a custom
queries file. queries file.
* `log.level` * `log.level`
Set logging level: one of `debug`, `info`, `warn`, `error`, `fatal` Set logging level: one of `debug`, `info`, `warn`, `error`, `fatal`
@ -78,21 +81,23 @@ The following environment variables configure the exporter:
URI may contain the username and password to connect with. URI may contain the username and password to connect with.
* `DATA_SOURCE_URI` * `DATA_SOURCE_URI`
an alternative to DATA_SOURCE_NAME which exclusively accepts the raw URI an alternative to `DATA_SOURCE_NAME` which exclusively accepts the raw URI
without a username and password component. without a username and password component.
* `DATA_SOURCE_USER` * `DATA_SOURCE_USER`
When using `DATA_SOURCE_URI`, this environment variable is used to specify When using `DATA_SOURCE_URI`, this environment variable is used to specify
the username. the username.
* `DATA_SOURCE_USER_FILE` * `DATA_SOURCE_USER_FILE`
The same, but reads the username from a file. The same, but reads the username from a file.
* `DATA_SOURCE_PASS` * `DATA_SOURCE_PASS`
When using `DATA_SOURCE_URI`, this environment variable is used to specify When using `DATA_SOURCE_URI`, this environment variable is used to specify
the password to connect with. the password to connect with.
* `DATA_SOURCE_PASS_FILE` * `DATA_SOURCE_PASS_FILE`
The same as above but reads the password from a file. The same as above but reads the password from a file.
* `PG_EXPORTER_WEB_LISTEN_ADDRESS` * `PG_EXPORTER_WEB_LISTEN_ADDRESS`
Address to listen on for web interface and telemetry. Default is `:9187`. Address to listen on for web interface and telemetry. Default is `:9187`.
@ -102,13 +107,16 @@ The following environment variables configure the exporter:
* `PG_EXPORTER_DISABLE_DEFAULT_METRICS` * `PG_EXPORTER_DISABLE_DEFAULT_METRICS`
Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`. Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`.
* `PG_EXPORTER_DISABLE_SETTINGS_METRICS`
Use the flag if you don't want to scrape `pg_settings`. Value can be `true` or `false`. Default is `false`.
* `PG_EXPORTER_EXTEND_QUERY_PATH` * `PG_EXPORTER_EXTEND_QUERY_PATH`
Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
for examples of the format. for examples of the format.
* `PG_EXPORTER_CONSTANT_LABELS` * `PG_EXPORTER_CONSTANT_LABELS`
Labels to set in all metrics. A list of `label=value` pairs, separated by commas. Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given. Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.
### Setting the Postgres server's data source name ### Setting the Postgres server's data source name
@ -120,6 +128,10 @@ For running it locally on a default Debian/Ubuntu install, this will work (trans
sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter
Also, you can set a list of sources to scrape different instances from one exporter setup. Just define a comma-separated string.
sudo -u postgres DATA_SOURCE_NAME="port=5432,port=6432" postgres_exporter
See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string. See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string.
### Adding new metrics ### Adding new metrics
@ -143,18 +155,18 @@ The -extend.query-path command-line argument specifies a YAML file containing ad
Some examples are provided in [queries.yaml](queries.yaml). Some examples are provided in [queries.yaml](queries.yaml).
### Disabling default metrics ### Disabling default metrics
To work with non-officially-supported postgres versions you can try disabling (e.g. 8.2.15) To work with non-officially-supported postgres versions you can try disabling (e.g. 8.2.15)
or a variant of postgres (e.g. Greenplum) you can disable the default metrics with the `--disable-default-metrics` or a variant of postgres (e.g. Greenplum) you can disable the default metrics with the `--disable-default-metrics`
flag. This removes all built-in metrics, and uses only metrics defined by queries in the `queries.yaml` file you supply flag. This removes all built-in metrics, and uses only metrics defined by queries in the `queries.yaml` file you supply
(so you must supply one, otherwise the exporter will return nothing but internal statuses and not your database). (so you must supply one, otherwise the exporter will return nothing but internal statuses and not your database).
### Running as non-superuser ### Running as non-superuser
To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication` To be able to collect metrics from `pg_stat_activity` and `pg_stat_replication`
as non-superuser you have to create views as a superuser, and assign permissions as non-superuser you have to create views as a superuser, and assign permissions
separately to those. separately to those.
In PostgreSQL, views run with the permissions of the user that created them so In PostgreSQL, views run with the permissions of the user that created them so
they can act as security barriers. they can act as security barriers.
```sql ```sql

View File

@ -1,8 +1,6 @@
package main package main
import ( import (
"database/sql"
"errors"
"fmt" "fmt"
"math" "math"
"strconv" "strconv"
@ -13,8 +11,8 @@ import (
) )
// Query the pg_settings view containing runtime variables // Query the pg_settings view containing runtime variables
func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error { func querySettings(ch chan<- prometheus.Metric, server *Server) error {
log.Debugln("Querying pg_setting view") log.Debugf("Querying pg_setting view on %q", server)
// pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html // pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html
// //
@ -22,9 +20,9 @@ func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
// types in normaliseUnit() below // types in normaliseUnit() below
query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');" query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');"
rows, err := db.Query(query) rows, err := server.db.Query(query)
if err != nil { if err != nil {
return errors.New(fmt.Sprintln("Error running query on database: ", namespace, err)) return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err)
} }
defer rows.Close() // nolint: errcheck defer rows.Close() // nolint: errcheck
@ -32,10 +30,10 @@ func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
s := &pgSetting{} s := &pgSetting{}
err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype) err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype)
if err != nil { if err != nil {
return errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err)
} }
ch <- s.metric() ch <- s.metric(server.labels)
} }
return nil return nil
@ -47,7 +45,7 @@ type pgSetting struct {
name, setting, unit, shortDesc, vartype string name, setting, unit, shortDesc, vartype string
} }
func (s *pgSetting) metric() prometheus.Metric { func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric {
var ( var (
err error err error
name = strings.Replace(s.name, ".", "_", -1) name = strings.Replace(s.name, ".", "_", -1)
@ -78,7 +76,7 @@ func (s *pgSetting) metric() prometheus.Metric {
panic(fmt.Sprintf("Unsupported vartype %q", s.vartype)) panic(fmt.Sprintf("Unsupported vartype %q", s.vartype))
} }
desc := newDesc(subsystem, name, shortDesc) desc := newDesc(subsystem, name, shortDesc, labels)
return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val) return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
} }

View File

@ -3,6 +3,7 @@
package main package main
import ( import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -25,7 +26,7 @@ var fixtures = []fixture{
unit: "seconds", unit: "seconds",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_seconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: 5, v: 5,
}, },
{ {
@ -41,7 +42,7 @@ var fixtures = []fixture{
unit: "seconds", unit: "seconds",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_milliseconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: 5, v: 5,
}, },
{ {
@ -57,7 +58,7 @@ var fixtures = []fixture{
unit: "bytes", unit: "bytes",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_eight_kb_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 139264, v: 139264,
}, },
{ {
@ -73,7 +74,7 @@ var fixtures = []fixture{
unit: "bytes", unit: "bytes",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_16_kb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 49152, v: 49152,
}, },
{ {
@ -89,7 +90,7 @@ var fixtures = []fixture{
unit: "bytes", unit: "bytes",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_16_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 5.0331648e+07, v: 5.0331648e+07,
}, },
{ {
@ -105,7 +106,7 @@ var fixtures = []fixture{
unit: "bytes", unit: "bytes",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_32_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 1.00663296e+08, v: 1.00663296e+08,
}, },
{ {
@ -121,7 +122,7 @@ var fixtures = []fixture{
unit: "bytes", unit: "bytes",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_64_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_64_mb_real_fixture_metric_bytes", help: "Foo foo foo [Units converted to bytes.]", constLabels: {}, variableLabels: []}`,
v: 2.01326592e+08, v: 2.01326592e+08,
}, },
{ {
@ -137,7 +138,7 @@ var fixtures = []fixture{
unit: "", unit: "",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_bool_on_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
v: 1, v: 1,
}, },
{ {
@ -153,7 +154,7 @@ var fixtures = []fixture{
unit: "", unit: "",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_bool_off_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Foo foo foo", constLabels: {}, variableLabels: []}`,
v: 0, v: 0,
}, },
{ {
@ -169,7 +170,7 @@ var fixtures = []fixture{
unit: "seconds", unit: "seconds",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_special_minus_one_value_seconds\", help: \"foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "foo foo foo [Units converted to seconds.]", constLabels: {}, variableLabels: []}`,
v: -1, v: -1,
}, },
{ {
@ -185,7 +186,7 @@ var fixtures = []fixture{
unit: "", unit: "",
err: "", err: "",
}, },
d: "Desc{fqName: \"pg_settings_rds_rds_superuser_reserved_connections\", help: \"Sets the number of connection slots reserved for rds_superusers.\", constLabels: {}, variableLabels: []}", d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Sets the number of connection slots reserved for rds_superusers.", constLabels: {}, variableLabels: []}`,
v: 2, v: 2,
}, },
{ {
@ -233,7 +234,7 @@ func (s *PgSettingSuite) TestMetric(c *C) {
for _, f := range fixtures { for _, f := range fixtures {
d := &dto.Metric{} d := &dto.Metric{}
m := f.p.metric() m := f.p.metric(prometheus.Labels{})
m.Write(d) // nolint: errcheck m.Write(d) // nolint: errcheck
c.Check(m.Desc().String(), Equals, f.d) c.Check(m.Desc().String(), Equals, f.d)

View File

@ -22,7 +22,7 @@ import (
"crypto/sha256" "crypto/sha256"
"github.com/blang/semver" "github.com/blang/semver"
_ "github.com/lib/pq" "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log" "github.com/prometheus/common/log"
@ -33,12 +33,13 @@ import (
var Version = "0.0.1" var Version = "0.0.1"
var ( var (
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String() listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9187").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_LISTEN_ADDRESS").String()
metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_TELEMETRY_PATH").String() metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").OverrideDefaultFromEnvar("PG_EXPORTER_WEB_TELEMETRY_PATH").String()
disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool() disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_EXTEND_QUERY_PATH").String() disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").OverrideDefaultFromEnvar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool() queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run.").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_CONTANT_LABELS").String() onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,).").Default("").OverrideDefaultFromEnvar("PG_EXPORTER_CONTANT_LABELS").String()
) )
// Metric name parts. // Metric name parts.
@ -50,6 +51,8 @@ const (
// Metric label used for static string data thats handy to send to Prometheus // Metric label used for static string data thats handy to send to Prometheus
// e.g. version // e.g. version
staticLabelName = "static" staticLabelName = "static"
// Metric label used for server identification.
serverLabelName = "server"
) )
// ColumnUsage should be one of several enum values which describe how a // ColumnUsage should be one of several enum values which describe how a
@ -386,7 +389,7 @@ func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]
// TODO: test code for all cu. // TODO: test code for all cu.
// TODO: use proper struct type system // TODO: use proper struct type system
// TODO: the YAML this supports is "non-standard" - we should move away from it. // TODO: the YAML this supports is "non-standard" - we should move away from it.
func addQueries(content []byte, pgVersion semver.Version, exporterMap map[string]MetricMapNamespace, queryOverrideMap map[string]string) error { func addQueries(content []byte, pgVersion semver.Version, server *Server) error {
var extra map[string]interface{} var extra map[string]interface{}
err := yaml.Unmarshal(content, &extra) err := yaml.Unmarshal(content, &extra)
@ -450,35 +453,35 @@ func addQueries(content []byte, pgVersion semver.Version, exporterMap map[string
} }
// Convert the loaded metric map into exporter representation // Convert the loaded metric map into exporter representation
partialExporterMap := makeDescMap(pgVersion, metricMaps) partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps)
// Merge the two maps (which are now quite flattened) // Merge the two maps (which are now quite flattened)
for k, v := range partialExporterMap { for k, v := range partialExporterMap {
_, found := exporterMap[k] _, found := server.metricMap[k]
if found { if found {
log.Debugln("Overriding metric", k, "from user YAML file.") log.Debugln("Overriding metric", k, "from user YAML file.")
} else { } else {
log.Debugln("Adding new metric", k, "from user YAML file.") log.Debugln("Adding new metric", k, "from user YAML file.")
} }
exporterMap[k] = v server.metricMap[k] = v
} }
// Merge the query override map // Merge the query override map
for k, v := range newQueryOverrides { for k, v := range newQueryOverrides {
_, found := queryOverrideMap[k] _, found := server.queryOverrides[k]
if found { if found {
log.Debugln("Overriding query override", k, "from user YAML file.") log.Debugln("Overriding query override", k, "from user YAML file.")
} else { } else {
log.Debugln("Adding new query override", k, "from user YAML file.") log.Debugln("Adding new query override", k, "from user YAML file.")
} }
queryOverrideMap[k] = v server.queryOverrides[k] = v
} }
return nil return nil
} }
// Turn the MetricMap column mapping into a prometheus descriptor mapping. // Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace { func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace {
var metricMap = make(map[string]MetricMapNamespace) var metricMap = make(map[string]MetricMapNamespace)
for namespace, mappings := range metricMaps { for namespace, mappings := range metricMaps {
@ -492,8 +495,6 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
} }
} }
constLabels := newConstLabels()
for columnName, columnMapping := range mappings { for columnName, columnMapping := range mappings {
// Check column version compatibility for the current map // Check column version compatibility for the current map
// Force to discard if not compatible. // Force to discard if not compatible.
@ -525,7 +526,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
case COUNTER: case COUNTER:
thisMap[columnName] = MetricMap{ thisMap[columnName] = MetricMap{
vtype: prometheus.CounterValue, vtype: prometheus.CounterValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels), desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) { conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in) return dbToFloat64(in)
}, },
@ -533,7 +534,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
case GAUGE: case GAUGE:
thisMap[columnName] = MetricMap{ thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue, vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels), desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) { conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in) return dbToFloat64(in)
}, },
@ -541,7 +542,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
case MAPPEDMETRIC: case MAPPEDMETRIC:
thisMap[columnName] = MetricMap{ thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue, vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, constLabels), desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) { conversion: func(in interface{}) (float64, bool) {
text, ok := in.(string) text, ok := in.(string)
if !ok { if !ok {
@ -558,7 +559,7 @@ func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]Colu
case DURATION: case DURATION:
thisMap[columnName] = MetricMap{ thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue, vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, constLabels), desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels),
conversion: func(in interface{}) (float64, bool) { conversion: func(in interface{}) (float64, bool) {
var durationString string var durationString string
switch t := in.(type) { switch t := in.(type) {
@ -676,25 +677,56 @@ func dbToString(t interface{}) (string, bool) {
} }
} }
// Exporter collects Postgres metrics. It implements prometheus.Collector. func parseFingerprint(url string) (string, error) {
type Exporter struct { dsn, err := pq.ParseURL(url)
// Holds a reference to the build in column mappings. Currently this is for testing purposes if err != nil {
// only, since it just points to the global. dsn = url
builtinMetricMaps map[string]map[string]ColumnMapping }
dsn string pairs := strings.Split(dsn, " ")
disableDefaultMetrics bool kv := make(map[string]string, len(pairs))
userQueriesPath string for _, pair := range pairs {
duration prometheus.Gauge splitted := strings.Split(pair, "=")
error prometheus.Gauge if len(splitted) != 2 {
psqlUp prometheus.Gauge return "", fmt.Errorf("malformed dsn %q", dsn)
userQueriesError *prometheus.GaugeVec }
totalScrapes prometheus.Counter kv[splitted[0]] = splitted[1]
}
// dbDsn is the connection string used to establish the dbConnection var fingerprint string
dbDsn string
// dbConnection is used to allow re-using the DB connection between scrapes if host, ok := kv["host"]; ok {
dbConnection *sql.DB fingerprint += host
} else {
fingerprint += "localhost"
}
if port, ok := kv["port"]; ok {
fingerprint += ":" + port
} else {
fingerprint += ":5432"
}
return fingerprint, nil
}
// parseDSN parses dsn as a URL and replaces any password in its user-info
// section with the literal "PASSWORD_REMOVED", so the result does not expose
// credentials (e.g. when printed or logged).
func parseDSN(dsn string) (*url.URL, error) {
	parsed, err := url.Parse(dsn)
	if err != nil {
		return nil, err
	}
	// Only mask credentials when user info is actually present.
	if parsed.User != nil {
		parsed.User = url.UserPassword(parsed.User.Username(), "PASSWORD_REMOVED")
	}
	return parsed, nil
}
// Server describes a connection to Postgres.
// Also it contains metrics map and query overrides.
type Server struct {
db *sql.DB
labels prometheus.Labels
// Last version used to calculate metric map. If mismatch on scrape, // Last version used to calculate metric map. If mismatch on scrape,
// then maps are recalculated. // then maps are recalculated.
@ -706,52 +738,271 @@ type Exporter struct {
mappingMtx sync.RWMutex mappingMtx sync.RWMutex
} }
// NewExporter returns a new PostgreSQL exporter for the provided DSN. // ServerOpt configures a server.
func NewExporter(dsn string, disableDefaultMetrics bool, userQueriesPath string) *Exporter { type ServerOpt func(*Server)
return &Exporter{
builtinMetricMaps: builtinMetricMaps, // ServerWithLabels configures a set of labels.
dsn: dsn, func ServerWithLabels(labels prometheus.Labels) ServerOpt {
disableDefaultMetrics: disableDefaultMetrics, return func(s *Server) {
userQueriesPath: userQueriesPath, for k, v := range labels {
duration: prometheus.NewGauge(prometheus.GaugeOpts{ s.labels[k] = v
Namespace: namespace, }
Subsystem: exporter,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from PostgresSQL.",
ConstLabels: newConstLabels(),
}),
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrapes_total",
Help: "Total number of times PostgresSQL was scraped for metrics.",
ConstLabels: newConstLabels(),
}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
ConstLabels: newConstLabels(),
}),
psqlUp: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).",
ConstLabels: newConstLabels(),
}),
userQueriesError: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "user_queries_load_error",
Help: "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).",
ConstLabels: newConstLabels(),
}, []string{"filename", "hashsum"}),
metricMap: nil,
queryOverrides: nil,
} }
} }
// NewServer establishes a new connection using DSN.
func NewServer(dsn string, opts ...ServerOpt) (*Server, error) {
	fingerprint, err := parseFingerprint(dsn)
	if err != nil {
		return nil, err
	}

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, err
	}
	// One connection is enough for a scraper; keep it idle between scrapes.
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)

	log.Infof("Established new database connection to %q.", fingerprint)

	server := &Server{
		db: db,
		labels: prometheus.Labels{
			serverLabelName: fingerprint,
		},
	}
	// Apply caller-supplied options (e.g. extra labels) last.
	for _, opt := range opts {
		opt(server)
	}
	return server, nil
}
// Close disconnects from Postgres.
// It is safe to call on an already-closed server; subsequent calls are no-ops.
func (s *Server) Close() error {
	if s.db == nil {
		return nil
	}
	err := s.db.Close()
	s.db = nil
	return err
}
// Ping checks connection availability and possibly invalidates the connection if it fails.
func (s *Server) Ping() error {
	err := s.db.Ping()
	if err == nil {
		return nil
	}
	// The connection is unusable: close it and drop the handle so the
	// caller (Servers.GetServer) will re-establish it next time.
	if cerr := s.db.Close(); cerr != nil {
		log.Infof("Error while closing non-pinging DB connection to %q: %v", s, cerr)
	}
	s.db = nil
	return err
}
// String returns server's fingerprint (the value stored under the
// "server" metric label, set by NewServer). Implementing fmt.Stringer
// lets a *Server be passed directly to %q/%s logging verbs.
func (s *Server) String() string {
return s.labels[serverLabelName]
}
// Scrape loads metrics: pg_settings (unless disabled) followed by the
// namespace-mapped queries. Any failure is counted on errGauge rather
// than aborting the scrape.
func (s *Server) Scrape(ch chan<- prometheus.Metric, errGauge prometheus.Gauge, disableSettingsMetrics bool) {
	// Hold the read lock so the metric maps are not rebuilt mid-scrape.
	s.mappingMtx.RLock()
	defer s.mappingMtx.RUnlock()

	if !disableSettingsMetrics {
		err := querySettings(ch, s)
		if err != nil {
			log.Infof("Error retrieving settings: %s", err)
			errGauge.Inc()
		}
	}

	if errMap := queryNamespaceMappings(ch, s); len(errMap) > 0 {
		errGauge.Inc()
	}
}
// Servers contains a collection of servers to Postgres.
// All access to the underlying map goes through GetServer/Close, which
// serialize on m, so a Servers value may be shared between scrapes.
type Servers struct {
// m guards servers.
m sync.Mutex
// servers caches established connections, keyed by DSN.
servers map[string]*Server
// opts are applied to every Server this collection creates.
opts []ServerOpt
}
// NewServers creates a collection of servers to Postgres.
// The given options are applied to each connection established later
// via GetServer.
func NewServers(opts ...ServerOpt) *Servers {
	s := &Servers{opts: opts}
	s.servers = make(map[string]*Server)
	return s
}
// GetServer returns established connection from a collection.
// A connection is dialed on first use for a DSN and cached; it is
// pinged on every call and evicted from the cache if the ping fails.
func (s *Servers) GetServer(dsn string) (*Server, error) {
	s.m.Lock()
	defer s.m.Unlock()

	server, ok := s.servers[dsn]
	if !ok {
		created, err := NewServer(dsn, s.opts...)
		if err != nil {
			return nil, err
		}
		s.servers[dsn] = created
		server = created
	}
	if err := server.Ping(); err != nil {
		// Ping already invalidated the connection; forget it so the
		// next call re-dials.
		delete(s.servers, dsn)
		return nil, err
	}
	return server, nil
}
// Close disconnects from all known servers.
// Failures are logged per-server; closing continues for the rest.
func (s *Servers) Close() {
	s.m.Lock()
	defer s.m.Unlock()

	for _, server := range s.servers {
		err := server.Close()
		if err != nil {
			log.Errorf("failed to close connection to %q: %v", server, err)
		}
	}
}
// Exporter collects Postgres metrics. It implements prometheus.Collector.
type Exporter struct {
// Holds a reference to the build in column mappings. Currently this is for testing purposes
// only, since it just points to the global.
builtinMetricMaps map[string]map[string]ColumnMapping
// Feature toggles, set via the ExporterOpt functions below.
disableDefaultMetrics, disableSettingsMetrics bool
// dsn holds one connection string per Postgres instance to scrape.
dsn []string
// userQueriesPath optionally points at a YAML file of custom queries.
userQueriesPath string
// constantLabels are attached to every metric this exporter emits.
constantLabels prometheus.Labels
// Internal self-monitoring metrics for the exporter itself.
duration prometheus.Gauge
error prometheus.Gauge
psqlUp prometheus.Gauge
userQueriesError *prometheus.GaugeVec
totalScrapes prometheus.Counter
// servers are used to allow re-using the DB connection between scrapes.
// servers contains metrics map and query overrides.
servers *Servers
}
// ExporterOpt configures Exporter.
type ExporterOpt func(*Exporter)
// DisableDefaultMetrics configures default metrics export.
func DisableDefaultMetrics(b bool) ExporterOpt {
return func(e *Exporter) {
e.disableDefaultMetrics = b
}
}
// DisableSettingsMetrics configures pg_settings export.
func DisableSettingsMetrics(b bool) ExporterOpt {
return func(e *Exporter) {
e.disableSettingsMetrics = b
}
}
// WithUserQueriesPath configures the path of the user's custom-queries
// YAML file.
func WithUserQueriesPath(p string) ExporterOpt {
	return func(e *Exporter) { e.userQueriesPath = p }
}
// WithConstantLabels configures constant labels from a comma-separated
// "key=value" list.
func WithConstantLabels(s string) ExporterOpt {
	return func(e *Exporter) { e.constantLabels = parseConstLabels(s) }
}
// parseConstLabels converts a comma-separated list of "key=value" pairs
// into prometheus.Labels. Malformed pairs are logged and skipped; pairs
// with an empty key or value are silently dropped.
func parseConstLabels(s string) prometheus.Labels {
	labels := make(prometheus.Labels)
	s = strings.TrimSpace(s)
	if s == "" {
		// Nothing configured (the flag's default): return early so the
		// empty string does not log a spurious format error below.
		return labels
	}
	parts := strings.Split(s, ",")
	for _, p := range parts {
		keyValue := strings.Split(strings.TrimSpace(p), "=")
		if len(keyValue) != 2 {
			log.Errorf(`Wrong constant labels format %q, should be "key=value"`, p)
			continue
		}
		key := strings.TrimSpace(keyValue[0])
		value := strings.TrimSpace(keyValue[1])
		if key == "" || value == "" {
			continue
		}
		labels[key] = value
	}
	return labels
}
// NewExporter returns a new PostgreSQL exporter for the provided DSNs,
// configured by the given options.
func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
	e := &Exporter{
		builtinMetricMaps: builtinMetricMaps,
		dsn:               dsn,
	}
	// Apply functional options first so setup below sees the final config.
	for _, o := range opts {
		o(e)
	}
	e.setupInternalMetrics()
	e.setupServers()
	return e
}
// setupServers initializes the server collection, propagating the
// exporter's constant labels to every server created from it.
func (e *Exporter) setupServers() {
	e.servers = NewServers(ServerWithLabels(e.constantLabels))
}
// setupInternalMetrics creates the exporter's own metrics (scrape
// duration, scrape count, error flag, server-up gauge and the per-file
// user-queries load status), all stamped with the configured constant
// labels.
func (e *Exporter) setupInternalMetrics() {
	e.duration = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace:   namespace,
		Subsystem:   exporter,
		Name:        "last_scrape_duration_seconds",
		// Fixed typo: "PostgresSQL" -> "PostgreSQL".
		Help:        "Duration of the last scrape of metrics from PostgreSQL.",
		ConstLabels: e.constantLabels,
	})
	e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace:   namespace,
		Subsystem:   exporter,
		Name:        "scrapes_total",
		// Fixed typo: "PostgresSQL" -> "PostgreSQL".
		Help:        "Total number of times PostgreSQL was scraped for metrics.",
		ConstLabels: e.constantLabels,
	})
	e.error = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace:   namespace,
		Subsystem:   exporter,
		Name:        "last_scrape_error",
		Help:        "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
		ConstLabels: e.constantLabels,
	})
	// "up" is deliberately unqualified by the exporter subsystem: it
	// reports server reachability, not exporter internals.
	e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace:   namespace,
		Name:        "up",
		Help:        "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).",
		ConstLabels: e.constantLabels,
	})
	e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace:   namespace,
		Subsystem:   exporter,
		Name:        "user_queries_load_error",
		Help:        "Whether the user queries file was loaded and parsed successfully (1 for error, 0 for success).",
		ConstLabels: e.constantLabels,
	}, []string{"filename", "hashsum"})
}
// Describe implements prometheus.Collector. // Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate // We cannot know in advance what metrics the exporter will generate
@ -764,7 +1015,6 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// don't detect inconsistent metrics created by this exporter // don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored Postgres instance may change the // itself. Also, a change in the monitored Postgres instance may change the
// exported metrics during the runtime of the exporter. // exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric) metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{}) doneCh := make(chan struct{})
@ -791,40 +1041,18 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.userQueriesError.Collect(ch) e.userQueriesError.Collect(ch)
} }
func newConstLabels() prometheus.Labels { func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc {
if constantLabelsList == nil || *constantLabelsList == "" {
return nil
}
var constLabels = make(prometheus.Labels)
parts := strings.Split(*constantLabelsList, ",")
for _, p := range parts {
keyValue := strings.Split(strings.TrimSpace(p), "=")
if len(keyValue) != 2 {
continue
}
key := strings.TrimSpace(keyValue[0])
value := strings.TrimSpace(keyValue[1])
if key == "" || value == "" {
continue
}
constLabels[key] = value
}
return constLabels
}
func newDesc(subsystem, name, help string) *prometheus.Desc {
return prometheus.NewDesc( return prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, name), prometheus.BuildFQName(namespace, subsystem, name),
help, nil, newConstLabels(), help, nil, labels,
) )
} }
// Query within a namespace mapping and emit metrics. Returns fatal errors if // Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal. // the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace, queryOverrides map[string]string) ([]error, error) { func queryNamespaceMapping(ch chan<- prometheus.Metric, server *Server, namespace string, mapping MetricMapNamespace) ([]error, error) {
// Check for a query override for this namespace // Check for a query override for this namespace
query, found := queryOverrides[namespace] query, found := server.queryOverrides[namespace]
// Was this query disabled (i.e. nothing sensible can be queried on cu // Was this query disabled (i.e. nothing sensible can be queried on cu
// version of PostgreSQL? // version of PostgreSQL?
@ -840,12 +1068,12 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
if !found { if !found {
// I've no idea how to avoid this properly at the moment, but this is // I've no idea how to avoid this properly at the moment, but this is
// an admin tool so you're not injecting SQL right? // an admin tool so you're not injecting SQL right?
rows, err = db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas, safesql
} else { } else {
rows, err = db.Query(query) // nolint: safesql rows, err = server.db.Query(query) // nolint: safesql
} }
if err != nil { if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error running query on database: ", namespace, err)) return []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err)
} }
defer rows.Close() // nolint: errcheck defer rows.Close() // nolint: errcheck
@ -875,10 +1103,10 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
} }
// Get the label values for this row // Get the label values for this row.
var labels = make([]string, len(mapping.labels)) labels := make([]string, len(mapping.labels))
for idx, columnName := range mapping.labels { for idx, label := range mapping.labels {
labels[idx], _ = dbToString(columnData[columnIdx[columnName]]) labels[idx], _ = dbToString(columnData[columnIdx[label]])
} }
// Loop over column names, and match to scan data. Unknown columns // Loop over column names, and match to scan data. Unknown columns
@ -902,7 +1130,7 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
} else { } else {
// Unknown metric. Report as untyped if scan to float64 works, else note an error too. // Unknown metric. Report as untyped if scan to float64 works, else note an error too.
metricLabel := fmt.Sprintf("%s_%s", namespace, columnName) metricLabel := fmt.Sprintf("%s_%s", namespace, columnName)
desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, newConstLabels()) desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels)
// Its not an error to fail here, since the values are // Its not an error to fail here, since the values are
// unexpected anyway. // unexpected anyway.
@ -920,13 +1148,13 @@ func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace st
// Iterate through all the namespace mappings in the exporter and run their // Iterate through all the namespace mappings in the exporter and run their
// queries. // queries.
func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap map[string]MetricMapNamespace, queryOverrides map[string]string) map[string]error { func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error {
// Return a map of namespace -> errors // Return a map of namespace -> errors
namespaceErrors := make(map[string]error) namespaceErrors := make(map[string]error)
for namespace, mapping := range metricMap { for namespace, mapping := range server.metricMap {
log.Debugln("Querying namespace: ", namespace) log.Debugln("Querying namespace: ", namespace)
nonFatalErrors, err := queryNamespaceMapping(ch, db, namespace, mapping, queryOverrides) nonFatalErrors, err := queryNamespaceMapping(ch, server, namespace, mapping)
// Serious error - a namespace disappeared // Serious error - a namespace disappeared
if err != nil { if err != nil {
namespaceErrors[namespace] = err namespaceErrors[namespace] = err
@ -944,40 +1172,36 @@ func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap m
} }
// Check and update the exporters query maps if the version has changed. // Check and update the exporters query maps if the version has changed.
func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) error { func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error {
log.Debugln("Querying Postgres Version") log.Debugf("Querying Postgres Version on %q", server)
versionRow := db.QueryRow("SELECT version();") versionRow := server.db.QueryRow("SELECT version();")
var versionString string var versionString string
err := versionRow.Scan(&versionString) err := versionRow.Scan(&versionString)
if err != nil { if err != nil {
return fmt.Errorf("Error scanning version string: %v", err) return fmt.Errorf("Error scanning version string on %q: %v", server, err)
} }
semanticVersion, err := parseVersion(versionString) semanticVersion, err := parseVersion(versionString)
if err != nil { if err != nil {
return fmt.Errorf("Error parsing version string: %v", err) return fmt.Errorf("Error parsing version string on %q: %v", server, err)
} }
if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) { if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) {
log.Warnln("PostgreSQL version is lower then our lowest supported version! Got", semanticVersion.String(), "minimum supported is", lowestSupportedVersion.String()) log.Warnf("PostgreSQL version is lower on %q then our lowest supported version! Got %s minimum supported is %s.", server, semanticVersion, lowestSupportedVersion)
} }
// Check if semantic version changed and recalculate maps if needed. // Check if semantic version changed and recalculate maps if needed.
if semanticVersion.NE(e.lastMapVersion) || e.metricMap == nil { if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil {
log.Infoln("Semantic Version Changed:", e.lastMapVersion.String(), "->", semanticVersion.String()) log.Infof("Semantic Version Changed on %q: %s -> %s", server, server.lastMapVersion, semanticVersion)
e.mappingMtx.Lock() server.mappingMtx.Lock()
if e.disableDefaultMetrics { if e.disableDefaultMetrics {
e.metricMap = make(map[string]MetricMapNamespace) server.metricMap = make(map[string]MetricMapNamespace)
server.queryOverrides = make(map[string]string)
} else { } else {
e.metricMap = makeDescMap(semanticVersion, e.builtinMetricMaps) server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps)
server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
} }
if e.disableDefaultMetrics { server.lastMapVersion = semanticVersion
e.queryOverrides = make(map[string]string)
} else {
e.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
}
e.lastMapVersion = semanticVersion
if e.userQueriesPath != "" { if e.userQueriesPath != "" {
// Clear the metric while a reload is happening // Clear the metric while a reload is happening
@ -991,7 +1215,7 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) err
} else { } else {
hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData)) hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData))
if err := addQueries(userQueriesData, semanticVersion, e.metricMap, e.queryOverrides); err != nil { if err := addQueries(userQueriesData, semanticVersion, server); err != nil {
log.Errorln("Failed to reload user queries:", e.userQueriesPath, err) log.Errorln("Failed to reload user queries:", e.userQueriesPath, err)
e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1) e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1)
} else { } else {
@ -1001,98 +1225,50 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) err
} }
} }
e.mappingMtx.Unlock() server.mappingMtx.Unlock()
} }
// Output the version as a special metric // Output the version as a special metric
versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName), versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),
"Version string as reported by postgres", []string{"version", "short_version"}, newConstLabels()) "Version string as reported by postgres", []string{"version", "short_version"}, server.labels)
ch <- prometheus.MustNewConstMetric(versionDesc, ch <- prometheus.MustNewConstMetric(versionDesc,
prometheus.UntypedValue, 1, versionString, semanticVersion.String()) prometheus.UntypedValue, 1, versionString, semanticVersion.String())
return nil return nil
} }
func (e *Exporter) getDB(conn string) (*sql.DB, error) {
// Has dsn changed?
if (e.dbConnection != nil) && (e.dsn != e.dbDsn) {
err := e.dbConnection.Close()
log.Warnln("Error while closing obsolete DB connection:", err)
e.dbConnection = nil
e.dbDsn = ""
}
if e.dbConnection == nil {
d, err := sql.Open("postgres", conn)
if err != nil {
return nil, err
}
d.SetMaxOpenConns(1)
d.SetMaxIdleConns(1)
e.dbConnection = d
e.dbDsn = e.dsn
log.Infoln("Established new database connection.")
}
// Always send a ping and possibly invalidate the connection if it fails
if err := e.dbConnection.Ping(); err != nil {
cerr := e.dbConnection.Close()
log.Infoln("Error while closing non-pinging DB connection:", cerr)
e.dbConnection = nil
e.psqlUp.Set(0)
return nil, err
}
return e.dbConnection, nil
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) { func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
defer func(begun time.Time) { defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds()) e.duration.Set(time.Since(begun).Seconds())
}(time.Now()) }(time.Now())
e.error.Set(0) e.error.Set(0)
e.psqlUp.Set(0)
e.totalScrapes.Inc() e.totalScrapes.Inc()
db, err := e.getDB(e.dsn) for _, dsn := range e.dsn {
if err != nil { server, err := e.servers.GetServer(dsn)
loggableDsn := "could not parse DATA_SOURCE_NAME" if err != nil {
// If the DSN is parseable, log it with a blanked out password loggableDSN := "could not parse DATA_SOURCE_NAME"
pDsn, pErr := url.Parse(e.dsn) pDSN, pErr := parseDSN(dsn)
if pErr == nil { if pErr == nil {
// Blank user info if not nil loggableDSN = pDSN.String()
if pDsn.User != nil {
pDsn.User = url.UserPassword(pDsn.User.Username(), "PASSWORD_REMOVED")
} }
loggableDsn = pDsn.String() log.Infof("Error opening connection to database (%s): %v", loggableDSN, err)
e.error.Inc()
continue
} }
log.Infof("Error opening connection to database (%s): %s", loggableDsn, err)
e.psqlUp.Set(0)
e.error.Set(1)
return
}
// Didn't fail, can mark connection as up for this scrape. // Didn't fail, can mark connection as up for this scrape.
e.psqlUp.Set(1) e.psqlUp.Inc()
// Check if map versions need to be updated // Check if map versions need to be updated
if err := e.checkMapVersions(ch, db); err != nil { if err := e.checkMapVersions(ch, server); err != nil {
log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err) log.Warnln("Proceeding with outdated query maps, as the Postgres version could not be determined:", err)
e.error.Set(1) e.error.Inc()
} }
// Lock the exporter maps server.Scrape(ch, e.error, e.disableSettingsMetrics)
e.mappingMtx.RLock()
defer e.mappingMtx.RUnlock()
if err := querySettings(ch, db); err != nil {
log.Infof("Error retrieving settings: %s", err)
e.error.Set(1)
}
errMap := queryNamespaceMappings(ch, db, e.metricMap, e.queryOverrides)
if len(errMap) > 0 {
e.error.Set(1)
} }
} }
@ -1100,7 +1276,7 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
// DATA_SOURCE_NAME always wins so we do not break older versions // DATA_SOURCE_NAME always wins so we do not break older versions
// reading secrets from files wins over secrets in environment variables // reading secrets from files wins over secrets in environment variables
// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS} // DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS}
func getDataSource() string { func getDataSources() []string {
var dsn = os.Getenv("DATA_SOURCE_NAME") var dsn = os.Getenv("DATA_SOURCE_NAME")
if len(dsn) == 0 { if len(dsn) == 0 {
var user string var user string
@ -1129,9 +1305,10 @@ func getDataSource() string {
ui := url.UserPassword(user, pass).String() ui := url.UserPassword(user, pass).String()
uri := os.Getenv("DATA_SOURCE_URI") uri := os.Getenv("DATA_SOURCE_URI")
dsn = "postgresql://" + ui + "@" + uri dsn = "postgresql://" + ui + "@" + uri
}
return dsn return []string{dsn}
}
return strings.Split(dsn, ",")
} }
func main() { func main() {
@ -1155,16 +1332,19 @@ func main() {
return return
} }
dsn := getDataSource() dsn := getDataSources()
if len(dsn) == 0 { if len(dsn) == 0 {
log.Fatal("couldn't find environment variables describing the datasource to use") log.Fatal("couldn't find environment variables describing the datasource to use")
} }
exporter := NewExporter(dsn, *disableDefaultMetrics, *queriesPath) exporter := NewExporter(dsn,
DisableDefaultMetrics(*disableDefaultMetrics),
DisableSettingsMetrics(*disableSettingsMetrics),
WithUserQueriesPath(*queriesPath),
WithConstantLabels(*constantLabelsList),
)
defer func() { defer func() {
if exporter.dbConnection != nil { exporter.servers.Close()
exporter.dbConnection.Close() // nolint: errcheck
}
}() }()
prometheus.MustRegister(exporter) prometheus.MustRegister(exporter)

View File

@ -7,11 +7,11 @@ package main
import ( import (
"os" "os"
"strings"
"testing" "testing"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
"database/sql"
"fmt" "fmt"
_ "github.com/lib/pq" _ "github.com/lib/pq"
@ -31,7 +31,7 @@ func (s *IntegrationSuite) SetUpSuite(c *C) {
dsn := os.Getenv("DATA_SOURCE_NAME") dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "") c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(dsn, false, "") exporter := NewExporter(strings.Split(dsn, ","))
c.Assert(exporter, NotNil) c.Assert(exporter, NotNil)
// Assign the exporter to the suite // Assign the exporter to the suite
s.e = exporter s.e = exporter
@ -48,29 +48,31 @@ func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
} }
}() }()
// Open a database connection for _, dsn := range s.e.dsn {
db, err := sql.Open("postgres", s.e.dsn) // Open a database connection
c.Assert(db, NotNil) server, err := NewServer(dsn)
c.Assert(err, IsNil) c.Assert(server, NotNil)
defer db.Close() c.Assert(err, IsNil)
// Do a version update // Do a version update
err = s.e.checkMapVersions(ch, db) err = s.e.checkMapVersions(ch, server)
c.Assert(err, IsNil) c.Assert(err, IsNil)
err = querySettings(ch, db) err = querySettings(ch, server)
if !c.Check(err, Equals, nil) { if !c.Check(err, Equals, nil) {
fmt.Println("## ERRORS FOUND") fmt.Println("## ERRORS FOUND")
fmt.Println(err) fmt.Println(err)
}
// This should never happen in our test cases.
errMap := queryNamespaceMappings(ch, db, s.e.metricMap, s.e.queryOverrides)
if !c.Check(len(errMap), Equals, 0) {
fmt.Println("## NAMESPACE ERRORS FOUND")
for namespace, err := range errMap {
fmt.Println(namespace, ":", err)
} }
// This should never happen in our test cases.
errMap := queryNamespaceMappings(ch, server)
if !c.Check(len(errMap), Equals, 0) {
fmt.Println("## NAMESPACE ERRORS FOUND")
for namespace, err := range errMap {
fmt.Println(namespace, ":", err)
}
}
server.Close()
} }
} }
@ -86,12 +88,12 @@ func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
}() }()
// Send a bad DSN // Send a bad DSN
exporter := NewExporter("invalid dsn", false, *queriesPath) exporter := NewExporter([]string{"invalid dsn"})
c.Assert(exporter, NotNil) c.Assert(exporter, NotNil)
exporter.scrape(ch) exporter.scrape(ch)
// Send a DSN to a non-listening port. // Send a DSN to a non-listening port.
exporter = NewExporter("postgresql://nothing:nothing@127.0.0.1:1/nothing", false, *queriesPath) exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"})
c.Assert(exporter, NotNil) c.Assert(exporter, NotNil)
exporter.scrape(ch) exporter.scrape(ch)
} }
@ -109,7 +111,7 @@ func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
dsn := os.Getenv("DATA_SOURCE_NAME") dsn := os.Getenv("DATA_SOURCE_NAME")
c.Assert(dsn, Not(Equals), "") c.Assert(dsn, Not(Equals), "")
exporter := NewExporter(dsn, false, "") exporter := NewExporter(strings.Split(dsn, ","))
c.Assert(exporter, NotNil) c.Assert(exporter, NotNil)
// Convert the default maps into a list of empty maps. // Convert the default maps into a list of empty maps.

View File

@ -10,6 +10,7 @@ import (
"os" "os"
"github.com/blang/semver" "github.com/blang/semver"
"github.com/prometheus/client_golang/prometheus"
) )
// Hook up gocheck into the "go test" runner. // Hook up gocheck into the "go test" runner.
@ -34,7 +35,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
{ {
// No metrics should be eliminated // No metrics should be eliminated
resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap) resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap)
c.Check( c.Check(
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
Equals, Equals,
@ -55,7 +56,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric
// Discard metric should be discarded // Discard metric should be discarded
resultMap := makeDescMap(semver.MustParse("0.0.1"), testMetricMap) resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap)
c.Check( c.Check(
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
Equals, Equals,
@ -76,7 +77,7 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric testMetricMap["test_namespace"]["metric_which_discards"] = discardableMetric
// Discard metric should be discarded // Discard metric should be discarded
resultMap := makeDescMap(semver.MustParse("0.0.2"), testMetricMap) resultMap := makeDescMap(semver.MustParse("0.0.2"), prometheus.Labels{}, testMetricMap)
c.Check( c.Check(
resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, resultMap["test_namespace"].columnMappings["metric_which_stays"].discard,
Equals, Equals,
@ -92,7 +93,6 @@ func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) {
// test read username and password from file // test read username and password from file
func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) { func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) {
err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file")
c.Assert(err, IsNil) c.Assert(err, IsNil)
defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE")
@ -107,29 +107,33 @@ func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) {
var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable" var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable"
dsn := getDataSource() dsn := getDataSources()
if dsn != expected { if len(dsn) == 0 {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, expected) c.Errorf("Expected one data source, zero found")
}
if dsn[0] != expected {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], expected)
} }
} }
// test read DATA_SOURCE_NAME from environment // test read DATA_SOURCE_NAME from environment
func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) { func (s *FunctionalSuite) TestEnvironmentSettingWithDns(c *C) {
envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled" envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled"
err := os.Setenv("DATA_SOURCE_NAME", envDsn) err := os.Setenv("DATA_SOURCE_NAME", envDsn)
c.Assert(err, IsNil) c.Assert(err, IsNil)
defer UnsetEnvironment(c, "DATA_SOURCE_NAME") defer UnsetEnvironment(c, "DATA_SOURCE_NAME")
dsn := getDataSource() dsn := getDataSources()
if dsn != envDsn { if len(dsn) == 0 {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, envDsn) c.Errorf("Expected one data source, zero found")
}
if dsn[0] != envDsn {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn)
} }
} }
// test DATA_SOURCE_NAME is used even if username and password environment variables are set // test DATA_SOURCE_NAME is used even if username and password environment variables are set
func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) { func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) {
envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled" envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled"
err := os.Setenv("DATA_SOURCE_NAME", envDsn) err := os.Setenv("DATA_SOURCE_NAME", envDsn)
c.Assert(err, IsNil) c.Assert(err, IsNil)
@ -143,9 +147,12 @@ func (s *FunctionalSuite) TestEnvironmentSettingWithDnsAndSecrets(c *C) {
c.Assert(err, IsNil) c.Assert(err, IsNil)
defer UnsetEnvironment(c, "DATA_SOURCE_PASS") defer UnsetEnvironment(c, "DATA_SOURCE_PASS")
dsn := getDataSource() dsn := getDataSources()
if dsn != envDsn { if len(dsn) == 0 {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn, envDsn) c.Errorf("Expected one data source, zero found")
}
if dsn[0] != envDsn {
c.Errorf("Expected Username to be read from file. Found=%v, expected=%v", dsn[0], envDsn)
} }
} }