Mirror of https://github.com/prometheus-community/postgres_exporter
Use the pg_settings view to retrieve runtime variables
Adds all bool/integer/real runtime variables that can be retrieved this way.

Use the `pg_settings` view to retrieve runtime variables:
https://www.postgresql.org/docs/current/static/view-pg-settings.html

Replaces the use of `SHOW` to retrieve runtime variables. Only runtime variables with a `vartype` of `bool`, `real`, or `integer` are currently supported. Uses the `short_desc` field as the metric description.

This commit deprecates the following metric names:

```
pg_runtime_variable_max_connections
pg_runtime_variable_max_files_per_process
pg_runtime_variable_max_function_args
pg_runtime_variable_max_identifier_length
pg_runtime_variable_max_index_keys
pg_runtime_variable_max_locks_per_transaction
pg_runtime_variable_max_pred_locks_per_transaction
pg_runtime_variable_max_prepared_transactions
pg_runtime_variable_max_standby_archive_delay_milliseconds
pg_runtime_variable_max_standby_streaming_delay_milliseconds
pg_runtime_variable_max_wal_senders
```

They are replaced by equivalent names under `pg_settings`, with the exception of

```
pg_runtime_variable_max_standby_archive_delay_milliseconds
pg_runtime_variable_max_standby_streaming_delay_milliseconds
```

which are replaced with

```
pg_settings_max_standby_archive_delay_seconds
pg_settings_max_standby_streaming_delay_seconds
```

Adds approximately 195 new metrics across all supported PostgreSQL versions.
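Not part of the commit itself, but for context: a minimal standalone sketch of the query the new collector relies on, wrapped in a throwaway program. It assumes a locally reachable PostgreSQL instance (the DSN is a placeholder) and uses the `github.com/lib/pq` driver the exporter already depends on.

```go
// Hypothetical helper, not part of this commit: dump the same pg_settings
// rows that querySettings() consumes.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // driver already used by the exporter
)

func main() {
	// Placeholder DSN; point it at any reachable PostgreSQL instance.
	db, err := sql.Open("postgres", "postgres://localhost:5432/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same filter the exporter uses: only bool/integer/real settings become gauges.
	rows, err := db.Query("SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name, setting, unit, shortDesc, vartype string
		if err := rows.Scan(&name, &setting, &unit, &shortDesc, &vartype); err != nil {
			log.Fatal(err)
		}
		// e.g. "max_connections=100 (integer, unit ""): Sets the maximum number of concurrent connections."
		fmt.Printf("%s=%s (%s, unit %q): %s\n", name, setting, vartype, unit, shortDesc)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```

Each returned row is turned into a `pg_settings_<name>` gauge, with time units normalised to seconds and size units to bytes.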
Commit 98ba566322 (parent 994be318d4)
pg_setting.go (new file, 132 lines)
@@ -0,0 +1,132 @@
package main

import (
	"database/sql"
	"errors"
	"fmt"
	"math"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
)

// Query the pg_settings view containing runtime variables
func querySettings(ch chan<- prometheus.Metric, db *sql.DB) error {
	log.Debugln("Querying pg_setting view")

	// pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html
	//
	// NOTE: If you add more vartypes here, you must update the supported
	// types in normaliseUnit() below
	query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real');"

	rows, err := db.Query(query)
	if err != nil {
		return errors.New(fmt.Sprintln("Error running query on database: ", namespace, err))
	}
	defer rows.Close()

	for rows.Next() {
		s := &pgSetting{}
		err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype)
		if err != nil {
			return errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
		}

		ch <- s.metric()
	}

	return nil
}

// pgSetting represents a PostgreSQL runtime variable as returned by the
// pg_settings view.
type pgSetting struct {
	name, setting, unit, shortDesc, vartype string
}

func (s *pgSetting) metric() prometheus.Metric {
	var (
		err       error
		name      = s.name
		unit      = s.unit
		shortDesc = s.shortDesc
		subsystem = "settings"
		val       float64
	)

	switch s.vartype {
	case "bool":
		if s.setting == "on" {
			val = 1
		}
	case "integer", "real":
		if val, unit, err = s.normaliseUnit(); err != nil {
			// Panic, since we should recognise all units
			// and don't want to silently exclude metrics
			panic(err)
		}

		if len(unit) > 0 {
			name = fmt.Sprintf("%s_%s", name, unit)
			shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit)
		}
	default:
		// Panic because we got a type we didn't ask for
		panic(fmt.Sprintf("Unsupported vartype %q", s.vartype))
	}

	desc := newDesc(subsystem, name, shortDesc)
	return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val)
}

func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) {
	val, err = strconv.ParseFloat(s.setting, 64)
	if err != nil {
		return val, unit, errors.New(fmt.Sprintf("Error converting setting %q value %q to float: %s", s.name, s.setting, err))
	}

	// Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html
	switch s.unit {
	case "":
		return
	case "ms", "s", "min", "h", "d":
		unit = "seconds"
	case "kB", "MB", "GB", "TB", "8kB", "16MB":
		unit = "bytes"
	default:
		err = errors.New(fmt.Sprintf("Unknown unit for runtime variable: %q", s.unit))
		return
	}

	// -1 is special, don't modify the value
	if val == -1 {
		return
	}

	switch s.unit {
	case "ms":
		val /= 1000
	case "min":
		val *= 60
	case "h":
		val *= 60 * 60
	case "d":
		val *= 60 * 60 * 24
	case "kB":
		val *= math.Pow(2, 10)
	case "MB":
		val *= math.Pow(2, 20)
	case "GB":
		val *= math.Pow(2, 30)
	case "TB":
		val *= math.Pow(2, 40)
	case "8kB":
		val *= math.Pow(2, 13)
	case "16MB":
		val *= math.Pow(2, 24)
	}

	return
}
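To make the naming and unit conversion above concrete, here are a few illustrative series as they would appear on /metrics, assuming typical PostgreSQL defaults (not output taken from this commit):

```
pg_settings_max_connections 100
pg_settings_checkpoint_timeout_seconds 300
pg_settings_shared_buffers_bytes 1.34217728e+08
pg_settings_autovacuum 1
```

Settings with a time unit gain a `_seconds` suffix, size units gain `_bytes`, and booleans are exported as 0/1 gauges without a unit suffix.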
pg_setting_test.go (new file, 191 lines)
@@ -0,0 +1,191 @@
// +build !integration

package main

import (
	dto "github.com/prometheus/client_model/go"
	. "gopkg.in/check.v1"
)

type PgSettingSuite struct{}

var _ = Suite(&PgSettingSuite{})

var fixtures = []fixture{
	fixture{
		p: pgSetting{
			name:      "seconds_fixture_metric",
			setting:   "5",
			unit:      "s",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  5,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_seconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: 5,
	},
	fixture{
		p: pgSetting{
			name:      "milliseconds_fixture_metric",
			setting:   "5000",
			unit:      "ms",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  5,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_milliseconds_fixture_metric_seconds\", help: \"Foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: 5,
	},
	fixture{
		p: pgSetting{
			name:      "eight_kb_fixture_metric",
			setting:   "17",
			unit:      "8kB",
			shortDesc: "Foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  139264,
			unit: "bytes",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_eight_kb_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
		v: 139264,
	},
	fixture{
		p: pgSetting{
			name:      "16_mb_real_fixture_metric",
			setting:   "3.0",
			unit:      "16MB",
			shortDesc: "Foo foo foo",
			vartype:   "real",
		},
		n: normalised{
			val:  5.0331648e+07,
			unit: "bytes",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_16_mb_real_fixture_metric_bytes\", help: \"Foo foo foo [Units converted to bytes.]\", constLabels: {}, variableLabels: []}",
		v: 5.0331648e+07,
	},
	fixture{
		p: pgSetting{
			name:      "bool_on_fixture_metric",
			setting:   "on",
			unit:      "",
			shortDesc: "Foo foo foo",
			vartype:   "bool",
		},
		n: normalised{
			val:  1,
			unit: "",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_bool_on_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
		v: 1,
	},
	fixture{
		p: pgSetting{
			name:      "bool_off_fixture_metric",
			setting:   "off",
			unit:      "",
			shortDesc: "Foo foo foo",
			vartype:   "bool",
		},
		n: normalised{
			val:  0,
			unit: "",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_bool_off_fixture_metric\", help: \"Foo foo foo\", constLabels: {}, variableLabels: []}",
		v: 0,
	},
	fixture{
		p: pgSetting{
			name:      "special_minus_one_value",
			setting:   "-1",
			unit:      "d",
			shortDesc: "foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  -1,
			unit: "seconds",
			err:  "",
		},
		d: "Desc{fqName: \"pg_settings_special_minus_one_value_seconds\", help: \"foo foo foo [Units converted to seconds.]\", constLabels: {}, variableLabels: []}",
		v: -1,
	},
	fixture{
		p: pgSetting{
			name:      "unknown_unit",
			setting:   "10",
			unit:      "nonexistent",
			shortDesc: "foo foo foo",
			vartype:   "integer",
		},
		n: normalised{
			val:  10,
			unit: "",
			err:  `Unknown unit for runtime variable: "nonexistent"`,
		},
	},
}

func (s *PgSettingSuite) TestNormaliseUnit(c *C) {
	for _, f := range fixtures {
		switch f.p.vartype {
		case "integer", "real":
			val, unit, err := f.p.normaliseUnit()

			c.Check(val, Equals, f.n.val)
			c.Check(unit, Equals, f.n.unit)

			if err == nil {
				c.Check("", Equals, f.n.err)
			} else {
				c.Check(err.Error(), Equals, f.n.err)
			}
		}
	}
}

func (s *PgSettingSuite) TestMetric(c *C) {
	defer func() {
		if r := recover(); r != nil {
			if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` {
				panic(r)
			}
		}
	}()

	for _, f := range fixtures {
		d := &dto.Metric{}
		m := f.p.metric()
		m.Write(d)

		c.Check(m.Desc().String(), Equals, f.d)
		c.Check(d.GetGauge().GetValue(), Equals, f.v)
	}
}

type normalised struct {
	val  float64
	unit string
	err  string
}

type fixture struct {
	p pgSetting
	n normalised
	d string
	v float64
}
@@ -95,12 +95,6 @@ const (
	DURATION ColumnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds)
)

// Special case metric mappings
const (
	// Which metric mapping should be acquired using "SHOW" queries
	SHOW_METRIC = "pg_runtime_variables"
)

// Regex used to get the "short-version" from the postgres version field.
var versionRegex = regexp.MustCompile(`^\w+ (\d+\.\d+\.\d+)`)
var lowestSupportedVersion = semver.MustParse("9.1.0")
@@ -147,24 +141,6 @@ type MetricMap struct {
	conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64
}

// Metric descriptors for dynamically created metrics.
var variableMaps = map[string]map[string]ColumnMapping{
	"pg_runtime_variable": {
		"max_connections": {GAUGE, "Sets the maximum number of concurrent connections.", nil, nil},
		"max_files_per_process": {GAUGE, "Sets the maximum number of simultaneously open files for each server process.", nil, nil},
		"max_function_args": {GAUGE, "Shows the maximum number of function arguments.", nil, nil},
		"max_identifier_length": {GAUGE, "Shows the maximum identifier length.", nil, nil},
		"max_index_keys": {GAUGE, "Shows the maximum number of index keys.", nil, nil},
		"max_locks_per_transaction": {GAUGE, "Sets the maximum number of locks per transaction.", nil, nil},
		"max_pred_locks_per_transaction": {GAUGE, "Sets the maximum number of predicate locks per transaction.", nil, nil},
		"max_prepared_transactions": {GAUGE, "Sets the maximum number of simultaneously prepared transactions.", nil, nil},
		//"max_stack_depth" : { GAUGE, "Sets the maximum number of concurrent connections.", nil }, // No dehumanize support yet
		"max_standby_archive_delay": {DURATION, "Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data.", nil, nil},
		"max_standby_streaming_delay": {DURATION, "Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data.", nil, nil},
		"max_wal_senders": {GAUGE, "Sets the maximum number of simultaneously running WAL sender processes.", nil, nil},
	},
}

// TODO: revisit this with the semver system
func dumpMaps() {
	for name, cmap := range metricMaps {
@@ -694,8 +670,6 @@ type Exporter struct {
	// Last version used to calculate metric map. If mismatch on scrape,
	// then maps are recalculated.
	lastMapVersion semver.Version
	// Currently active variable map
	variableMap map[string]MetricMapNamespace
	// Currently active metric map
	metricMap map[string]MetricMapNamespace
	// Currently active query overrides
@@ -726,7 +700,6 @@ func NewExporter(dsn string, userQueriesPath string) *Exporter {
			Name: "last_scrape_error",
			Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).",
		}),
		variableMap: nil,
		metricMap: nil,
		queryOverrides: nil,
	}
@@ -776,42 +749,6 @@ func newDesc(subsystem, name, help string) *prometheus.Desc {
	)
}

// Query the SHOW variables from the query map
// TODO: make this more functional
func queryShowVariables(ch chan<- prometheus.Metric, db *sql.DB, variableMap map[string]MetricMapNamespace) []error {
	log.Debugln("Querying SHOW variables")
	nonFatalErrors := []error{}

	for _, mapping := range variableMap {
		for columnName, columnMapping := range mapping.columnMappings {
			// Check for a discard request on this value
			if columnMapping.discard {
				continue
			}

			// Use SHOW to get the value
			row := db.QueryRow(fmt.Sprintf("SHOW %s;", columnName))

			var val interface{}
			err := row.Scan(&val)
			if err != nil {
				nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Error scanning runtime variable:", columnName, err)))
				continue
			}

			fval, ok := columnMapping.conversion(val)
			if !ok {
				nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, val)))
				continue
			}

			ch <- prometheus.MustNewConstMetric(columnMapping.desc, columnMapping.vtype, fval)
		}
	}

	return nonFatalErrors
}

// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace, queryOverrides map[string]string) ([]error, error) {
@@ -942,11 +879,10 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) err
	semanticVersion, err := parseVersion(versionString)

	// Check if semantic version changed and recalculate maps if needed.
	if semanticVersion.NE(e.lastMapVersion) || e.variableMap == nil || e.metricMap == nil {
	if semanticVersion.NE(e.lastMapVersion) || e.metricMap == nil {
		log.Infoln("Semantic Version Changed:", e.lastMapVersion.String(), "->", semanticVersion.String())
		e.mappingMtx.Lock()

		e.variableMap = makeDescMap(semanticVersion, variableMaps)
		e.metricMap = makeDescMap(semanticVersion, metricMaps)
		e.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
		e.lastMapVersion = semanticVersion
@@ -1016,9 +952,8 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
	// Lock the exporter maps
	e.mappingMtx.RLock()
	defer e.mappingMtx.RUnlock()
	// Handle querying the show variables
	nonFatalErrors := queryShowVariables(ch, db, e.variableMap)
	if len(nonFatalErrors) > 0 {
	if err := querySettings(ch, db); err != nil {
		log.Infof("Error retrieving settings: %s", err)
		e.error.Set(1)
	}
@@ -13,6 +13,7 @@ import (

	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
	"github.com/prometheus/client_golang/prometheus"
)
@@ -57,13 +58,10 @@ func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
	err = s.e.checkMapVersions(ch, db)
	c.Assert(err, IsNil)

	// Check the show variables work
	nonFatalErrors := queryShowVariables(ch, db, s.e.variableMap)
	if !c.Check(len(nonFatalErrors), Equals, 0) {
		fmt.Println("## NONFATAL ERRORS FOUND")
		for _, err := range nonFatalErrors {
			fmt.Println(err)
		}
	err = querySettings(ch, db)
	if !c.Check(err, Equals, nil) {
		fmt.Println("## ERRORS FOUND")
		fmt.Println(err)
	}

	// This should never happen in our test cases.