mirror of https://github.com/prometheus-community/postgres_exporter, synced 2025-05-07 10:28:01 +00:00

commit 47295e8fea (parent 67642ebf6e)
PMM-12154 compatibility improvements.
.gitignore (vendored): 5 lines changed
@@ -22,7 +22,4 @@
 /vendor
 /percona_tests/assets/postgres_exporter
 /percona_tests/assets/postgres_exporter_percona
-/percona_tests/assets/metrics.new.txt
-/percona_tests/assets/metrics.old.txt
-/percona_tests/assets/metrics.names.new.txt
-/percona_tests/assets/metrics.names.old.txt
+/percona_tests/assets/metrics.*
@@ -54,12 +54,14 @@ func (e *Exporter) discoverDatabaseDSNs() []string {
 			level.Error(logger).Log("msg", "Error opening connection to database", "dsn", loggableDSN(dsn), "err", err)
 			continue
 		}
+		server.dbMtx.Lock()
 		dsns[dsn] = struct{}{}
 
 		// If autoDiscoverDatabases is true, set first dsn as master database (Default: false)
 		server.master = true
 
 		databaseNames, err := queryDatabases(server)
+		server.dbMtx.Unlock()
 		if err != nil {
 			level.Error(logger).Log("msg", "Error querying databases", "dsn", loggableDSN(dsn), "err", err)
 			continue
@@ -101,6 +103,10 @@ func (e *Exporter) discoverDatabaseDSNs() []string {
 
 func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
 	server, err := e.servers.GetServer(dsn)
+	server.dbMtx.Lock()
+	defer server.dbMtx.Unlock()
+
+	level.Debug(logger).Log("msg", "scrapeDSN:"+dsn)
 
 	if err != nil {
 		return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())}
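The two hunks above pair up: the discovery loop writes under server.dbMtx while scrapeDSN holds the same mutex for the whole scrape. A minimal self-contained sketch of the pattern (simplified stand-in types, not the exporter's real ones):

package main

import (
	"fmt"
	"sync"
)

// server stands in for the exporter's Server; mu corresponds to dbMtx.
type server struct {
	mu  sync.Mutex
	dbs []string
}

// discover mutates shared state under the lock, as discoverDatabaseDSNs does.
func (s *server) discover(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.dbs = append(s.dbs, name)
}

// scrape reads shared state under the same lock, as scrapeDSN does.
func (s *server) scrape() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return len(s.dbs)
}

func main() {
	s := &server{}
	s.discover("postgres")
	fmt.Println(s.scrape()) // 1, with no data race between the two paths
}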
@@ -116,7 +122,7 @@ func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
 		level.Warn(logger).Log("msg", "Proceeding with outdated query maps, as the Postgres version could not be determined", "err", err)
 	}
 
-	return server.Scrape(ch, e.disableSettingsMetrics)
+	return server.Scrape(ch, e.disableSettingsMetrics, e.resolutionEnabled)
 }
 
 // try to get the DataSource
@@ -47,13 +47,13 @@ var (
 	disableDefaultMetrics  = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
 	disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
 	autoDiscoverDatabases  = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically. (DEPRECATED)").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool()
-	queriesPath            = kingpin.Flag("extend.query-path", "Path to custom queries to run. (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
-	onlyDumpMaps           = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
-	constantLabelsList     = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,). (DEPRECATED)").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String()
-	excludeDatabases       = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String()
-	includeDatabases       = kingpin.Flag("include-databases", "A list of databases to include when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_INCLUDE_DATABASES").String()
-	metricPrefix           = kingpin.Flag("metric-prefix", "A metric prefix can be used to have non-default (not \"pg\") prefixes for each of the metrics").Default("pg").Envar("PG_EXPORTER_METRIC_PREFIX").String()
-	logger                 = log.NewNopLogger()
+	//queriesPath        = kingpin.Flag("extend.query-path", "Path to custom queries to run. (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
+	onlyDumpMaps       = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
+	constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,). (DEPRECATED)").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String()
+	excludeDatabases   = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String()
+	includeDatabases   = kingpin.Flag("include-databases", "A list of databases to include when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_INCLUDE_DATABASES").String()
+	metricPrefix       = kingpin.Flag("metric-prefix", "A metric prefix can be used to have non-default (not \"pg\") prefixes for each of the metrics").Default("pg").Envar("PG_EXPORTER_METRIC_PREFIX").String()
+	logger             = log.NewNopLogger()
 )
 
 // Metric name parts.
@@ -98,9 +98,9 @@ func main() {
 	excludedDatabases := strings.Split(*excludeDatabases, ",")
 	logger.Log("msg", "Excluded databases", "databases", fmt.Sprintf("%v", excludedDatabases))
 
-	if *queriesPath != "" {
-		level.Warn(logger).Log("msg", "The extended queries.yaml config is DEPRECATED", "file", *queriesPath)
-	}
+	//if *queriesPath != "" {
+	//	level.Warn(logger).Log("msg", "The extended queries.yaml config is DEPRECATED", "file", *queriesPath)
+	//}
 
 	if *autoDiscoverDatabases || *excludeDatabases != "" || *includeDatabases != "" {
 		level.Warn(logger).Log("msg", "Scraping additional databases via auto discovery is DEPRECATED")
@@ -110,11 +110,15 @@ func main() {
 		level.Warn(logger).Log("msg", "Constant labels on all metrics is DEPRECATED")
 	}
 
+	servers := NewServers(ServerWithLabels(parseConstLabels(*constantLabelsList)))
+
 	opts := []ExporterOpt{
+		CollectorName("exporter"),
 		DisableDefaultMetrics(*disableDefaultMetrics),
 		DisableSettingsMetrics(*disableSettingsMetrics),
 		AutoDiscoverDatabases(*autoDiscoverDatabases),
 		WithConstantLabels(*constantLabelsList),
+		WithServers(servers),
 		ExcludeDatabases(excludedDatabases),
 		IncludeDatabases(*includeDatabases),
 	}
@@ -135,7 +139,7 @@ func main() {
 		dsn = dsns[0]
 	}
 
-	cleanup, hr, mr, lr := initializePerconaExporters(dsns, opts)
+	cleanup, hr, mr, lr := initializePerconaExporters(dsns, servers)
 	defer cleanup()
 
 	pe, err := collector.NewPostgresCollector(
@@ -183,7 +183,7 @@ func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace)
 
 // Iterate through all the namespace mappings in the exporter and run their
 // queries.
-func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error {
+func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server, res MetricResolution) map[string]error {
 	// Return a map of namespace -> errors
 	namespaceErrors := make(map[string]error)
 
@@ -16,9 +16,10 @@ import (
 type MetricResolution string
 
 const (
-	LR MetricResolution = "lr"
-	MR MetricResolution = "mr"
-	HR MetricResolution = "hr"
+	DISABLED MetricResolution = ""
+	LR       MetricResolution = "lr"
+	MR       MetricResolution = "mr"
+	HR       MetricResolution = "hr"
 )
 
 var (
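The const block is Go's usual typed-string enum; the new DISABLED constant is the empty string, so the zero value of MetricResolution now reads as "no resolution selected". A small self-contained illustration (the zero-value behavior follows from the type; the usage shown is an assumption):

package main

import "fmt"

type MetricResolution string

const (
	DISABLED MetricResolution = ""
	LR       MetricResolution = "lr"
	MR       MetricResolution = "mr"
	HR       MetricResolution = "hr"
)

func main() {
	var res MetricResolution     // zero value of the type
	fmt.Println(res == DISABLED) // true: an unset resolution is DISABLED
	res = HR
	fmt.Println(res) // "hr"
}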
@@ -30,71 +31,53 @@ var (
 	collectCustomQueryHrDirectory = kingpin.Flag("collect.custom_query.hr.directory", "Path to custom queries with high resolution directory.").Envar("PG_EXPORTER_EXTEND_QUERY_HR_PATH").String()
 )
 
-func initializePerconaExporters(dsn []string, opts []ExporterOpt) (func(), *Exporter, *Exporter, *Exporter) {
+func initializePerconaExporters(dsn []string, servers *Servers) (func(), *Exporter, *Exporter, *Exporter) {
 	queriesPath := map[MetricResolution]string{
 		HR: *collectCustomQueryHrDirectory,
 		MR: *collectCustomQueryMrDirectory,
 		LR: *collectCustomQueryLrDirectory,
 	}
 
-	defaultOpts := []ExporterOpt{CollectorName("exporter")}
-	defaultOpts = append(defaultOpts, opts...)
-	defaultExporter := NewExporter(
-		dsn,
-		defaultOpts...,
-	)
-	prometheus.MustRegister(defaultExporter)
-
-	hrExporter := NewExporter(dsn,
-		CollectorName("custom_query.hr"),
+	excludedDatabases := strings.Split(*excludeDatabases, ",")
+	opts := []ExporterOpt{
 		DisableDefaultMetrics(true),
 		DisableSettingsMetrics(true),
 		AutoDiscoverDatabases(*autoDiscoverDatabases),
-		WithUserQueriesEnabled(map[MetricResolution]bool{
-			HR: *collectCustomQueryHr,
-			MR: false,
-			LR: false,
-		}),
+		WithServers(servers),
 		WithUserQueriesPath(queriesPath),
 		WithConstantLabels(*constantLabelsList),
-		ExcludeDatabases(strings.Split(*excludeDatabases, ",")),
+		ExcludeDatabases(excludedDatabases),
+	}
+	hrExporter := NewExporter(dsn,
+		append(opts,
+			CollectorName("custom_query.hr"),
+			WithUserQueriesEnabled(HR),
+			WithEnabled(*collectCustomQueryHr),
+			WithConstantLabels(*constantLabelsList),
+		)...,
 	)
 	prometheus.MustRegister(hrExporter)
 
 	mrExporter := NewExporter(dsn,
-		CollectorName("custom_query.mr"),
-		DisableDefaultMetrics(true),
-		DisableSettingsMetrics(true),
-		AutoDiscoverDatabases(*autoDiscoverDatabases),
-		WithUserQueriesEnabled(map[MetricResolution]bool{
-			HR: false,
-			MR: *collectCustomQueryMr,
-			LR: false,
-		}),
-		WithUserQueriesPath(queriesPath),
-		WithConstantLabels(*constantLabelsList),
-		ExcludeDatabases(strings.Split(*excludeDatabases, ",")),
+		append(opts,
+			CollectorName("custom_query.mr"),
+			WithUserQueriesEnabled(MR),
+			WithEnabled(*collectCustomQueryMr),
+			WithConstantLabels(*constantLabelsList),
+		)...,
 	)
 	prometheus.MustRegister(mrExporter)
 
 	lrExporter := NewExporter(dsn,
-		CollectorName("custom_query.lr"),
-		DisableDefaultMetrics(true),
-		DisableSettingsMetrics(true),
-		AutoDiscoverDatabases(*autoDiscoverDatabases),
-		WithUserQueriesEnabled(map[MetricResolution]bool{
-			HR: false,
-			MR: false,
-			LR: *collectCustomQueryLr,
-		}),
-		WithUserQueriesPath(queriesPath),
-		WithConstantLabels(*constantLabelsList),
-		ExcludeDatabases(strings.Split(*excludeDatabases, ",")),
+		append(opts,
+			CollectorName("custom_query.lr"),
+			WithUserQueriesEnabled(LR),
+			WithEnabled(*collectCustomQueryLr),
+			WithConstantLabels(*constantLabelsList),
+		)...,
 	)
 	prometheus.MustRegister(lrExporter)
 
 	return func() {
-		defaultExporter.servers.Close()
 		hrExporter.servers.Close()
 		mrExporter.servers.Close()
 		lrExporter.servers.Close()
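The refactor replaces three fully spelled-out option lists with one shared base slice plus per-resolution append calls. A self-contained model of that shape (illustrative names, not the exporter's real types):

package main

import "fmt"

type exporter struct {
	name            string
	disableDefaults bool
}

type opt func(*exporter)

func withName(n string) opt      { return func(e *exporter) { e.name = n } }
func disableDefaults(v bool) opt { return func(e *exporter) { e.disableDefaults = v } }

func newExporter(opts ...opt) *exporter {
	e := &exporter{}
	for _, o := range opts {
		o(e)
	}
	return e
}

func main() {
	base := []opt{disableDefaults(true)}
	// The literal's len equals its cap, so each append below allocates a
	// fresh backing array; the appended option lists cannot clobber each other.
	hr := newExporter(append(base, withName("custom_query.hr"))...)
	mr := newExporter(append(base, withName("custom_query.mr"))...)
	fmt.Println(hr.name, mr.name, hr.disableDefaults)
}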
@@ -109,6 +92,7 @@ func (e *Exporter) loadCustomQueries(res MetricResolution, version semver.Version, server *Server) {
 			"err", err)
 		return
 	}
+	level.Debug(logger).Log("msg", fmt.Sprintf("reading dir %q for custom query", e.userQueriesPath[res]))
 
 	for _, v := range fi {
 		if v.IsDir() {
@@ -14,12 +14,10 @@
 package main
 
 import (
-	"crypto/sha256"
 	"database/sql"
 	"errors"
 	"fmt"
 	"math"
-	"os"
 	"regexp"
 	"strings"
 	"time"
@@ -411,9 +409,10 @@ type cachedMetrics struct {
 
 // Exporter collects Postgres metrics. It implements prometheus.Collector.
 type Exporter struct {
-	collectorName      string
-	userQueriesPath    map[MetricResolution]string
-	userQueriesEnabled map[MetricResolution]bool
+	collectorName     string
+	userQueriesPath   map[MetricResolution]string
+	resolutionEnabled MetricResolution
+	enabled           bool
 
 	// Holds a reference to the build in column mappings. Currently this is for testing purposes
 	// only, since it just points to the global.
@@ -454,9 +453,16 @@ func CollectorName(name string) ExporterOpt {
 }
 
 // WithUserQueriesEnabled enables user's queries.
-func WithUserQueriesEnabled(p map[MetricResolution]bool) ExporterOpt {
+func WithUserQueriesEnabled(p MetricResolution) ExporterOpt {
 	return func(e *Exporter) {
-		e.userQueriesEnabled = p
+		e.resolutionEnabled = p
 	}
 }
 
+// WithEnabled enables or disables the exporter as a whole.
+func WithEnabled(p bool) ExporterOpt {
+	return func(e *Exporter) {
+		e.enabled = p
+	}
+}
+
@@ -509,6 +515,13 @@ func WithConstantLabels(s string) ExporterOpt {
 	}
 }
 
+// WithServers sets the server collection shared with other exporters.
+func WithServers(s *Servers) ExporterOpt {
+	return func(e *Exporter) {
+		e.servers = s
+	}
+}
+
 func parseConstLabels(s string) prometheus.Labels {
 	labels := make(prometheus.Labels)
 
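WithServers is what lets main build one Servers pool and hand the same instance to every exporter, where NewExporter previously created a private pool per instance (see the NewExporter hunks below). A toy model of the sharing (simplified types, not the exporter's real ones):

package main

import "fmt"

type servers struct{ conns map[string]int }

func newServers() *servers { return &servers{conns: map[string]int{}} }

func (s *servers) get(dsn string) int {
	s.conns[dsn]++ // same map no matter which exporter asks
	return s.conns[dsn]
}

type exporter struct{ pool *servers }

func main() {
	shared := newServers()
	hr := exporter{pool: shared}
	lr := exporter{pool: shared}
	hr.pool.get("postgres://localhost")
	fmt.Println(lr.pool.get("postgres://localhost")) // 2: one shared pool
}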
@@ -540,6 +553,7 @@ func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
 	e := &Exporter{
 		dsn:               dsn,
 		builtinMetricMaps: builtinMetricMaps,
+		enabled:           true,
 	}
 
 	for _, opt := range opts {
@@ -547,7 +561,6 @@ func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
 	}
 
 	e.setupInternalMetrics()
-	e.servers = NewServers(ServerWithLabels(e.constantLabels))
 
 	return e
 }
@@ -595,6 +608,9 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect implements prometheus.Collector.
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
+	if !e.enabled {
+		return
+	}
 	e.scrape(ch)
 
 	ch <- e.duration
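With WithEnabled plumbed into Collect, a disabled exporter can stay registered with prometheus.MustRegister yet contribute nothing to a scrape. A self-contained sketch of that gate (stand-in types, not the real prometheus.Collector interface):

package main

import "fmt"

type metric string

type gated struct {
	enabled bool
}

func (g *gated) collect(ch chan<- metric) {
	if !g.enabled {
		return // registered but silent, as in the diff's Collect
	}
	ch <- "pg_up 1"
}

func main() {
	ch := make(chan metric, 1)
	(&gated{enabled: false}).collect(ch)
	fmt.Println(len(ch)) // 0: the disabled exporter emitted nothing
}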
@@ -639,50 +655,31 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error {
 	}
 
 	// Check if semantic version changed and recalculate maps if needed.
-	if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil {
-		level.Info(logger).Log("msg", "Semantic version changed", "server", server, "from", server.lastMapVersion, "to", semanticVersion)
-		server.mappingMtx.Lock()
+	//if semanticVersion.NE(server.lastMapVersion[e.resolutionEnabled]) || server.metricMap == nil {
+	//	level.Info(logger).Log("msg", "Semantic version changed", "server", server, "from", server.lastMapVersion[e.resolutionEnabled], "to", semanticVersion)
+	server.mappingMtx.Lock()
 
-		// Get Default Metrics only for master database
-		if !e.disableDefaultMetrics && server.master {
-			server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps)
-			server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
-		} else {
-			server.metricMap = make(map[string]MetricMapNamespace)
-			server.queryOverrides = make(map[string]string)
-		}
-
-		server.lastMapVersion = semanticVersion
-
-		if e.userQueriesPath[HR] != "" || e.userQueriesPath[MR] != "" || e.userQueriesPath[LR] != "" {
-			// Clear the metric while reload
-			e.userQueriesError.Reset()
-			for res := range e.userQueriesPath {
-				if e.userQueriesEnabled[res] {
-
-					// Calculate the hashsum of the useQueries
-					userQueriesData, err := os.ReadFile(e.userQueriesPath[res])
-					if err != nil {
-						level.Error(logger).Log("msg", "Failed to reload user queries", "path", e.userQueriesPath[res], "err", err)
-						e.userQueriesError.WithLabelValues(e.userQueriesPath[res], "").Set(1)
-					} else {
-						hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData))
-
-						if err := addQueries(userQueriesData, semanticVersion, server); err != nil {
-							level.Error(logger).Log("msg", "Failed to reload user queries", "path", e.userQueriesPath[res], "err", err)
-							e.userQueriesError.WithLabelValues(e.userQueriesPath[res], hashsumStr).Set(1)
-						} else {
-							// Mark user queries as successfully loaded
-							e.userQueriesError.WithLabelValues(e.userQueriesPath[res], hashsumStr).Set(0)
-						}
-					}
-				}
-			}
-		}
-
-		server.mappingMtx.Unlock()
-	}
+	// Get Default Metrics only for master database
+	if !e.disableDefaultMetrics && server.master {
+		server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps)
+		server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
+	} else {
+		server.metricMap = make(map[string]MetricMapNamespace)
+		server.queryOverrides = make(map[string]string)
+	}
+
+	//server.lastMapVersion[e.resolutionEnabled] = semanticVersion
+
+	if e.userQueriesPath[HR] != "" || e.userQueriesPath[MR] != "" || e.userQueriesPath[LR] != "" {
+		// Clear the metric while reload
+		e.userQueriesError.Reset()
+	}
+
+	e.loadCustomQueries(e.resolutionEnabled, semanticVersion, server)
+
+	server.mappingMtx.Unlock()
+	//}
 
 	// Output the version as a special metric only for master database
 	versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),
 		"Version string as reported by postgres", []string{"version", "short_version"}, server.labels)
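The reload loop over all enabled resolutions is gone; each exporter now calls e.loadCustomQueries with its single resolution. A simplified sketch of what per-resolution loading amounts to (hypothetical helper, not the exporter's exact code):

package main

import (
	"fmt"
	"os"
)

// queryFilesFor resolves the one directory configured for this exporter's
// resolution and lists its files, roughly what loadCustomQueries iterates.
func queryFilesFor(paths map[string]string, res string) ([]string, error) {
	dir := paths[res]
	if dir == "" {
		return nil, nil // this resolution has no custom-query directory
	}
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var files []string
	for _, e := range entries {
		if !e.IsDir() {
			files = append(files, e.Name())
		}
	}
	return files, nil
}

func main() {
	files, err := queryFilesFor(map[string]string{"hr": os.TempDir()}, "hr")
	fmt.Println(files, err)
}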
@@ -156,7 +156,11 @@ func (s *IntegrationSuite) TestExtendQueriesDoesntCrash(c *C) {
 
 	exporter := NewExporter(
 		strings.Split(dsn, ","),
-		WithUserQueriesPath("../user_queries_test.yaml"),
+		WithUserQueriesPath(map[MetricResolution]string{
+			HR: "../user_queries_test.yaml",
+			MR: "../user_queries_test.yaml",
+			LR: "../user_queries_test.yaml",
+		}),
 	)
 	c.Assert(exporter, NotNil)
 
@@ -28,6 +28,7 @@ import (
 // Also it contains metrics map and query overrides.
 type Server struct {
 	db          *sql.DB
+	dbMtx       sync.Mutex
 	labels      prometheus.Labels
 	master      bool
 	runonserver string
@@ -54,6 +55,7 @@ func ServerWithLabels(labels prometheus.Labels) ServerOpt {
 		for k, v := range labels {
 			s.labels[k] = v
 		}
+		s.labels["collector"] = "exporter"
 	}
 }
 
@@ -111,7 +113,7 @@ func (s *Server) String() string {
 }
 
 // Scrape loads metrics.
-func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error {
+func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool, res MetricResolution) error {
 	s.mappingMtx.RLock()
 	defer s.mappingMtx.RUnlock()
 
@@ -123,7 +125,7 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error {
 		}
 	}
 
-	errMap := queryNamespaceMappings(ch, s)
+	errMap := queryNamespaceMappings(ch, s, res)
 	if len(errMap) > 0 {
 		err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
 		level.Error(logger).Log("msg", "NAMESPACE ERRORS FOUND")
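Taken together with the scrapeDSN and Scrape hunks above, the resolution now rides the call chain as an argument instead of living in shared state. A compressed, runnable model of the plumbing (illustrative names only):

package main

import "fmt"

type resolution string

// Each layer passes the resolution down, mirroring
// scrapeDSN -> Server.Scrape -> queryNamespaceMappings in the diff.
func scrapeDSN(res resolution) { scrapeServer(res) }

func scrapeServer(res resolution) { queryMappings(res) }

func queryMappings(res resolution) { fmt.Println("running queries for", res) }

func main() {
	scrapeDSN("hr")
}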
@@ -38,12 +38,12 @@ prepare-exporter-from-repo:
 	make -C ../ build && cp ../postgres_exporter assets/postgres_exporter
 
 prepare-base-exporter:
-	tar -xf assets/postgres_exporter_percona.tar.xz -C assets/
+	tar -xf assets/postgres_exporter_percona.tar.gz -C assets/
 
 start-postgres-db:
-	docker-compose -f assets/postgres-compose.yml up -d --force-recreate --renew-anon-volumes --remove-orphans
+	docker-compose up -d --force-recreate --renew-anon-volumes --remove-orphans
 
 stop-postgres-db:
-	docker-compose -f assets/postgres-compose.yml down
+	docker-compose down
 
 prepare-env-from-repo: prepare-exporter-from-repo prepare-base-exporter start-postgres-db
percona_tests/assets/postgres_exporter.yml (new file, 1 line)
@@ -0,0 +1 @@
+auth_modules:
percona_tests/assets/test.new-flags.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
+--auto-discover-databases
+--collect.custom_query.hr
+--collect.custom_query.lr
+--collect.custom_query.mr
+--exclude-databases=template0,template1,postgres,cloudsqladmin,pmm-managed-dev,azure_maintenance,rdsadmin
+--log.level=warn
+--config.file=assets/postgres_exporter.yml
@@ -23,7 +23,22 @@ services:
     networks:
       - postgres-test-srv-net
 
+  golang:
+    image: golang:1.21
+    container_name: golang-test
+    command: >
+      tail -f ./assets/test.new-flags.txt
+    volumes:
+      - ../:/usr/src/myapp
+      - go-modules:/go/pkg/mod # Put modules cache into a separate volume
+    working_dir: /usr/src/myapp/percona_tests
+    depends_on:
+      - postgres
+    networks:
+      - postgres-test-srv-net
+
 volumes:
+  go-modules: # Define the volume
   postgres-test-srv-vol:
 
 networks:
@@ -44,18 +44,28 @@ func TestMissingMetrics(t *testing.T) {
 		return
 	}
 
-	newMetrics, err := getMetrics(updatedExporterFileName)
+	endpoint := "metrics?collect[]=exporter&collect[]=postgres&collect[]=custom_query.mr"
+	newMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, endpoint)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	oldMetrics, err := getMetrics(oldExporterFileName)
+	oldMetrics, err := getMetricsFrom(oldExporterFileName, oldExporterArgs, endpoint)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
+	err = os.WriteFile(updatedExporterMetrics, []byte(newMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.WriteFile(oldExporterMetrics, []byte(oldMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	oldMetricsCollection := parseMetricsCollection(oldMetrics)
 	newMetricsCollection := parseMetricsCollection(newMetrics)
 
@@ -70,18 +80,27 @@ func TestMissingLabels(t *testing.T) {
 		return
 	}
 
-	newMetrics, err := getMetrics(updatedExporterFileName)
+	newMetrics, err := getMetrics(updatedExporterFileName, updatedExporterArgs)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	oldMetrics, err := getMetrics(oldExporterFileName)
+	oldMetrics, err := getMetrics(oldExporterFileName, oldExporterArgs)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
+	err = os.WriteFile(updatedExporterMetrics+"-labels", []byte(newMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.WriteFile(oldExporterMetrics+"-labels", []byte(oldMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	oldMetricsCollection := parseMetricsCollection(oldMetrics)
 	newMetricsCollection := parseMetricsCollection(newMetrics)
 
@@ -108,13 +127,13 @@ func TestDumpMetrics(t *testing.T) {
 		ep = "metrics"
 	}
 
-	newMetrics, err := getMetricsFrom(updatedExporterFileName, ep)
+	newMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, ep)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	oldMetrics, err := getMetricsFrom(oldExporterFileName, ep)
+	oldMetrics, err := getMetricsFrom(oldExporterFileName, oldExporterArgs, ep)
 	if err != nil {
 		t.Error(err)
 		return
@@ -132,19 +151,19 @@ func TestResolutionsMetricDuplicates(t *testing.T) {
 		return
 	}
 
-	hrMetrics, err := getMetricsFrom(updatedExporterFileName, highResolutionEndpoint)
+	hrMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, highResolutionEndpoint)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	mrMetrics, err := getMetricsFrom(updatedExporterFileName, medResolutionEndpoint)
+	mrMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, medResolutionEndpoint)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	lrMetrics, err := getMetricsFrom(updatedExporterFileName, lowResolutionEndpoint)
+	lrMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, lowResolutionEndpoint)
 	if err != nil {
 		t.Error(err)
 		return
@@ -203,18 +222,27 @@ func TestResolutions(t *testing.T) {
 }
 
 func testResolution(t *testing.T, resolutionEp, resolutionName string) {
-	newMetrics, err := getMetricsFrom(updatedExporterFileName, resolutionEp)
+	newMetrics, err := getMetricsFrom(updatedExporterFileName, updatedExporterArgs, resolutionEp)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
-	oldMetrics, err := getMetricsFrom(oldExporterFileName, resolutionEp)
+	oldMetrics, err := getMetricsFrom(oldExporterFileName, oldExporterArgs, resolutionEp)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
+	err = os.WriteFile(fmt.Sprintf("%s-%s", updatedExporterMetrics, resolutionName), []byte(newMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.WriteFile(fmt.Sprintf("%s-%s", oldExporterMetrics, resolutionName), []byte(oldMetrics), os.ModePerm)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	oldMetricsCollection := parseMetricsCollection(oldMetrics)
 	newMetricsCollection := parseMetricsCollection(newMetrics)
 
@@ -253,10 +281,10 @@ func testResolution(t *testing.T, resolutionEp, resolutionName string) {
 
 		if !metricFound {
 			missingCount++
-			missingMetrics += fmt.Sprintf("%s\n", oldMetric.name)
+			missingMetrics += fmt.Sprintf("%s\n", oldMetric)
 		} else if !labelsMatch {
 			missingLabelsCount++
-			missingLabels += fmt.Sprintf("%s\n", oldMetric.name)
+			missingLabels += fmt.Sprintf("%s\n", oldMetric)
 		}
 	}
 
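Printing oldMetric instead of oldMetric.name puts the metric's labels into the failure report as well, which is what a label-comparison test needs. Illustrative sketch only; the test's metric type is not shown in this diff, so the String method here is an assumption:

package main

import "fmt"

type metric struct {
	name   string
	labels string
}

// String is a hypothetical formatter standing in for however the real
// metric type renders; the point is that %s on the value carries labels.
func (m metric) String() string { return m.name + m.labels }

func main() {
	m := metric{name: "pg_up", labels: `{collector="exporter"}`}
	fmt.Printf("%s\n", m.name) // pg_up
	fmt.Printf("%s\n", m)      // pg_up{collector="exporter"}
}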
@@ -554,12 +582,12 @@ func getMetricNames(metrics []string) []string {
 	return ret
 }
 
-func getMetrics(fileName string) (string, error) {
-	return getMetricsFrom(fileName, "metrics")
+func getMetrics(fileName, argsFile string) (string, error) {
+	return getMetricsFrom(fileName, argsFile, "metrics")
 }
 
-func getMetricsFrom(fileName, endpoint string) (string, error) {
-	cmd, port, collectOutput, err := launchExporter(fileName)
+func getMetricsFrom(fileName, argsFile, endpoint string) (string, error) {
+	cmd, port, collectOutput, err := launchExporter(fileName, argsFile)
 	if err != nil {
 		return "", errors.Wrap(err, "Failed to launch exporter")
 	}
@@ -47,11 +47,11 @@ func TestPerformance(t *testing.T) {
 
 	var updated, original *StatsData
 	t.Run("upstream exporter", func(t *testing.T) {
-		updated = doTestStats(t, repeatCount, scrapesCount, updatedExporterFileName)
+		updated = doTestStats(t, repeatCount, scrapesCount, updatedExporterFileName, updatedExporterArgs)
 	})
 
 	t.Run("percona exporter", func(t *testing.T) {
-		original = doTestStats(t, repeatCount, scrapesCount, oldExporterFileName)
+		original = doTestStats(t, repeatCount, scrapesCount, oldExporterFileName, oldExporterArgs)
 	})
 
 	printStats(original, updated)
@@ -65,13 +65,13 @@ func calculatePerc(base, updated float64) float64 {
 	return diffPerc
 }
 
-func doTestStats(t *testing.T, cnt int, size int, fileName string) *StatsData {
+func doTestStats(t *testing.T, cnt, size int, fileName, argsFile string) *StatsData {
 	var durations []float64
 	var hwms []float64
 	var datas []float64
 
 	for i := 0; i < cnt; i++ {
-		d, hwm, data, err := doTest(size, fileName)
+		d, hwm, data, err := doTest(size, fileName, argsFile)
 		if !assert.NoError(t, err) {
 			return nil
 		}
@@ -124,8 +124,8 @@ func doTestStats(t *testing.T, cnt int, size int, fileName string) *StatsData {
 	return &st
 }
 
-func doTest(iterations int, fileName string) (cpu, hwm, data int64, _ error) {
-	cmd, port, collectOutput, err := launchExporter(fileName)
+func doTest(iterations int, fileName, argsFile string) (cpu, hwm, data int64, _ error) {
+	cmd, port, collectOutput, err := launchExporter(fileName, argsFile)
 	if err != nil {
 		return 0, 0, 0, err
 	}
@@ -18,7 +18,7 @@ import (
 )
 
 const (
-	postgresHost     = "127.0.0.1"
+	postgresHost     = "postgres"
 	postgresPort     = 5432
 	postgresUser     = "postgres"
 	postgresPassword = "postgres"
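The host change matches the docker-compose hunk above: once the tests run in the golang container on postgres-test-srv-net, the database is reached by its service name through Docker's DNS rather than by loopback. A minimal sketch of the resulting connection string (the URL shape is an assumption; the constants mirror the hunk):

package main

import "fmt"

const (
	postgresHost     = "postgres" // compose service name instead of 127.0.0.1
	postgresPort     = 5432
	postgresUser     = "postgres"
	postgresPassword = "postgres"
)

func main() {
	// Inside the compose network, Docker DNS resolves the service name.
	dsn := fmt.Sprintf("postgresql://%s:%s@%s:%d/postgres?sslmode=disable",
		postgresUser, postgresPassword, postgresHost, postgresPort)
	fmt.Println(dsn)
}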
@@ -28,16 +28,20 @@ const (
 
 	exporterWaitTimeoutMs = 3000 // time to wait for exporter process start
 
-	updatedExporterFileName = "assets/postgres_exporter"
-	oldExporterFileName     = "assets/postgres_exporter_percona"
+	updatedExporterFileName = "/usr/src/myapp/percona_tests/assets/postgres_exporter"
+	oldExporterFileName     = "/usr/src/myapp/percona_tests/assets/postgres_exporter_percona"
+	updatedExporterArgs     = "/usr/src/myapp/percona_tests/assets/test.new-flags.txt"
+	oldExporterArgs         = "/usr/src/myapp/percona_tests/assets/test.old-flags.txt"
+	updatedExporterMetrics  = "/usr/src/myapp/percona_tests/assets/metrics.new"
+	oldExporterMetrics      = "/usr/src/myapp/percona_tests/assets/metrics.old"
 )
 
 func getBool(val *bool) bool {
 	return val != nil && *val
 }
 
-func launchExporter(fileName string) (cmd *exec.Cmd, port int, collectOutput func() string, _ error) {
-	lines, err := os.ReadFile("assets/test.exporter-flags.txt")
+func launchExporter(fileName string, argsFile string) (cmd *exec.Cmd, port int, collectOutput func() string, _ error) {
+	lines, err := os.ReadFile(argsFile)
 	if err != nil {
 		return nil, 0, nil, errors.Wrapf(err, "Unable to read exporter args file")
 	}
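launchExporter now takes the args file as a parameter, so the old and new binaries can be started with different flag sets (test.old-flags.txt vs. test.new-flags.txt). A hypothetical, simplified stand-in for that helper (not the test's exact code):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// launchWithArgs reads one flag per line from argsFile, as in
// test.new-flags.txt above, and starts the binary with exactly those flags.
func launchWithArgs(binary, argsFile string) (*exec.Cmd, error) {
	raw, err := os.ReadFile(argsFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read exporter args file: %w", err)
	}
	args := strings.Fields(string(raw)) // one flag per line, no embedded spaces
	cmd := exec.Command(binary, args...)
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return cmd, nil
}

func main() {
	// Illustrative invocation only: /bin/true with an empty args file.
	cmd, err := launchWithArgs("/bin/true", os.DevNull)
	if err == nil {
		_ = cmd.Wait()
	}
}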
@@ -116,6 +120,8 @@ func stopExporter(cmd *exec.Cmd, collectOutput func() string) error {
 		return errors.Wrapf(err, "Failed to wait for exporter process termination.%s\n", collectOutput())
 	}
 
+	fmt.Println(collectOutput())
+
 	return nil
 }
 
 func tryGetMetrics(port int) (string, error) {