Partially revert 47295e8

Artem Gavrilov 2024-02-14 19:00:21 +02:00
parent 4328194f15
commit 8210082259
6 changed files with 18 additions and 32 deletions

View File

@@ -54,14 +54,12 @@ func (e *Exporter) discoverDatabaseDSNs() []string {
 			level.Error(logger).Log("msg", "Error opening connection to database", "dsn", loggableDSN(dsn), "err", err)
 			continue
 		}
-		server.dbMtx.Lock()
 		dsns[dsn] = struct{}{}
 		// If autoDiscoverDatabases is true, set first dsn as master database (Default: false)
 		server.master = true
 		databaseNames, err := queryDatabases(server)
-		server.dbMtx.Unlock()
 		if err != nil {
 			level.Error(logger).Log("msg", "Error querying databases", "dsn", loggableDSN(dsn), "err", err)
 			continue
@@ -103,8 +101,9 @@ func (e *Exporter) discoverDatabaseDSNs() []string {
 func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
 	server, err := e.servers.GetServer(dsn)
-	server.dbMtx.Lock()
-	defer server.dbMtx.Unlock()
+	if err != nil {
+		return err // TODO
+	}
 	level.Debug(logger).Log("msg", "scrapeDSN:"+dsn)
@@ -122,7 +121,7 @@ func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error {
 		level.Warn(logger).Log("msg", "Proceeding with outdated query maps, as the Postgres version could not be determined", "err", err)
 	}
-	return server.Scrape(ch, e.disableSettingsMetrics, e.resolutionEnabled)
+	return server.Scrape(ch, e.disableSettingsMetrics)
 }
 // try to get the DataSource

View File

@@ -16,11 +16,10 @@ package main
 import (
 	"fmt"
 	"net/http"
_ "net/http/pprof"
"os" "os"
"strings" "strings"
_ "net/http/pprof"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
@@ -51,7 +50,7 @@
 	disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool()
 	disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool()
 	autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically. (DEPRECATED)").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool()
-	//queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run. (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
+	// queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run. (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String()
 	onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool()
 	constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,). (DEPRECATED)").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String()
 	excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String()
@@ -103,9 +102,9 @@ func main() {
 	excludedDatabases := strings.Split(*excludeDatabases, ",")
 	logger.Log("msg", "Excluded databases", "databases", fmt.Sprintf("%v", excludedDatabases))
-	//if *queriesPath != "" {
+	// if *queriesPath != "" {
 	// level.Warn(logger).Log("msg", "The extended queries.yaml config is DEPRECATED", "file", *queriesPath)
-	//}
+	// }
 	if *autoDiscoverDatabases || *excludeDatabases != "" || *includeDatabases != "" {
 		level.Warn(logger).Log("msg", "Scraping additional databases via auto discovery is DEPRECATED")
@@ -115,15 +114,11 @@ func main() {
 		level.Warn(logger).Log("msg", "Constant labels on all metrics is DEPRECATED")
 	}
-	servers := NewServers(ServerWithLabels(parseConstLabels(*constantLabelsList)))
 	opts := []ExporterOpt{
-		CollectorName("exporter"),
 		DisableDefaultMetrics(*disableDefaultMetrics),
 		DisableSettingsMetrics(*disableSettingsMetrics),
 		AutoDiscoverDatabases(*autoDiscoverDatabases),
 		WithConstantLabels(*constantLabelsList),
-		WithServers(servers),
 		ExcludeDatabases(excludedDatabases),
 		IncludeDatabases(*includeDatabases),
 	}
@@ -144,7 +139,7 @@ func main() {
 		dsn = dsns[0]
 	}
-	cleanup, hr, mr, lr := initializePerconaExporters(dsns, servers)
+	cleanup, hr, mr, lr := initializePerconaExporters(dsns)
 	defer cleanup()
 	pe, err := collector.NewPostgresCollector(
@@ -274,7 +269,7 @@ func (h *handler) innerHandler(filters ...string) (http.Handler, error) {
 	handler := promhttp.HandlerFor(
 		registry,
 		promhttp.HandlerOpts{
-			//ErrorLog: log.NewNopLogger() .NewErrorLogger(),
+			// ErrorLog: log.NewNopLogger() .NewErrorLogger(),
 			ErrorHandling: promhttp.ContinueOnError,
 		},
 	)

View File

@@ -183,7 +183,7 @@ func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNa
 // Iterate through all the namespace mappings in the exporter and run their
 // queries.
-func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server, res MetricResolution) map[string]error {
+func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error {
 	// Return a map of namespace -> errors
 	namespaceErrors := make(map[string]error)

View File

@@ -31,7 +31,7 @@ var (
 	collectCustomQueryHrDirectory = kingpin.Flag("collect.custom_query.hr.directory", "Path to custom queries with high resolution directory.").Envar("PG_EXPORTER_EXTEND_QUERY_HR_PATH").String()
 )
-func initializePerconaExporters(dsn []string, servers *Servers) (func(), *Exporter, *Exporter, *Exporter) {
+func initializePerconaExporters(dsn []string) (func(), *Exporter, *Exporter, *Exporter) {
 	queriesPath := map[MetricResolution]string{
 		HR: *collectCustomQueryHrDirectory,
 		MR: *collectCustomQueryMrDirectory,
@@ -43,7 +43,6 @@ func initializePerconaExporters(dsn []string, servers *Servers) (func(), *Export
 		DisableDefaultMetrics(true),
 		DisableSettingsMetrics(true),
 		AutoDiscoverDatabases(*autoDiscoverDatabases),
-		WithServers(servers),
 		WithUserQueriesPath(queriesPath),
 		ExcludeDatabases(excludedDatabases),
 	}

View File

@@ -515,13 +515,6 @@ func WithConstantLabels(s string) ExporterOpt {
 	}
 }
-// WithServers configures constant labels.
-func WithServers(s *Servers) ExporterOpt {
-	return func(e *Exporter) {
-		e.servers = s
-	}
-}
 func parseConstLabels(s string) prometheus.Labels {
 	labels := make(prometheus.Labels)
@@ -561,6 +554,7 @@ func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter {
 	}
 	e.setupInternalMetrics()
+	e.servers = NewServers(ServerWithLabels(e.constantLabels))
 	return e
 }
@@ -655,7 +649,7 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server)
 	}
 	// Check if semantic version changed and recalculate maps if needed.
-	//if semanticVersion.NE(server.lastMapVersion[e.resolutionEnabled]) || server.metricMap == nil {
+	// if semanticVersion.NE(server.lastMapVersion[e.resolutionEnabled]) || server.metricMap == nil {
 	// level.Info(logger).Log("msg", "Semantic version changed", "server", server, "from", server.lastMapVersion[e.resolutionEnabled], "to", semanticVersion)
 	server.mappingMtx.Lock()
@@ -668,7 +662,7 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server)
 		server.queryOverrides = make(map[string]string)
 	}
-	//server.lastMapVersion[e.resolutionEnabled] = semanticVersion
+	// server.lastMapVersion[e.resolutionEnabled] = semanticVersion
 	if e.userQueriesPath[HR] != "" || e.userQueriesPath[MR] != "" || e.userQueriesPath[LR] != "" {
 		// Clear the metric while reload
@@ -678,7 +672,7 @@ func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server)
 	e.loadCustomQueries(e.resolutionEnabled, semanticVersion, server)
 	server.mappingMtx.Unlock()
-	//}
+	// }
 	// Output the version as a special metric only for master database
 	versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),

View File

@@ -28,7 +28,6 @@ import (
 // Also it contains metrics map and query overrides.
 type Server struct {
 	db *sql.DB
-	dbMtx sync.Mutex
 	labels prometheus.Labels
 	master bool
 	runonserver string
@@ -113,7 +112,7 @@ func (s *Server) String() string {
 }
 // Scrape loads metrics.
-func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool, res MetricResolution) error {
+func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error {
 	s.mappingMtx.RLock()
 	defer s.mappingMtx.RUnlock()
@@ -125,7 +124,7 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool
 		}
 	}
-	errMap := queryNamespaceMappings(ch, s, res)
+	errMap := queryNamespaceMappings(ch, s)
 	if len(errMap) > 0 {
 		err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap))
 		level.Error(logger).Log("msg", "NAMESPACE ERRORS FOUND")