Mirror of https://github.com/prometheus-community/postgres_exporter (synced 2025-04-21 22:45:26 +00:00)
Move queries from queries.yaml to collectors (#801)
Signed-off-by: Ben Kochie <superq@gmail.com>
Commit: fe960c6b54 (parent: 16430fc955)
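For orientation before the per-file diffs: each query that used to live in queries.yaml becomes a small Go collector that registers itself and runs its SQL on every scrape. A minimal sketch of that shape, assuming the package's existing plumbing (`registerCollector`, `Collector`, `collectorConfig` appear throughout this diff); everything prefixed "example" is made up for illustration:

// Hypothetical collector showing the pattern every file below follows.
package collector

import (
	"context"
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

type PGExampleCollector struct{}

func init() {
	// Self-registration; defaultEnabled/defaultDisabled control opt-in.
	registerCollector("example", defaultEnabled, NewPGExampleCollector)
}

func NewPGExampleCollector(collectorConfig) (Collector, error) {
	return &PGExampleCollector{}, nil
}

var (
	pgExampleDesc = prometheus.NewDesc(
		"pg_example_value",
		"An illustrative gauge",
		[]string{}, nil,
	)
	pgExampleQuery = "SELECT 1" // the SQL now lives in Go, not queries.yaml
)

// Update is invoked on each scrape; it runs the query and emits metrics.
func (c *PGExampleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	var v float64
	if err := db.QueryRowContext(ctx, pgExampleQuery).Scan(&v); err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(pgExampleDesc, prometheus.GaugeValue, v)
	return nil
}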
@@ -40,7 +40,7 @@ const (
 	namespace = "pg"
 
 	defaultEnabled = true
-	// defaultDisabled = false
+	defaultDisabled = false
 )
 
 var (
collector/collector_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

type labelMap map[string]string

type MetricResult struct {
	labels     labelMap
	value      float64
	metricType dto.MetricType
}

func readMetric(m prometheus.Metric) MetricResult {
	pb := &dto.Metric{}
	m.Write(pb)
	labels := make(labelMap, len(pb.Label))
	for _, v := range pb.Label {
		labels[v.GetName()] = v.GetValue()
	}
	if pb.Gauge != nil {
		return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE}
	}
	if pb.Counter != nil {
		return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER}
	}
	if pb.Untyped != nil {
		return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED}
	}
	panic("Unsupported metric type")
}

func sanitizeQuery(q string) string {
	q = strings.Join(strings.Fields(q), " ")
	q = strings.Replace(q, "(", "\\(", -1)
	q = strings.Replace(q, ")", "\\)", -1)
	q = strings.Replace(q, "*", "\\*", -1)
	q = strings.Replace(q, "$", "\\$", -1)
	return q
}
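A note on sanitizeQuery above: go-sqlmock compiles the string passed to ExpectQuery as a regular expression, so a literal SQL query only matches after its whitespace is collapsed and its regex metacharacters are escaped. That is all this helper does, and every test in this commit uses it the same way, e.g.:

mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).
	WillReturnRows(sqlmock.NewRows([]string{"datname"}).AddRow("postgres"))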
@@ -41,12 +41,17 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) {
 	}, nil
 }
 
-var pgDatabaseSizeDesc = prometheus.NewDesc(
-	"pg_database_size_bytes",
-	"Disk space used by the database",
-	[]string{"datname"}, nil,
-)
+var (
+	pgDatabaseSizeDesc = prometheus.NewDesc(
+		"pg_database_size_bytes",
+		"Disk space used by the database",
+		[]string{"datname"}, nil,
+	)
+
+	pgDatabaseQuery     = "SELECT pg_database.datname FROM pg_database;"
+	pgDatabaseSizeQuery = "SELECT pg_database_size($1)"
+)
 
 // Update implements Collector and exposes database size.
 // It is called by the Prometheus registry when collecting metrics.
 // The list of databases is retrieved from pg_database and filtered
@@ -58,9 +63,7 @@ var pgDatabaseSizeDesc = prometheus.NewDesc(
 func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	// Query the list of databases
 	rows, err := db.QueryContext(ctx,
-		`SELECT pg_database.datname
-		FROM pg_database;
-		`,
+		pgDatabaseQuery,
 	)
 	if err != nil {
 		return err
@@ -88,7 +91,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, db *sql.DB, ch chan<- p
 	// Query the size of the databases
 	for _, datname := range databases {
 		var size int64
-		err = db.QueryRowContext(ctx, "SELECT pg_database_size($1)", datname).Scan(&size)
+		err = db.QueryRowContext(ctx, pgDatabaseSizeQuery, datname).Scan(&size)
 		if err != nil {
 			return err
 		}
collector/pg_database_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPGDatabaseCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname"}).
		AddRow("postgres"))

	mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}).
		AddRow(1024))

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGDatabaseCollector{}
		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGDatabaseCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"datname": "postgres"}, value: 1024, metricType: dto.MetricType_GAUGE},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
collector/pg_postmaster.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("postmaster", defaultEnabled, NewPGPostmasterCollector)
}

type PGPostmasterCollector struct {
}

func NewPGPostmasterCollector(collectorConfig) (Collector, error) {
	return &PGPostmasterCollector{}, nil
}

var (
	pgPostMasterStartTimeSeconds = prometheus.NewDesc(
		"pg_postmaster_start_time_seconds",
		"Time at which postmaster started",
		[]string{}, nil,
	)

	pgPostmasterQuery = "SELECT pg_postmaster_start_time from pg_postmaster_start_time();"
)

func (c *PGPostmasterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	row := db.QueryRowContext(ctx,
		pgPostmasterQuery)

	var startTimeSeconds float64
	err := row.Scan(&startTimeSeconds)
	if err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(
		pgPostMasterStartTimeSeconds,
		prometheus.GaugeValue, startTimeSeconds,
	)
	return nil
}
collector/pg_postmaster_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPgPostmasterCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}).
		AddRow(1685739904))

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGPostmasterCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGPostmasterCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{}, value: 1685739904, metricType: dto.MetricType_GAUGE},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
collector/pg_process_idle.go (new file, 106 lines)
@@ -0,0 +1,106 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("process_idle", defaultEnabled, NewPGProcessIdleCollector)
}

type PGProcessIdleCollector struct {
	log log.Logger
}

const processIdleSubsystem = "process_idle"

func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) {
	return &PGProcessIdleCollector{log: config.logger}, nil
}

var pgProcessIdleSeconds = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"),
	"Idle time of server processes",
	[]string{"application_name"},
	prometheus.Labels{},
)

func (PGProcessIdleCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	row := db.QueryRowContext(ctx,
		`WITH
			metrics AS (
				SELECT
					application_name,
					SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum,
					COUNT(*) AS process_idle_seconds_count
				FROM pg_stat_activity
				WHERE state = 'idle'
				GROUP BY application_name
			),
			buckets AS (
				SELECT
					application_name,
					le,
					SUM(
						CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le
							THEN 1
							ELSE 0
						END
					)::bigint AS bucket
				FROM
					pg_stat_activity,
					UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le
				GROUP BY application_name, le
				ORDER BY application_name, le
			)
		SELECT
			application_name,
			process_idle_seconds_sum as seconds_sum,
			process_idle_seconds_count as seconds_count,
			ARRAY_AGG(le) AS seconds,
			ARRAY_AGG(bucket) AS seconds_bucket
		FROM metrics JOIN buckets USING (application_name)
		GROUP BY 1, 2, 3;`)

	var applicationName string
	var secondsSum int64
	var secondsCount uint64
	var seconds []int64
	var secondsBucket []uint64

	err := row.Scan(&applicationName, &secondsSum, &secondsCount, &seconds, &secondsBucket)
	if err != nil {
		return err
	}

	var buckets = make(map[float64]uint64, len(seconds))
	for i, second := range seconds {
		if i >= len(secondsBucket) {
			break
		}
		buckets[float64(second)] = secondsBucket[i]
	}
	ch <- prometheus.MustNewConstHistogram(
		pgProcessIdleSeconds,
		secondsCount, float64(secondsSum), buckets,
		applicationName,
	)
	return nil
}
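The histogram emission in pg_process_idle.go relies on prometheus.MustNewConstHistogram, which takes fully pre-aggregated data: a cumulative observation count, a sum, and a map from bucket upper bounds to cumulative counts. The SQL above does all of the bucketing; the Go side only reshapes the arrays. A sketch with made-up numbers:

// Illustrative values; in the collector these come from the query above.
buckets := map[float64]uint64{1: 12, 5: 30, 60: 42} // le -> cumulative count
ch <- prometheus.MustNewConstHistogram(
	pgProcessIdleSeconds,
	42,     // _count: total observations
	1234.5, // _sum: total idle seconds
	buckets,
	"my_app", // application_name label value
)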
collector/pg_replication.go (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("replication", defaultEnabled, NewPGReplicationCollector)
}

type PGReplicationCollector struct {
}

func NewPGReplicationCollector(collectorConfig) (Collector, error) {
	return &PGReplicationCollector{}, nil
}

var (
	pgReplicationLag = prometheus.NewDesc(
		"pg_replication_lag",
		"Replication lag behind master in seconds",
		[]string{}, nil,
	)
	pgReplicationIsReplica = prometheus.NewDesc(
		"pg_replication_is_replica",
		"Indicates if the server is a replica",
		[]string{}, nil,
	)

	pgReplicationQuery = `SELECT
	CASE
		WHEN NOT pg_is_in_recovery() THEN 0
		ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())))
	END AS lag,
	CASE
		WHEN pg_is_in_recovery() THEN 1
		ELSE 0
	END as is_replica`
)

func (c *PGReplicationCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	row := db.QueryRowContext(ctx,
		pgReplicationQuery,
	)

	var lag float64
	var isReplica int64
	err := row.Scan(&lag, &isReplica)
	if err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(
		pgReplicationLag,
		prometheus.GaugeValue, lag,
	)
	ch <- prometheus.MustNewConstMetric(
		pgReplicationIsReplica,
		prometheus.GaugeValue, float64(isReplica),
	)
	return nil
}
collector/pg_replication_test.go (new file, 61 lines)
@@ -0,0 +1,61 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPgReplicationCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{"lag", "is_replica"}
	rows := sqlmock.NewRows(columns).
		AddRow(1000, 1)
	mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGReplicationCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGReplicationCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
@@ -101,11 +101,8 @@ var (
 		[]string{},
 		prometheus.Labels{},
 	)
-)
-
-func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
-	row := db.QueryRowContext(ctx,
-		`SELECT
+
+	statBGWriterQuery = `SELECT
 		checkpoints_timed
 		,checkpoints_req
 		,checkpoint_write_time
@@ -117,7 +114,12 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<-
 		,buffers_backend_fsync
 		,buffers_alloc
 		,stats_reset
-	FROM pg_stat_bgwriter;`)
+	FROM pg_stat_bgwriter;`
+)
+
+func (PGStatBGWriterCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
+	row := db.QueryRowContext(ctx,
+		statBGWriterQuery)
 
 	var cpt int
 	var cpr int
collector/pg_stat_bgwriter_test.go (new file, 88 lines)
@@ -0,0 +1,88 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"
	"time"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPGStatBGWriterCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{
		"checkpoints_timed",
		"checkpoints_req",
		"checkpoint_write_time",
		"checkpoint_sync_time",
		"buffers_checkpoint",
		"buffers_clean",
		"maxwritten_clean",
		"buffers_backend",
		"buffers_backend_fsync",
		"buffers_alloc",
		"stats_reset"}

	srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07")
	if err != nil {
		t.Fatalf("Error parsing time: %s", err)
	}

	rows := sqlmock.NewRows(columns).
		AddRow(354, 4945, 289097744, 1242257, 3275602074, 89320867, 450139, 2034563757, 0, 2725688749, srT)
	mock.ExpectQuery(sanitizeQuery(statBGWriterQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGStatBGWriterCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 354},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 4945},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 289097744},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1242257},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 3275602074},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 89320867},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 450139},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2034563757},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2725688749},
		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1685059842},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
collector/pg_stat_statements.go (new file, 152 lines)
@@ -0,0 +1,152 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	// WARNING:
	// Disabled by default because this set of metrics can be quite expensive on a busy server.
	// Every unique query will cause a new timeseries to be created.
	registerCollector("statements", defaultDisabled, NewPGStatStatementsCollector)
}

type PGStatStatementsCollector struct {
	log log.Logger
}

const statStatementsSubsystem = "stat_statements"

func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) {
	return &PGStatStatementsCollector{log: config.logger}, nil
}

var (
	statStatementsCallsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"),
		"Number of times executed",
		[]string{"user", "datname", "queryid"},
		prometheus.Labels{},
	)
	statStatementsSecondsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"),
		"Total time spent in the statement, in seconds",
		[]string{"user", "datname", "queryid"},
		prometheus.Labels{},
	)
	statStatementsRowsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"),
		"Total number of rows retrieved or affected by the statement",
		[]string{"user", "datname", "queryid"},
		prometheus.Labels{},
	)
	statStatementsBlockReadSecondsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"),
		"Total time the statement spent reading blocks, in seconds",
		[]string{"user", "datname", "queryid"},
		prometheus.Labels{},
	)
	statStatementsBlockWriteSecondsTotal = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"),
		"Total time the statement spent writing blocks, in seconds",
		[]string{"user", "datname", "queryid"},
		prometheus.Labels{},
	)

	pgStatStatementsQuery = `SELECT
		pg_get_userbyid(userid) as user,
		pg_database.datname,
		pg_stat_statements.queryid,
		pg_stat_statements.calls as calls_total,
		pg_stat_statements.total_time / 1000.0 as seconds_total,
		pg_stat_statements.rows as rows_total,
		pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total,
		pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total
	FROM pg_stat_statements
	JOIN pg_database
		ON pg_database.oid = pg_stat_statements.dbid
	WHERE
		total_time > (
			SELECT percentile_cont(0.1)
			WITHIN GROUP (ORDER BY total_time)
			FROM pg_stat_statements
		)
	ORDER BY seconds_total DESC
	LIMIT 100;`
)

func (PGStatStatementsCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	rows, err := db.QueryContext(ctx,
		pgStatStatementsQuery)

	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var user string
		var datname string
		var queryid string
		var callsTotal int64
		var secondsTotal float64
		var rowsTotal int64
		var blockReadSecondsTotal float64
		var blockWriteSecondsTotal float64

		if err := rows.Scan(&user, &datname, &queryid, &callsTotal, &secondsTotal, &rowsTotal, &blockReadSecondsTotal, &blockWriteSecondsTotal); err != nil {
			return err
		}

		ch <- prometheus.MustNewConstMetric(
			statStatementsCallsTotal,
			prometheus.CounterValue,
			float64(callsTotal),
			user, datname, queryid,
		)
		ch <- prometheus.MustNewConstMetric(
			statStatementsSecondsTotal,
			prometheus.CounterValue,
			secondsTotal,
			user, datname, queryid,
		)
		ch <- prometheus.MustNewConstMetric(
			statStatementsRowsTotal,
			prometheus.CounterValue,
			float64(rowsTotal),
			user, datname, queryid,
		)
		ch <- prometheus.MustNewConstMetric(
			statStatementsBlockReadSecondsTotal,
			prometheus.CounterValue,
			blockReadSecondsTotal,
			user, datname, queryid,
		)
		ch <- prometheus.MustNewConstMetric(
			statStatementsBlockWriteSecondsTotal,
			prometheus.CounterValue,
			blockWriteSecondsTotal,
			user, datname, queryid,
		)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
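Note how this file ties back to the first hunk of the commit: defaultDisabled was uncommented in the collector constants precisely so that a high-cardinality collector like pg_stat_statements can register as opt-in, making enablement an explicit operator decision (presumably via the per-collector --[no-]collector.statements switch that registerCollector wires up, if it follows the usual exporter convention).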
collector/pg_stat_statements_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPGStatStatementsCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"}
	rows := sqlmock.NewRows(columns).
		AddRow("postgres", "postgres", 1500, 5, 0.4, 100, 0.1, 0.2)
	mock.ExpectQuery(sanitizeQuery(pgStatStatementsQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGStatStatementsCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5},
		{labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4},
		{labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100},
		{labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1},
		{labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
collector/pg_stat_user_tables.go (new file, 339 lines)
@@ -0,0 +1,339 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("user_tables", defaultEnabled, NewPGStatUserTablesCollector)
}

type PGStatUserTablesCollector struct {
	log log.Logger
}

const userTableSubsystem = "stat_user_tables"

func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) {
	return &PGStatUserTablesCollector{log: config.logger}, nil
}

var (
	statUserTablesSeqScan = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"),
		"Number of sequential scans initiated on this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesSeqTupRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"),
		"Number of live rows fetched by sequential scans",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesIdxScan = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"),
		"Number of index scans initiated on this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesIdxTupFetch = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"),
		"Number of live rows fetched by index scans",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNTupIns = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"),
		"Number of rows inserted",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNTupUpd = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"),
		"Number of rows updated",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNTupDel = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"),
		"Number of rows deleted",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNTupHotUpd = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"),
		"Number of rows HOT updated (i.e., with no separate index update required)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNLiveTup = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"),
		"Estimated number of live rows",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNDeadTup = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"),
		"Estimated number of dead rows",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesNModSinceAnalyze = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"),
		"Estimated number of rows changed since last analyze",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesLastVacuum = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"),
		"Last time at which this table was manually vacuumed (not counting VACUUM FULL)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesLastAutovacuum = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"),
		"Last time at which this table was vacuumed by the autovacuum daemon",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesLastAnalyze = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"),
		"Last time at which this table was manually analyzed",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesLastAutoanalyze = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"),
		"Last time at which this table was analyzed by the autovacuum daemon",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesVacuumCount = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"),
		"Number of times this table has been manually vacuumed (not counting VACUUM FULL)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesAutovacuumCount = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"),
		"Number of times this table has been vacuumed by the autovacuum daemon",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesAnalyzeCount = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"),
		"Number of times this table has been manually analyzed",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statUserTablesAutoanalyzeCount = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"),
		"Number of times this table has been analyzed by the autovacuum daemon",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)

	statUserTablesQuery = `SELECT
		current_database() datname,
		schemaname,
		relname,
		seq_scan,
		seq_tup_read,
		idx_scan,
		idx_tup_fetch,
		n_tup_ins,
		n_tup_upd,
		n_tup_del,
		n_tup_hot_upd,
		n_live_tup,
		n_dead_tup,
		n_mod_since_analyze,
		COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum,
		COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum,
		COALESCE(last_analyze, '1970-01-01Z') as last_analyze,
		COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze,
		vacuum_count,
		autovacuum_count,
		analyze_count,
		autoanalyze_count
	FROM
		pg_stat_user_tables`
)

func (c *PGStatUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	rows, err := db.QueryContext(ctx,
		statUserTablesQuery)

	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var datname string
		var schemaname string
		var relname string
		var seqScan int64
		var seqTupRead int64
		var idxScan int64
		var idxTupFetch int64
		var nTupIns int64
		var nTupUpd int64
		var nTupDel int64
		var nTupHotUpd int64
		var nLiveTup int64
		var nDeadTup int64
		var nModSinceAnalyze int64
		var lastVacuum time.Time
		var lastAutovacuum time.Time
		var lastAnalyze time.Time
		var lastAutoanalyze time.Time
		var vacuumCount int64
		var autovacuumCount int64
		var analyzeCount int64
		var autoanalyzeCount int64

		if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, &nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount); err != nil {
			return err
		}

		ch <- prometheus.MustNewConstMetric(
			statUserTablesSeqScan,
			prometheus.CounterValue,
			float64(seqScan),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesSeqTupRead,
			prometheus.CounterValue,
			float64(seqTupRead),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesIdxScan,
			prometheus.CounterValue,
			float64(idxScan),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesIdxTupFetch,
			prometheus.CounterValue,
			float64(idxTupFetch),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNTupIns,
			prometheus.CounterValue,
			float64(nTupIns),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNTupUpd,
			prometheus.CounterValue,
			float64(nTupUpd),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNTupDel,
			prometheus.CounterValue,
			float64(nTupDel),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNTupHotUpd,
			prometheus.CounterValue,
			float64(nTupHotUpd),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNLiveTup,
			prometheus.GaugeValue,
			float64(nLiveTup),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNDeadTup,
			prometheus.GaugeValue,
			float64(nDeadTup),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesNModSinceAnalyze,
			prometheus.GaugeValue,
			float64(nModSinceAnalyze),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesLastVacuum,
			prometheus.GaugeValue,
			float64(lastVacuum.Unix()),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesLastAutovacuum,
			prometheus.GaugeValue,
			float64(lastAutovacuum.Unix()),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesLastAnalyze,
			prometheus.GaugeValue,
			float64(lastAnalyze.Unix()),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesLastAutoanalyze,
			prometheus.GaugeValue,
			float64(lastAutoanalyze.Unix()),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesVacuumCount,
			prometheus.CounterValue,
			float64(vacuumCount),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesAutovacuumCount,
			prometheus.CounterValue,
			float64(autovacuumCount),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesAnalyzeCount,
			prometheus.CounterValue,
			float64(analyzeCount),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statUserTablesAutoanalyzeCount,
			prometheus.CounterValue,
			float64(autoanalyzeCount),
			datname, schemaname, relname,
		)
	}

	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
collector/pg_stat_user_tables_test.go (new file, 138 lines)
@@ -0,0 +1,138 @@
|
|||||||
|
// Copyright 2023 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
package collector
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DATA-DOG/go-sqlmock"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
"github.com/smartystreets/goconvey/convey"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPGStatUserTablesCollector(t *testing.T) {
|
||||||
|
db, mock, err := sqlmock.New()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error opening a stub db connection: %s", err)
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
lastVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-02Z")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error parsing vacuum time: %s", err)
|
||||||
|
}
|
||||||
|
lastAutoVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-03Z")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error parsing vacuum time: %s", err)
|
||||||
|
}
|
||||||
|
lastAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-04Z")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error parsing vacuum time: %s", err)
|
||||||
|
}
|
||||||
|
lastAutoAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-05Z")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error parsing vacuum time: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
columns := []string{
|
||||||
|
"datname",
|
||||||
|
"schemaname",
|
||||||
|
"relname",
|
||||||
|
"seq_scan",
|
||||||
|
"seq_tup_read",
|
||||||
|
"idx_scan",
|
||||||
|
"idx_tup_fetch",
|
||||||
|
"n_tup_ins",
|
||||||
|
"n_tup_upd",
|
||||||
|
"n_tup_del",
|
||||||
|
"n_tup_hot_upd",
|
||||||
|
"n_live_tup",
|
||||||
|
"n_dead_tup",
|
||||||
|
"n_mod_since_analyze",
|
||||||
|
"last_vacuum",
|
||||||
|
"last_autovacuum",
|
||||||
|
"last_analyze",
|
||||||
|
"last_autoanalyze",
|
||||||
|
"vacuum_count",
|
||||||
|
"autovacuum_count",
|
||||||
|
"analyze_count",
|
||||||
|
"autoanalyze_count"}
|
||||||
|
	rows := sqlmock.NewRows(columns).
		AddRow("postgres",
			"public",
			"a_table",
			1,
			2,
			3,
			4,
			5,
			6,
			7,
			8,
			9,
			10,
			0,
			lastVacuumTime,
			lastAutoVacuumTime,
			lastAnalyzeTime,
			lastAutoAnalyzeTime,
			11,
			12,
			13,
			14)
	mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows)
	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGStatUserTablesCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 9},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 10},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 0},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685664000},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685750400},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685836800},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685923200},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 11},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 12},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 13},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 14},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
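The four large gauge values in `expected` are the Unix timestamps of the vacuum/analyze fixtures declared earlier in the test. A minimal sketch (not part of the commit) of that conversion, assuming the first fixture parses to 2023-06-02 00:00:00 UTC, an assumption consistent with the expected value 1685664000:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed fixture value; the real test defines lastVacuumTime by
	// parsing a timestamp string earlier in the file.
	lastVacuumTime := time.Date(2023, 6, 2, 0, 0, 0, 0, time.UTC)
	fmt.Println(lastVacuumTime.Unix()) // 1685664000, the expected gauge value
}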
182
collector/pg_statio_user_tables.go
Normal file
@@ -0,0 +1,182 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("statio_user_tables", defaultEnabled, NewPGStatIOUserTablesCollector)
}

type PGStatIOUserTablesCollector struct {
	log log.Logger
}

const statioUserTableSubsystem = "statio_user_tables"

func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) {
	return &PGStatIOUserTablesCollector{log: config.logger}, nil
}

var (
	statioUserTablesHeapBlksRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"),
		"Number of disk blocks read from this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesHeapBlksHit = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"),
		"Number of buffer hits in this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesIdxBlksRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"),
		"Number of disk blocks read from all indexes on this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesIdxBlksHit = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"),
		"Number of buffer hits in all indexes on this table",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesToastBlksRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"),
		"Number of disk blocks read from this table's TOAST table (if any)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesToastBlksHit = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"),
		"Number of buffer hits in this table's TOAST table (if any)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesTidxBlksRead = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"),
		"Number of disk blocks read from this table's TOAST table indexes (if any)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)
	statioUserTablesTidxBlksHit = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"),
		"Number of buffer hits in this table's TOAST table indexes (if any)",
		[]string{"datname", "schemaname", "relname"},
		prometheus.Labels{},
	)

	statioUserTablesQuery = `SELECT
		current_database() datname,
		schemaname,
		relname,
		heap_blks_read,
		heap_blks_hit,
		idx_blks_read,
		idx_blks_hit,
		toast_blks_read,
		toast_blks_hit,
		tidx_blks_read,
		tidx_blks_hit
	FROM pg_statio_user_tables`
)

func (PGStatIOUserTablesCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	rows, err := db.QueryContext(ctx,
		statioUserTablesQuery)

	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var datname string
		var schemaname string
		var relname string
		var heapBlksRead int64
		var heapBlksHit int64
		var idxBlksRead int64
		var idxBlksHit int64
		var toastBlksRead int64
		var toastBlksHit int64
		var tidxBlksRead int64
		var tidxBlksHit int64

		if err := rows.Scan(&datname, &schemaname, &relname, &heapBlksRead, &heapBlksHit, &idxBlksRead, &idxBlksHit, &toastBlksRead, &toastBlksHit, &tidxBlksRead, &tidxBlksHit); err != nil {
			return err
		}

		ch <- prometheus.MustNewConstMetric(
			statioUserTablesHeapBlksRead,
			prometheus.CounterValue,
			float64(heapBlksRead),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesHeapBlksHit,
			prometheus.CounterValue,
			float64(heapBlksHit),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesIdxBlksRead,
			prometheus.CounterValue,
			float64(idxBlksRead),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesIdxBlksHit,
			prometheus.CounterValue,
			float64(idxBlksHit),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesToastBlksRead,
			prometheus.CounterValue,
			float64(toastBlksRead),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesToastBlksHit,
			prometheus.CounterValue,
			float64(toastBlksHit),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesTidxBlksRead,
			prometheus.CounterValue,
			float64(tidxBlksRead),
			datname, schemaname, relname,
		)
		ch <- prometheus.MustNewConstMetric(
			statioUserTablesTidxBlksHit,
			prometheus.CounterValue,
			float64(tidxBlksHit),
			datname, schemaname, relname,
		)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	return nil
}
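Each row fans out into eight const metrics whose label values must be passed in the same order as the Desc's variable labels. A minimal sketch (not part of the commit) of that contract, reusing the heap-reads metric name from the file above; the standalone Desc here is an illustration, not the package variable:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc(
		"pg_statio_user_tables_heap_blocks_read",
		"Number of disk blocks read from this table",
		[]string{"datname", "schemaname", "relname"}, nil,
	)
	// Label values are positional: they pair with the variable labels above.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42,
		"postgres", "public", "a_table")
	pb := &dto.Metric{}
	if err := m.Write(pb); err != nil {
		panic(err)
	}
	fmt.Println(pb.GetCounter().GetValue()) // 42
}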
88
collector/pg_statio_user_tables_test.go
Normal file
@@ -0,0 +1,88 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPGStatIOUserTablesCollector(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{
		"datname",
		"schemaname",
		"relname",
		"heap_blks_read",
		"heap_blks_hit",
		"idx_blks_read",
		"idx_blks_hit",
		"toast_blks_read",
		"toast_blks_hit",
		"tidx_blks_read",
		"tidx_blks_hit",
	}
	rows := sqlmock.NewRows(columns).
		AddRow("postgres",
			"public",
			"a_table",
			1,
			2,
			3,
			4,
			5,
			6,
			7,
			8)
	mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows)
	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGStatIOUserTablesCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7},
		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
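A minimal sketch (not part of the commit) of why these tests wrap query strings in sanitizeQuery before handing them to ExpectQuery: sqlmock's default matcher treats the expectation as a regular expression, so SQL metacharacters must be escaped, while a pattern free of them matches the literal query text directly:

package main

import (
	"context"
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// ExpectQuery's argument is a regexp by default; this pattern has no
	// unescaped metacharacters, so it matches the literal SQL below.
	mock.ExpectQuery(`SELECT heap_blks_read FROM pg_statio_user_tables`).
		WillReturnRows(sqlmock.NewRows([]string{"heap_blks_read"}).AddRow(42))

	var n int64
	row := db.QueryRowContext(context.Background(),
		"SELECT heap_blks_read FROM pg_statio_user_tables")
	if err := row.Scan(&n); err != nil {
		panic(err)
	}
	fmt.Println(n) // 42
}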
collector/replication_slots.go
@@ -46,20 +46,22 @@ var (
 	)
 	pgReplicationSlotIsActiveDesc = prometheus.NewDesc(
 		"pg_replication_slot_is_active",
-		"last lsn confirmed flushed to the replication slot",
+		"whether the replication slot is active or not",
 		[]string{"slot_name"}, nil,
 	)
-)
-
-func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
-	rows, err := db.QueryContext(ctx,
-		`SELECT
+
+	pgReplicationSlotQuery = `SELECT
 		slot_name,
 		pg_current_wal_lsn() - '0/0' AS current_wal_lsn,
 		coalesce(confirmed_flush_lsn, '0/0') - '0/0',
 		active
 	FROM
-		pg_replication_slots;`)
+		pg_replication_slots;`
+)
+
+func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
+	rows, err := db.QueryContext(ctx,
+		pgReplicationSlotQuery)
 	if err != nil {
 		return err
 	}
@@ -68,12 +70,17 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 	for rows.Next() {
 		var slotName string
 		var walLSN int64
-		var flusLSN int64
+		var flushLSN int64
 		var isActive bool
-		if err := rows.Scan(&slotName, &walLSN, &flusLSN, &isActive); err != nil {
+		if err := rows.Scan(&slotName, &walLSN, &flushLSN, &isActive); err != nil {
 			return err
 		}
+
+		isActiveValue := 0
+		if isActive {
+			isActiveValue = 1
+		}
+
 		ch <- prometheus.MustNewConstMetric(
 			pgReplicationSlotCurrentWalDesc,
 			prometheus.GaugeValue, float64(walLSN), slotName,
@@ -81,12 +88,12 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
 		if isActive {
 			ch <- prometheus.MustNewConstMetric(
 				pgReplicationSlotCurrentFlushDesc,
-				prometheus.GaugeValue, float64(flusLSN), slotName,
+				prometheus.GaugeValue, float64(flushLSN), slotName,
 			)
 		}
 		ch <- prometheus.MustNewConstMetric(
 			pgReplicationSlotIsActiveDesc,
-			prometheus.GaugeValue, float64(flusLSN), slotName,
+			prometheus.GaugeValue, float64(isActiveValue), slotName,
 		)
 	}
 	if err := rows.Err(); err != nil {
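The second hunk above introduces the 0/1 conversion that the third hunk then exports: the old code mistakenly reused the flush LSN as the value of pg_replication_slot_is_active. A minimal standalone sketch (not part of the commit) of the bool-to-gauge pattern:

package main

import "fmt"

// boolToGauge maps a SQL boolean scanned into a Go bool onto the 0/1
// float value a Prometheus gauge expects.
func boolToGauge(b bool) float64 {
	if b {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(boolToGauge(true), boolToGauge(false)) // 1 0
}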
101
collector/replication_slots_test.go
Normal file
@@ -0,0 +1,101 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector

import (
	"context"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/smartystreets/goconvey/convey"
)

func TestPgReplicationSlotCollectorActive(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"}
	rows := sqlmock.NewRows(columns).
		AddRow("test_slot", 5, 3, true)
	mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGReplicationSlotCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"slot_name": "test_slot"}, value: 5, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{"slot_name": "test_slot"}, value: 3, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{"slot_name": "test_slot"}, value: 1, metricType: dto.MetricType_GAUGE},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}

func TestPgReplicationSlotCollectorInActive(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Error opening a stub db connection: %s", err)
	}
	defer db.Close()

	columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"}
	rows := sqlmock.NewRows(columns).
		AddRow("test_slot", 6, 12, false)
	mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		defer close(ch)
		c := PGReplicationSlotCollector{}

		if err := c.Update(context.Background(), db, ch); err != nil {
			t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err)
		}
	}()

	expected := []MetricResult{
		{labels: labelMap{"slot_name": "test_slot"}, value: 6, metricType: dto.MetricType_GAUGE},
		{labels: labelMap{"slot_name": "test_slot"}, value: 0, metricType: dto.MetricType_GAUGE},
	}

	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expected {
			m := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, m)
		}
	})
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
5
go.mod
@@ -3,6 +3,7 @@ module github.com/prometheus-community/postgres_exporter
 go 1.19
 
 require (
+	github.com/DATA-DOG/go-sqlmock v1.5.0
 	github.com/alecthomas/kingpin/v2 v2.3.2
 	github.com/blang/semver/v4 v4.0.0
 	github.com/go-kit/log v0.2.1
@@ -11,6 +12,7 @@ require (
 	github.com/prometheus/client_model v0.4.0
 	github.com/prometheus/common v0.44.0
 	github.com/prometheus/exporter-toolkit v0.10.0
+	github.com/smartystreets/goconvey v1.8.0
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
@@ -23,13 +25,16 @@ require (
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/gopherjs/gopherjs v1.17.2 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/rogpeppe/go-internal v1.10.0 // indirect
+	github.com/smartystreets/assertions v1.13.1 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	golang.org/x/crypto v0.8.0 // indirect
 	golang.org/x/net v0.10.0 // indirect
10
go.sum
10
go.sum
@ -1,3 +1,5 @@
|
|||||||
|
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||||
|
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||||
github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
|
github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
|
||||||
github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
||||||
@ -25,8 +27,12 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
|
|||||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
|
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||||
|
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
@ -56,6 +62,10 @@ github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB
|
|||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
|
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
|
||||||
|
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
|
||||||
|
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
|
||||||
|
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||||
|
246
queries.yaml
246
queries.yaml
@ -1,244 +1,2 @@
|
|||||||
pg_replication:
|
# Adding queries to this file is deprecated
|
||||||
query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag"
|
# Example queries have been transformed into collectors.
|
||||||
master: true
|
|
||||||
metrics:
|
|
||||||
- lag:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Replication lag behind master in seconds"
|
|
||||||
|
|
||||||
pg_postmaster:
|
|
||||||
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
|
|
||||||
master: true
|
|
||||||
metrics:
|
|
||||||
- start_time_seconds:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Time at which postmaster started"
|
|
||||||
|
|
||||||
pg_stat_user_tables:
|
|
||||||
query: |
|
|
||||||
SELECT
|
|
||||||
current_database() datname,
|
|
||||||
schemaname,
|
|
||||||
relname,
|
|
||||||
seq_scan,
|
|
||||||
seq_tup_read,
|
|
||||||
idx_scan,
|
|
||||||
idx_tup_fetch,
|
|
||||||
n_tup_ins,
|
|
||||||
n_tup_upd,
|
|
||||||
n_tup_del,
|
|
||||||
n_tup_hot_upd,
|
|
||||||
n_live_tup,
|
|
||||||
n_dead_tup,
|
|
||||||
n_mod_since_analyze,
|
|
||||||
COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum,
|
|
||||||
COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum,
|
|
||||||
COALESCE(last_analyze, '1970-01-01Z') as last_analyze,
|
|
||||||
COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze,
|
|
||||||
vacuum_count,
|
|
||||||
autovacuum_count,
|
|
||||||
analyze_count,
|
|
||||||
autoanalyze_count
|
|
||||||
FROM
|
|
||||||
pg_stat_user_tables
|
|
||||||
metrics:
|
|
||||||
- datname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of current database"
|
|
||||||
- schemaname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of the schema that this table is in"
|
|
||||||
- relname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of this table"
|
|
||||||
- seq_scan:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of sequential scans initiated on this table"
|
|
||||||
- seq_tup_read:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of live rows fetched by sequential scans"
|
|
||||||
- idx_scan:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of index scans initiated on this table"
|
|
||||||
- idx_tup_fetch:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of live rows fetched by index scans"
|
|
||||||
- n_tup_ins:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of rows inserted"
|
|
||||||
- n_tup_upd:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of rows updated"
|
|
||||||
- n_tup_del:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of rows deleted"
|
|
||||||
- n_tup_hot_upd:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of rows HOT updated (i.e., with no separate index update required)"
|
|
||||||
- n_live_tup:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Estimated number of live rows"
|
|
||||||
- n_dead_tup:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Estimated number of dead rows"
|
|
||||||
- n_mod_since_analyze:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Estimated number of rows changed since last analyze"
|
|
||||||
- last_vacuum:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)"
|
|
||||||
- last_autovacuum:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Last time at which this table was vacuumed by the autovacuum daemon"
|
|
||||||
- last_analyze:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Last time at which this table was manually analyzed"
|
|
||||||
- last_autoanalyze:
|
|
||||||
usage: "GAUGE"
|
|
||||||
description: "Last time at which this table was analyzed by the autovacuum daemon"
|
|
||||||
- vacuum_count:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)"
|
|
||||||
- autovacuum_count:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of times this table has been vacuumed by the autovacuum daemon"
|
|
||||||
- analyze_count:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of times this table has been manually analyzed"
|
|
||||||
- autoanalyze_count:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of times this table has been analyzed by the autovacuum daemon"
|
|
||||||
|
|
||||||
pg_statio_user_tables:
|
|
||||||
query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables"
|
|
||||||
metrics:
|
|
||||||
- datname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of current database"
|
|
||||||
- schemaname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of the schema that this table is in"
|
|
||||||
- relname:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Name of this table"
|
|
||||||
- heap_blks_read:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of disk blocks read from this table"
|
|
||||||
- heap_blks_hit:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of buffer hits in this table"
|
|
||||||
- idx_blks_read:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of disk blocks read from all indexes on this table"
|
|
||||||
- idx_blks_hit:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of buffer hits in all indexes on this table"
|
|
||||||
- toast_blks_read:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of disk blocks read from this table's TOAST table (if any)"
|
|
||||||
- toast_blks_hit:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of buffer hits in this table's TOAST table (if any)"
|
|
||||||
- tidx_blks_read:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of disk blocks read from this table's TOAST table indexes (if any)"
|
|
||||||
- tidx_blks_hit:
|
|
||||||
usage: "COUNTER"
|
|
||||||
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
|
|
||||||
|
|
||||||
#
|
|
||||||
# WARNING:
|
|
||||||
# This set of metrics can be very expensive on a busy server as every
|
|
||||||
# unique query executed will create an additional time series
|
|
||||||
#
|
|
||||||
# pg_stat_statements:
|
|
||||||
# query: |
|
|
||||||
# SELECT
|
|
||||||
# pg_get_userbyid(userid) as user,
|
|
||||||
# pg_database.datname,
|
|
||||||
# pg_stat_statements.queryid,
|
|
||||||
# pg_stat_statements.calls as calls_total,
|
|
||||||
# pg_stat_statements.total_time / 1000.0 as seconds_total,
|
|
||||||
# pg_stat_statements.rows as rows_total,
|
|
||||||
# pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total,
|
|
||||||
# pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total
|
|
||||||
# FROM pg_stat_statements
|
|
||||||
# JOIN pg_database
|
|
||||||
# ON pg_database.oid = pg_stat_statements.dbid
|
|
||||||
# WHERE
|
|
||||||
# total_time > (
|
|
||||||
# SELECT percentile_cont(0.1)
|
|
||||||
# WITHIN GROUP (ORDER BY total_time)
|
|
||||||
# FROM pg_stat_statements
|
|
||||||
# )
|
|
||||||
# ORDER BY seconds_total DESC
|
|
||||||
# LIMIT 100
|
|
||||||
# metrics:
|
|
||||||
# - user:
|
|
||||||
# usage: "LABEL"
|
|
||||||
# description: "The user who executed the statement"
|
|
||||||
# - datname:
|
|
||||||
# usage: "LABEL"
|
|
||||||
# description: "The database in which the statement was executed"
|
|
||||||
# - queryid:
|
|
||||||
# usage: "LABEL"
|
|
||||||
# description: "Internal hash code, computed from the statement's parse tree"
|
|
||||||
# - calls_total:
|
|
||||||
# usage: "COUNTER"
|
|
||||||
# description: "Number of times executed"
|
|
||||||
# - seconds_total:
|
|
||||||
# usage: "COUNTER"
|
|
||||||
# description: "Total time spent in the statement, in seconds"
|
|
||||||
# - rows_total:
|
|
||||||
# usage: "COUNTER"
|
|
||||||
# description: "Total number of rows retrieved or affected by the statement"
|
|
||||||
# - block_read_seconds_total:
|
|
||||||
# usage: "COUNTER"
|
|
||||||
# description: "Total time the statement spent reading blocks, in seconds"
|
|
||||||
# - block_write_seconds_total:
|
|
||||||
# usage: "COUNTER"
|
|
||||||
# description: "Total time the statement spent writing blocks, in seconds"
|
|
||||||
|
|
||||||
pg_process_idle:
|
|
||||||
query: |
|
|
||||||
WITH
|
|
||||||
metrics AS (
|
|
||||||
SELECT
|
|
||||||
application_name,
|
|
||||||
SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum,
|
|
||||||
COUNT(*) AS process_idle_seconds_count
|
|
||||||
FROM pg_stat_activity
|
|
||||||
WHERE state = 'idle'
|
|
||||||
GROUP BY application_name
|
|
||||||
),
|
|
||||||
buckets AS (
|
|
||||||
SELECT
|
|
||||||
application_name,
|
|
||||||
le,
|
|
||||||
SUM(
|
|
||||||
CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le
|
|
||||||
THEN 1
|
|
||||||
ELSE 0
|
|
||||||
END
|
|
||||||
)::bigint AS bucket
|
|
||||||
FROM
|
|
||||||
pg_stat_activity,
|
|
||||||
UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le
|
|
||||||
GROUP BY application_name, le
|
|
||||||
ORDER BY application_name, le
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
application_name,
|
|
||||||
process_idle_seconds_sum as seconds_sum,
|
|
||||||
process_idle_seconds_count as seconds_count,
|
|
||||||
ARRAY_AGG(le) AS seconds,
|
|
||||||
ARRAY_AGG(bucket) AS seconds_bucket
|
|
||||||
FROM metrics JOIN buckets USING (application_name)
|
|
||||||
GROUP BY 1, 2, 3
|
|
||||||
metrics:
|
|
||||||
- application_name:
|
|
||||||
usage: "LABEL"
|
|
||||||
description: "Application Name"
|
|
||||||
- seconds:
|
|
||||||
usage: "HISTOGRAM"
|
|
||||||
description: "Idle time of server processes"
|
|
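For reference, a hedged sketch (not part of the commit) of how a histogram like the removed pg_process_idle could be emitted from a native collector: prometheus.MustNewConstHistogram takes the observation count, sum, and cumulative per-bucket counts that the query's seconds/seconds_bucket arrays encode. The metric name and fixture numbers below are illustrative only.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"pg_process_idle_seconds",
		"Idle time of server processes",
		[]string{"application_name"}, nil,
	)
	// Cumulative bucket counts keyed by upper bound (le), mirroring the
	// ARRAY_AGG(le) / ARRAY_AGG(bucket) columns of the removed query.
	// All values here are made-up fixtures.
	buckets := map[float64]uint64{1: 3, 5: 7, 60: 9}
	m := prometheus.MustNewConstHistogram(desc, 10, 128.5, buckets, "psql")
	_ = m // in a collector, this would be sent on the Update channel
}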