Checkpoint-related columns in PG 17 have been moved from pg_stat_bgwriter to pg_stat_checkpointer (#1072)

* Checkpoint-related columns in PG 17 have been moved from pg_stat_bgwriter to pg_stat_checkpointer

Fixes https://github.com/prometheus-community/postgres_exporter/issues/1060

See: https://www.dbi-services.com/blog/postgresql-17-new-catalog-view-pg_stat_checkpointer/
Signed-off-by: Nicolas Rodriguez <nico@nicoladmin.fr>
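
For context, a minimal sketch of the approach taken here (simplified from the collector diff below; the query constants and the semver gate mirror the patch, while the standalone main() is only for illustration):

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

const (
	// Before PostgreSQL 17, pg_stat_bgwriter also carried the checkpoint counters.
	statBGWriterQueryBefore17 = `SELECT checkpoints_timed, checkpoints_req,
		checkpoint_write_time, checkpoint_sync_time, buffers_checkpoint,
		buffers_clean, maxwritten_clean, buffers_backend, buffers_backend_fsync,
		buffers_alloc, stats_reset
	FROM pg_stat_bgwriter;`
	// From PostgreSQL 17 on, only the background-writer columns remain;
	// the checkpoint counters moved to pg_stat_checkpointer.
	statBGWriterQueryAfter17 = `SELECT buffers_clean, maxwritten_clean, buffers_alloc, stats_reset
	FROM pg_stat_bgwriter;`
)

// bgwriterQuery picks the statement that matches the server version.
func bgwriterQuery(version semver.Version) string {
	if version.GE(semver.MustParse("17.0.0")) {
		return statBGWriterQueryAfter17
	}
	return statBGWriterQueryBefore17
}

func main() {
	fmt.Println(bgwriterQuery(semver.MustParse("17.0.0")))
}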

* Add support for pg_stat_checkpointer

See: https://www.dbi-services.com/blog/postgresql-17-new-catalog-view-pg_stat_checkpointer/
Signed-off-by: Nicolas Rodriguez <nico@nicoladmin.fr>
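
To see the new catalog view directly, a minimal standalone sketch outside the exporter (assumes PostgreSQL 17+, the github.com/lib/pq driver, and a placeholder connection string; adjust to your environment):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumption: any PostgreSQL driver works; lib/pq is used here for brevity
)

func main() {
	// Placeholder DSN, not taken from the patch.
	db, err := sql.Open("postgres", "postgres://postgres@localhost/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same counters the new collector reads from PostgreSQL 17's pg_stat_checkpointer.
	row := db.QueryRow(`SELECT num_timed, num_requested, write_time, sync_time, buffers_written
		FROM pg_stat_checkpointer`)

	var numTimed, numRequested, buffersWritten int64
	var writeTime, syncTime float64
	if err := row.Scan(&numTimed, &numRequested, &writeTime, &syncTime, &buffersWritten); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("checkpoints: timed=%d requested=%d write_time=%.0fms sync_time=%.0fms buffers_written=%d\n",
		numTimed, numRequested, writeTime, syncTime, buffersWritten)
}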

* Run integration tests with Postgres 17

Signed-off-by: Nicolas Rodriguez <nico@nicoladmin.fr>

* Update date in file header

Signed-off-by: Nicolas Rodriguez <nico@nicoladmin.fr>

---------

Signed-off-by: Nicolas Rodriguez <nico@nicoladmin.fr>
Nicolas Rodriguez 2025-01-01 22:03:43 +01:00 committed by GitHub
parent 5145620988
commit bea2609519
5 changed files with 533 additions and 111 deletions

View File

@@ -63,6 +63,7 @@ workflows:
- cimg/postgres:14.9
- cimg/postgres:15.4
- cimg/postgres:16.0
- cimg/postgres:17.0
- prometheus/build:
name: build
parallelism: 3

View File

@@ -17,6 +17,7 @@ import (
"context"
"database/sql"
"github.com/blang/semver/v4"
"github.com/prometheus/client_golang/prometheus"
)
@@ -101,7 +102,7 @@ var (
prometheus.Labels{},
)
statBGWriterQuery = `SELECT
statBGWriterQueryBefore17 = `SELECT
checkpoints_timed
,checkpoints_req
,checkpoint_write_time
@@ -114,121 +115,177 @@ var (
,buffers_alloc
,stats_reset
FROM pg_stat_bgwriter;`
statBGWriterQueryAfter17 = `SELECT
buffers_clean
,maxwritten_clean
,buffers_alloc
,stats_reset
FROM pg_stat_bgwriter;`
)
func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
db := instance.getDB()
row := db.QueryRowContext(ctx,
statBGWriterQuery)
if instance.version.GE(semver.MustParse("17.0.0")) {
db := instance.getDB()
row := db.QueryRowContext(ctx, statBGWriterQueryAfter17)
var cpt, cpr, bcp, bc, mwc, bb, bbf, ba sql.NullInt64
var cpwt, cpst sql.NullFloat64
var sr sql.NullTime
var bc, mwc, ba sql.NullInt64
var sr sql.NullTime
err := row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr)
if err != nil {
return err
}
err := row.Scan(&bc, &mwc, &ba, &sr)
if err != nil {
return err
}
cptMetric := 0.0
if cpt.Valid {
cptMetric = float64(cpt.Int64)
bcMetric := 0.0
if bc.Valid {
bcMetric = float64(bc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersCleanDesc,
prometheus.CounterValue,
bcMetric,
)
mwcMetric := 0.0
if mwc.Valid {
mwcMetric = float64(mwc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterMaxwrittenCleanDesc,
prometheus.CounterValue,
mwcMetric,
)
baMetric := 0.0
if ba.Valid {
baMetric = float64(ba.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersAllocDesc,
prometheus.CounterValue,
baMetric,
)
srMetric := 0.0
if sr.Valid {
srMetric = float64(sr.Time.Unix())
}
ch <- prometheus.MustNewConstMetric(
statBGWriterStatsResetDesc,
prometheus.CounterValue,
srMetric,
)
} else {
db := instance.getDB()
row := db.QueryRowContext(ctx, statBGWriterQueryBefore17)
var cpt, cpr, bcp, bc, mwc, bb, bbf, ba sql.NullInt64
var cpwt, cpst sql.NullFloat64
var sr sql.NullTime
err := row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr)
if err != nil {
return err
}
cptMetric := 0.0
if cpt.Valid {
cptMetric = float64(cpt.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsTimedDesc,
prometheus.CounterValue,
cptMetric,
)
cprMetric := 0.0
if cpr.Valid {
cprMetric = float64(cpr.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsReqDesc,
prometheus.CounterValue,
cprMetric,
)
cpwtMetric := 0.0
if cpwt.Valid {
cpwtMetric = float64(cpwt.Float64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsReqTimeDesc,
prometheus.CounterValue,
cpwtMetric,
)
cpstMetric := 0.0
if cpst.Valid {
cpstMetric = float64(cpst.Float64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsSyncTimeDesc,
prometheus.CounterValue,
cpstMetric,
)
bcpMetric := 0.0
if bcp.Valid {
bcpMetric = float64(bcp.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersCheckpointDesc,
prometheus.CounterValue,
bcpMetric,
)
bcMetric := 0.0
if bc.Valid {
bcMetric = float64(bc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersCleanDesc,
prometheus.CounterValue,
bcMetric,
)
mwcMetric := 0.0
if mwc.Valid {
mwcMetric = float64(mwc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterMaxwrittenCleanDesc,
prometheus.CounterValue,
mwcMetric,
)
bbMetric := 0.0
if bb.Valid {
bbMetric = float64(bb.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersBackendDesc,
prometheus.CounterValue,
bbMetric,
)
bbfMetric := 0.0
if bbf.Valid {
bbfMetric = float64(bbf.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersBackendFsyncDesc,
prometheus.CounterValue,
bbfMetric,
)
baMetric := 0.0
if ba.Valid {
baMetric = float64(ba.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersAllocDesc,
prometheus.CounterValue,
baMetric,
)
srMetric := 0.0
if sr.Valid {
srMetric = float64(sr.Time.Unix())
}
ch <- prometheus.MustNewConstMetric(
statBGWriterStatsResetDesc,
prometheus.CounterValue,
srMetric,
)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsTimedDesc,
prometheus.CounterValue,
cptMetric,
)
cprMetric := 0.0
if cpr.Valid {
cprMetric = float64(cpr.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsReqDesc,
prometheus.CounterValue,
cprMetric,
)
cpwtMetric := 0.0
if cpwt.Valid {
cpwtMetric = float64(cpwt.Float64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsReqTimeDesc,
prometheus.CounterValue,
cpwtMetric,
)
cpstMetric := 0.0
if cpst.Valid {
cpstMetric = float64(cpst.Float64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterCheckpointsSyncTimeDesc,
prometheus.CounterValue,
cpstMetric,
)
bcpMetric := 0.0
if bcp.Valid {
bcpMetric = float64(bcp.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersCheckpointDesc,
prometheus.CounterValue,
bcpMetric,
)
bcMetric := 0.0
if bc.Valid {
bcMetric = float64(bc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersCleanDesc,
prometheus.CounterValue,
bcMetric,
)
mwcMetric := 0.0
if mwc.Valid {
mwcMetric = float64(mwc.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterMaxwrittenCleanDesc,
prometheus.CounterValue,
mwcMetric,
)
bbMetric := 0.0
if bb.Valid {
bbMetric = float64(bb.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersBackendDesc,
prometheus.CounterValue,
bbMetric,
)
bbfMetric := 0.0
if bbf.Valid {
bbfMetric = float64(bbf.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersBackendFsyncDesc,
prometheus.CounterValue,
bbfMetric,
)
baMetric := 0.0
if ba.Valid {
baMetric = float64(ba.Int64)
}
ch <- prometheus.MustNewConstMetric(
statBGWriterBuffersAllocDesc,
prometheus.CounterValue,
baMetric,
)
srMetric := 0.0
if sr.Valid {
srMetric = float64(sr.Time.Unix())
}
ch <- prometheus.MustNewConstMetric(
statBGWriterStatsResetDesc,
prometheus.CounterValue,
srMetric,
)
return nil
}

View File

@@ -52,7 +52,7 @@ func TestPGStatBGWriterCollector(t *testing.T) {
rows := sqlmock.NewRows(columns).
AddRow(354, 4945, 289097744, 1242257, int64(3275602074), 89320867, 450139, 2034563757, 0, int64(2725688749), srT)
mock.ExpectQuery(sanitizeQuery(statBGWriterQuery)).WillReturnRows(rows)
mock.ExpectQuery(sanitizeQuery(statBGWriterQueryBefore17)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
@@ -113,7 +113,7 @@ func TestPGStatBGWriterCollectorNullValues(t *testing.T) {
rows := sqlmock.NewRows(columns).
AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
mock.ExpectQuery(sanitizeQuery(statBGWriterQuery)).WillReturnRows(rows)
mock.ExpectQuery(sanitizeQuery(statBGWriterQueryBefore17)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {

View File

@@ -0,0 +1,221 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"database/sql"
"github.com/prometheus/client_golang/prometheus"
)
const statCheckpointerSubsystem = "stat_checkpointer"
func init() {
// WARNING:
// Disabled by default because this set of metrics is only available from Postgres 17
registerCollector(statCheckpointerSubsystem, defaultDisabled, NewPGStatCheckpointerCollector)
}
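// Because the collector is registered with defaultDisabled, it has to be enabled
// explicitly when starting the exporter. With the exporter's kingpin-generated
// per-collector flags that would look like the line below (the flag name is
// inferred from the subsystem name above and should be treated as an assumption):
//
//	postgres_exporter --collector.stat_checkpointer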
type PGStatCheckpointerCollector struct {
}
func NewPGStatCheckpointerCollector(collectorConfig) (Collector, error) {
return &PGStatCheckpointerCollector{}, nil
}
var (
statCheckpointerNumTimedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "num_timed_total"),
"Number of scheduled checkpoints due to timeout",
[]string{},
prometheus.Labels{},
)
statCheckpointerNumRequestedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "num_requested_total"),
"Number of requested checkpoints that have been performed",
[]string{},
prometheus.Labels{},
)
statCheckpointerRestartpointsTimedDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_timed_total"),
"Number of scheduled restartpoints due to timeout or after a failed attempt to perform it",
[]string{},
prometheus.Labels{},
)
statCheckpointerRestartpointsReqDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_req_total"),
"Number of requested restartpoints",
[]string{},
prometheus.Labels{},
)
statCheckpointerRestartpointsDoneDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_done_total"),
"Number of restartpoints that have been performed",
[]string{},
prometheus.Labels{},
)
statCheckpointerWriteTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "write_time_total"),
"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds",
[]string{},
prometheus.Labels{},
)
statCheckpointerSyncTimeDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "sync_time_total"),
"Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds",
[]string{},
prometheus.Labels{},
)
statCheckpointerBuffersWrittenDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "buffers_written_total"),
"Number of buffers written during checkpoints and restartpoints",
[]string{},
prometheus.Labels{},
)
statCheckpointerStatsResetDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "stats_reset_total"),
"Time at which these statistics were last reset",
[]string{},
prometheus.Labels{},
)
statCheckpointerQuery = `SELECT
num_timed
,num_requested
,restartpoints_timed
,restartpoints_req
,restartpoints_done
,write_time
,sync_time
,buffers_written
,stats_reset
FROM pg_stat_checkpointer;`
)
func (PGStatCheckpointerCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
db := instance.getDB()
row := db.QueryRowContext(ctx, statCheckpointerQuery)
// num_timed = nt = bigint
// num_requested = nr = bigint
// restartpoints_timed = rpt = bigint
// restartpoints_req = rpr = bigint
// restartpoints_done = rpd = bigint
// write_time = wt = double precision
// sync_time = st = double precision
// buffers_written = bw = bigint
// stats_reset = sr = timestamp
var nt, nr, rpt, rpr, rpd, bw sql.NullInt64
var wt, st sql.NullFloat64
var sr sql.NullTime
err := row.Scan(&nt, &nr, &rpt, &rpr, &rpd, &wt, &st, &bw, &sr)
if err != nil {
return err
}
ntMetric := 0.0
if nt.Valid {
ntMetric = float64(nt.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerNumTimedDesc,
prometheus.CounterValue,
ntMetric,
)
nrMetric := 0.0
if nr.Valid {
nrMetric = float64(nr.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerNumRequestedDesc,
prometheus.CounterValue,
nrMetric,
)
rptMetric := 0.0
if rpt.Valid {
rptMetric = float64(rpt.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerRestartpointsTimedDesc,
prometheus.CounterValue,
rptMetric,
)
rprMetric := 0.0
if rpr.Valid {
rprMetric = float64(rpr.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerRestartpointsReqDesc,
prometheus.CounterValue,
rprMetric,
)
rpdMetric := 0.0
if rpd.Valid {
rpdMetric = float64(rpd.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerRestartpointsDoneDesc,
prometheus.CounterValue,
rpdMetric,
)
wtMetric := 0.0
if wt.Valid {
wtMetric = float64(wt.Float64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerWriteTimeDesc,
prometheus.CounterValue,
wtMetric,
)
stMetric := 0.0
if st.Valid {
stMetric = float64(st.Float64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerSyncTimeDesc,
prometheus.CounterValue,
stMetric,
)
bwMetric := 0.0
if bw.Valid {
bwMetric = float64(bw.Int64)
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerBuffersWrittenDesc,
prometheus.CounterValue,
bwMetric,
)
srMetric := 0.0
if sr.Valid {
srMetric = float64(sr.Time.Unix())
}
ch <- prometheus.MustNewConstMetric(
statCheckpointerStatsResetDesc,
prometheus.CounterValue,
srMetric,
)
return nil
}

View File

@@ -0,0 +1,143 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/smartystreets/goconvey/convey"
)
func TestPGStatCheckpointerCollector(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("Error opening a stub db connection: %s", err)
}
defer db.Close()
inst := &instance{db: db}
columns := []string{
"num_timed",
"num_requested",
"restartpoints_timed",
"restartpoints_req",
"restartpoints_done",
"write_time",
"sync_time",
"buffers_written",
"stats_reset"}
srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07")
if err != nil {
t.Fatalf("Error parsing time: %s", err)
}
rows := sqlmock.NewRows(columns).
AddRow(354, 4945, 289097744, 1242257, int64(3275602074), 89320867, 450139, 2034563757, srT)
mock.ExpectQuery(sanitizeQuery(statCheckpointerQuery)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
defer close(ch)
c := PGStatCheckpointerCollector{}
if err := c.Update(context.Background(), inst, ch); err != nil {
t.Errorf("Error calling PGStatCheckpointerCollector.Update: %s", err)
}
}()
expected := []MetricResult{
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 354},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 4945},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 289097744},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1242257},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 3275602074},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 89320867},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 450139},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2034563757},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}
convey.Convey("Metrics comparison", t, func() {
for _, expect := range expected {
m := readMetric(<-ch)
convey.So(expect, convey.ShouldResemble, m)
}
})
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}
func TestPGStatCheckpointerCollectorNullValues(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("Error opening a stub db connection: %s", err)
}
defer db.Close()
inst := &instance{db: db}
columns := []string{
"num_timed",
"num_requested",
"restartpoints_timed",
"restartpoints_req",
"restartpoints_done",
"write_time",
"sync_time",
"buffers_written",
"stats_reset"}
rows := sqlmock.NewRows(columns).
AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil)
mock.ExpectQuery(sanitizeQuery(statCheckpointerQuery)).WillReturnRows(rows)
ch := make(chan prometheus.Metric)
go func() {
defer close(ch)
c := PGStatCheckpointerCollector{}
if err := c.Update(context.Background(), inst, ch); err != nil {
t.Errorf("Error calling PGStatCheckpointerCollector.Update: %s", err)
}
}()
expected := []MetricResult{
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
}
convey.Convey("Metrics comparison", t, func() {
for _, expect := range expected {
m := readMetric(<-ch)
convey.So(expect, convey.ShouldResemble, m)
}
})
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled exceptions: %s", err)
}
}