From 8d087f2c64c4a16d9214392ad39a31720eb38ee0 Mon Sep 17 00:00:00 2001
From: Felix Yuan <felix.yuan@reddit.com>
Date: Mon, 26 Jun 2023 23:07:59 -0700
Subject: [PATCH] Bug fix: Make collector not fail on null values (#823)

* Make all values nullable

---------

Signed-off-by: Felix Yuan <felix.yuan@reddit.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
---
 collector/pg_database.go                |  18 +-
 collector/pg_database_test.go           |  40 +++
 collector/pg_postmaster.go              |   9 +-
 collector/pg_postmaster_test.go         |  36 +++
 collector/pg_process_idle.go            |  36 ++-
 collector/pg_replication_slot.go        |  35 ++-
 collector/pg_replication_slot_test.go   |  81 ++++++
 collector/pg_stat_bgwriter.go           |  82 ++++--
 collector/pg_stat_bgwriter_test.go      |  61 ++++
 collector/pg_stat_database.go           | 227 +++++++++------
 collector/pg_stat_database_test.go      | 359 ++++++++++++++++++++++++
 collector/pg_stat_statements.go         |  69 +++--
 collector/pg_stat_statements_test.go    |  43 +++
 collector/pg_stat_user_tables.go        | 211 ++++++++++----
 collector/pg_stat_user_tables_test.go   |  99 +++++++
 collector/pg_statio_user_tables.go      |  97 +++++--
 collector/pg_statio_user_tables_test.go |  67 +++++
 17 files changed, 1326 insertions(+), 244 deletions(-)
 create mode 100644 collector/pg_stat_database_test.go

diff --git a/collector/pg_database.go b/collector/pg_database.go
index a4ea50d0..22d4918e 100644
--- a/collector/pg_database.go
+++ b/collector/pg_database.go
@@ -15,6 +15,7 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -79,32 +80,39 @@ func (c PGDatabaseCollector) Update(ctx context.Context, instance *instance, ch
 	var databases []string
 
 	for rows.Next() {
-		var datname string
+		var datname sql.NullString
 		if err := rows.Scan(&datname); err != nil {
 			return err
 		}
 
+		if !datname.Valid {
+			continue
+		}
 		// Ignore excluded databases
 		// Filtering is done here instead of in the query to avoid
 		// a complicated NOT IN query with a variable number of parameters
-		if sliceContains(c.excludedDatabases, datname) {
+		if sliceContains(c.excludedDatabases, datname.String) {
 			continue
 		}
 
-		databases = append(databases, datname)
+		databases = append(databases, datname.String)
 	}
 
 	// Query the size of the databases
 	for _, datname := range databases {
-		var size int64
+		var size sql.NullFloat64
 		err = db.QueryRowContext(ctx, pgDatabaseSizeQuery, datname).Scan(&size)
 		if err != nil {
 			return err
 		}
 
+		sizeMetric := 0.0
+		if size.Valid {
+			sizeMetric = size.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			pgDatabaseSizeDesc,
-			prometheus.GaugeValue, float64(size), datname,
+			prometheus.GaugeValue, sizeMetric, datname,
 		)
 	}
 	if err := rows.Err(); err != nil {
diff --git a/collector/pg_database_test.go b/collector/pg_database_test.go
index 058a6d25..b5052c5d 100644
--- a/collector/pg_database_test.go
+++ b/collector/pg_database_test.go
@@ -59,3 +59,43 @@ func TestPGDatabaseCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+// TODO add a null db test
+
+func TestPGDatabaseCollectorNullMetric(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname"}).
+		AddRow("postgres"))
+
+	mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}).
+		AddRow(nil))
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGDatabaseCollector{}
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGDatabaseCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datname": "postgres"}, value: 0, metricType: dto.MetricType_GAUGE},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go
index eae82d56..4bef4296 100644
--- a/collector/pg_postmaster.go
+++ b/collector/pg_postmaster.go
@@ -15,6 +15,7 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -51,14 +52,18 @@ func (c *PGPostmasterCollector) Update(ctx context.Context, instance *instance,
 	row := db.QueryRowContext(ctx,
 		pgPostmasterQuery)
 
-	var startTimeSeconds float64
+	var startTimeSeconds sql.NullFloat64
 	err := row.Scan(&startTimeSeconds)
 	if err != nil {
 		return err
 	}
+	startTimeSecondsMetric := 0.0
+	if startTimeSeconds.Valid {
+		startTimeSecondsMetric = startTimeSeconds.Float64
+	}
 	ch <- prometheus.MustNewConstMetric(
 		pgPostMasterStartTimeSeconds,
-		prometheus.GaugeValue, startTimeSeconds,
+		prometheus.GaugeValue, startTimeSecondsMetric,
 	)
 	return nil
 }
diff --git a/collector/pg_postmaster_test.go b/collector/pg_postmaster_test.go
index c40fe03a..8405b422 100644
--- a/collector/pg_postmaster_test.go
+++ b/collector/pg_postmaster_test.go
@@ -57,3 +57,39 @@ func TestPgPostmasterCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+func TestPgPostmasterCollectorNullTime(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}).
+		AddRow(nil))
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGPostmasterCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGPostmasterCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE},
+	}
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go
index 06244975..cc53fbb7 100644
--- a/collector/pg_process_idle.go
+++ b/collector/pg_process_idle.go
@@ -15,21 +15,23 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-const processIdleSubsystem = "process_idle"
-
 func init() {
-	registerCollector(processIdleSubsystem, defaultEnabled, NewPGProcessIdleCollector)
+	// Making this default disabled because we have no tests for it
+	registerCollector(processIdleSubsystem, defaultDisabled, NewPGProcessIdleCollector)
 }
 
 type PGProcessIdleCollector struct {
 	log log.Logger
 }
 
+const processIdleSubsystem = "process_idle"
+
 func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) {
 	return &PGProcessIdleCollector{log: config.logger}, nil
 }
@@ -41,8 +43,8 @@ var pgProcessIdleSeconds = prometheus.NewDesc(
 	prometheus.Labels{},
 )
 
-func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
-	db := instance.getDB()
+func (PGProcessIdleCollector) Update(ctx context.Context, inst *instance, ch chan<- prometheus.Metric) error {
+	db := inst.getDB()
 	row := db.QueryRowContext(ctx,
 		`WITH
 			metrics AS (
@@ -79,9 +81,9 @@ func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch
 			FROM metrics JOIN buckets USING (application_name)
 			GROUP BY 1, 2, 3;`)
 
-	var applicationName string
-	var secondsSum int64
-	var secondsCount uint64
+	var applicationName sql.NullString
+	var secondsSum sql.NullInt64
+	var secondsCount sql.NullInt64
 	var seconds []int64
 	var secondsBucket []uint64
 
@@ -97,10 +99,24 @@ func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch
 	if err != nil {
 		return err
 	}
+
+	applicationNameLabel := "unknown"
+	if applicationName.Valid {
+		applicationNameLabel = applicationName.String
+	}
+
+	var secondsCountMetric uint64
+	if secondsCount.Valid {
+		secondsCountMetric = uint64(secondsCount.Int64)
+	}
+	secondsSumMetric := 0.0
+	if secondsSum.Valid {
+		secondsSumMetric = float64(secondsSum.Int64)
+	}
 	ch <- prometheus.MustNewConstHistogram(
 		pgProcessIdleSeconds,
-		secondsCount, float64(secondsSum), buckets,
-		applicationName,
+		secondsCountMetric, secondsSumMetric, buckets,
+		applicationNameLabel,
 	)
 	return nil
 }
diff --git a/collector/pg_replication_slot.go b/collector/pg_replication_slot.go
index 4278923f..43eb8a9d 100644
--- a/collector/pg_replication_slot.go
+++ b/collector/pg_replication_slot.go
@@ -15,6 +15,7 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -82,32 +83,44 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance
 	defer rows.Close()
 
 	for rows.Next() {
-		var slotName string
-		var walLSN int64
-		var flushLSN int64
-		var isActive bool
+		var slotName sql.NullString
+		var walLSN sql.NullFloat64
+		var flushLSN sql.NullFloat64
+		var isActive sql.NullBool
 		if err := rows.Scan(&slotName, &walLSN, &flushLSN, &isActive); err != nil {
 			return err
 		}
 
-		isActiveValue := 0
-		if isActive {
-			isActiveValue = 1
+		isActiveValue := 0.0
+		if isActive.Valid && isActive.Bool {
+			isActiveValue = 1.0
+		}
+		slotNameLabel := "unknown"
+		if slotName.Valid {
+			slotNameLabel = slotName.String
 		}
 
+		var walLSNMetric float64
+		if walLSN.Valid {
+			walLSNMetric = walLSN.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			pgReplicationSlotCurrentWalDesc,
-			prometheus.GaugeValue, float64(walLSN), slotName,
+			prometheus.GaugeValue, walLSNMetric, slotNameLabel,
 		)
-		if isActive {
+		if isActive.Valid && isActive.Bool {
+			var flushLSNMetric float64
+			if flushLSN.Valid {
+				flushLSNMetric = flushLSN.Float64
+			}
 			ch <- prometheus.MustNewConstMetric(
 				pgReplicationSlotCurrentFlushDesc,
-				prometheus.GaugeValue, float64(flushLSN), slotName,
+				prometheus.GaugeValue, flushLSNMetric, slotNameLabel,
 			)
 		}
 		ch <- prometheus.MustNewConstMetric(
 			pgReplicationSlotIsActiveDesc,
-			prometheus.GaugeValue, float64(isActiveValue), slotName,
+			prometheus.GaugeValue, isActiveValue, slotNameLabel,
 		)
 	}
 	if err := rows.Err(); err != nil {
diff --git a/collector/pg_replication_slot_test.go b/collector/pg_replication_slot_test.go
index cb25b755..7e91ea26 100644
--- a/collector/pg_replication_slot_test.go
+++ b/collector/pg_replication_slot_test.go
@@ -103,3 +103,84 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) {
 	}
 
 }
+
+func TestPgReplicationSlotCollectorActiveNil(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("test_slot", 6, 12, nil)
+	mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGReplicationSlotCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"slot_name": "test_slot"}, value: 6, metricType: dto.MetricType_GAUGE},
+		{labels: labelMap{"slot_name": "test_slot"}, value: 0, metricType: dto.MetricType_GAUGE},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestPgReplicationSlotCollectorTestNilValues(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{"slot_name", "current_wal_lsn", "confirmed_flush_lsn", "active"}
+	rows := sqlmock.NewRows(columns).
+		AddRow(nil, nil, nil, true)
+	mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGReplicationSlotCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"slot_name": "unknown"}, value: 0, metricType: dto.MetricType_GAUGE},
+		{labels: labelMap{"slot_name": "unknown"}, value: 0, metricType: dto.MetricType_GAUGE},
+		{labels: labelMap{"slot_name": "unknown"}, value: 1, metricType: dto.MetricType_GAUGE},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go
index 2bdef8d4..ec446d58 100644
--- a/collector/pg_stat_bgwriter.go
+++ b/collector/pg_stat_bgwriter.go
@@ -15,7 +15,7 @@ package collector
 
 import (
 	"context"
-	"time"
+	"database/sql"
 
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -121,77 +121,113 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c
 	row := db.QueryRowContext(ctx,
 		statBGWriterQuery)
 
-	var cpt int
-	var cpr int
-	var cpwt float64
-	var cpst float64
-	var bcp int
-	var bc int
-	var mwc int
-	var bb int
-	var bbf int
-	var ba int
-	var sr time.Time
+	var cpt, cpr, bcp, bc, mwc, bb, bbf, ba sql.NullInt64
+	var cpwt, cpst sql.NullFloat64
+	var sr sql.NullTime
 
 	err := row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr)
 	if err != nil {
 		return err
 	}
 
+	cptMetric := 0.0
+	if cpt.Valid {
+		cptMetric = float64(cpt.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterCheckpointsTimedDesc,
 		prometheus.CounterValue,
-		float64(cpt),
+		cptMetric,
 	)
+	cprMetric := 0.0
+	if cpr.Valid {
+		cprMetric = float64(cpr.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterCheckpointsReqDesc,
 		prometheus.CounterValue,
-		float64(cpr),
+		cprMetric,
 	)
+	cpwtMetric := 0.0
+	if cpwt.Valid {
+		cpwtMetric = float64(cpwt.Float64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterCheckpointsReqTimeDesc,
 		prometheus.CounterValue,
-		float64(cpwt),
+		cpwtMetric,
 	)
+	cpstMetric := 0.0
+	if cpst.Valid {
+		cpstMetric = float64(cpst.Float64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterCheckpointsSyncTimeDesc,
 		prometheus.CounterValue,
-		float64(cpst),
+		cpstMetric,
 	)
+	bcpMetric := 0.0
+	if bcp.Valid {
+		bcpMetric = float64(bcp.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterBuffersCheckpointDesc,
 		prometheus.CounterValue,
-		float64(bcp),
+		bcpMetric,
 	)
+	bcMetric := 0.0
+	if bc.Valid {
+		bcMetric = float64(bc.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterBuffersCleanDesc,
 		prometheus.CounterValue,
-		float64(bc),
+		bcMetric,
 	)
+	mwcMetric := 0.0
+	if mwc.Valid {
+		mwcMetric = float64(mwc.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterMaxwrittenCleanDesc,
 		prometheus.CounterValue,
-		float64(mwc),
+		mwcMetric,
 	)
+	bbMetric := 0.0
+	if bb.Valid {
+		bbMetric = float64(bb.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterBuffersBackendDesc,
 		prometheus.CounterValue,
-		float64(bb),
+		bbMetric,
 	)
+	bbfMetric := 0.0
+	if bbf.Valid {
+		bbfMetric = float64(bbf.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterBuffersBackendFsyncDesc,
 		prometheus.CounterValue,
-		float64(bbf),
+		bbfMetric,
 	)
+	baMetric := 0.0
+	if ba.Valid {
+		baMetric = float64(ba.Int64)
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterBuffersAllocDesc,
 		prometheus.CounterValue,
-		float64(ba),
+		baMetric,
 	)
+	srMetric := 0.0
+	if sr.Valid {
+		srMetric = float64(sr.Time.Unix())
+	}
 	ch <- prometheus.MustNewConstMetric(
 		statBGWriterStatsResetDesc,
 		prometheus.CounterValue,
-		float64(sr.Unix()),
+		srMetric,
 	)
 
 	return nil
diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go
index 11f55f6b..ddf9976a 100644
--- a/collector/pg_stat_bgwriter_test.go
+++ b/collector/pg_stat_bgwriter_test.go
@@ -88,3 +88,64 @@ func TestPGStatBGWriterCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+func TestPGStatBGWriterCollectorNullValues(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"checkpoints_timed",
+		"checkpoints_req",
+		"checkpoint_write_time",
+		"checkpoint_sync_time",
+		"buffers_checkpoint",
+		"buffers_clean",
+		"maxwritten_clean",
+		"buffers_backend",
+		"buffers_backend_fsync",
+		"buffers_alloc",
+		"stats_reset"}
+
+	rows := sqlmock.NewRows(columns).
+		AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+	mock.ExpectQuery(sanitizeQuery(statBGWriterQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatBGWriterCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_stat_database.go b/collector/pg_stat_database.go
index bb39a84b..8a882f89 100644
--- a/collector/pg_stat_database.go
+++ b/collector/pg_stat_database.go
@@ -202,12 +202,9 @@ var (
 		[]string{"datid", "datname"},
 		prometheus.Labels{},
 	)
-)
 
-func (PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
-	db := instance.getDB()
-	rows, err := db.QueryContext(ctx,
-		`SELECT
+	statDatabaseQuery = `
+		SELECT
 			datid
 			,datname
 			,numbackends
@@ -228,7 +225,13 @@ func (PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, c
 			,blk_write_time
 			,stats_reset
 		FROM pg_stat_database;
-		`,
+	`
+)
+
+func (PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
+	db := instance.getDB()
+	rows, err := db.QueryContext(ctx,
+		statDatabaseQuery,
 	)
 	if err != nil {
 		return err
@@ -236,24 +239,8 @@ func (PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, c
 	defer rows.Close()
 
 	for rows.Next() {
-		var datid string
-		var datname string
-		var numBackends float64
-		var xactCommit float64
-		var xactRollback float64
-		var blksRead float64
-		var blksHit float64
-		var tupReturned float64
-		var tupFetched float64
-		var tupInserted float64
-		var tupUpdated float64
-		var tupDeleted float64
-		var conflicts float64
-		var tempFiles float64
-		var tempBytes float64
-		var deadlocks float64
-		var blkReadTime float64
-		var blkWriteTime float64
+		var datid, datname sql.NullString
+		var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64
 		var statsReset sql.NullTime
 
 		err := rows.Scan(
@@ -280,152 +267,218 @@ func (PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, c
 		if err != nil {
 			return err
 		}
+		datidLabel := "unknown"
+		if datid.Valid {
+			datidLabel = datid.String
+		}
+		datnameLabel := "unknown"
+		if datname.Valid {
+			datnameLabel = datname.String
+		}
 
+		numBackendsMetric := 0.0
+		if numBackends.Valid {
+			numBackendsMetric = numBackends.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseNumbackends,
 			prometheus.GaugeValue,
-			numBackends,
-			datid,
-			datname,
+			numBackendsMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		xactCommitMetric := 0.0
+		if xactCommit.Valid {
+			xactCommitMetric = xactCommit.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseXactCommit,
 			prometheus.CounterValue,
-			xactCommit,
-			datid,
-			datname,
+			xactCommitMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		xactRollbackMetric := 0.0
+		if xactRollback.Valid {
+			xactRollbackMetric = xactRollback.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseXactRollback,
 			prometheus.CounterValue,
-			xactRollback,
-			datid,
-			datname,
+			xactRollbackMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		blksReadMetric := 0.0
+		if blksRead.Valid {
+			blksReadMetric = blksRead.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseBlksRead,
 			prometheus.CounterValue,
-			blksRead,
-			datid,
-			datname,
+			blksReadMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		blksHitMetric := 0.0
+		if blksHit.Valid {
+			blksHitMetric = blksHit.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseBlksHit,
 			prometheus.CounterValue,
-			blksHit,
-			datid,
-			datname,
+			blksHitMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tupReturnedMetric := 0.0
+		if tupReturned.Valid {
+			tupReturnedMetric = tupReturned.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTupReturned,
 			prometheus.CounterValue,
-			tupReturned,
-			datid,
-			datname,
+			tupReturnedMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tupFetchedMetric := 0.0
+		if tupFetched.Valid {
+			tupFetchedMetric = tupFetched.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTupFetched,
 			prometheus.CounterValue,
-			tupFetched,
-			datid,
-			datname,
+			tupFetchedMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tupInsertedMetric := 0.0
+		if tupInserted.Valid {
+			tupInsertedMetric = tupInserted.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTupInserted,
 			prometheus.CounterValue,
-			tupInserted,
-			datid,
-			datname,
+			tupInsertedMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tupUpdatedMetric := 0.0
+		if tupUpdated.Valid {
+			tupUpdatedMetric = tupUpdated.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTupUpdated,
 			prometheus.CounterValue,
-			tupUpdated,
-			datid,
-			datname,
+			tupUpdatedMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tupDeletedMetric := 0.0
+		if tupDeleted.Valid {
+			tupDeletedMetric = tupDeleted.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTupDeleted,
 			prometheus.CounterValue,
-			tupDeleted,
-			datid,
-			datname,
+			tupDeletedMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		conflictsMetric := 0.0
+		if conflicts.Valid {
+			conflictsMetric = conflicts.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseConflicts,
 			prometheus.CounterValue,
-			conflicts,
-			datid,
-			datname,
+			conflictsMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tempFilesMetric := 0.0
+		if tempFiles.Valid {
+			tempFilesMetric = tempFiles.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTempFiles,
 			prometheus.CounterValue,
-			tempFiles,
-			datid,
-			datname,
+			tempFilesMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		tempBytesMetric := 0.0
+		if tempBytes.Valid {
+			tempBytesMetric = tempBytes.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseTempBytes,
 			prometheus.CounterValue,
-			tempBytes,
-			datid,
-			datname,
+			tempBytesMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		deadlocksMetric := 0.0
+		if deadlocks.Valid {
+			deadlocksMetric = deadlocks.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseDeadlocks,
 			prometheus.CounterValue,
-			deadlocks,
-			datid,
-			datname,
+			deadlocksMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		blkReadTimeMetric := 0.0
+		if blkReadTime.Valid {
+			blkReadTimeMetric = blkReadTime.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseBlkReadTime,
 			prometheus.CounterValue,
-			blkReadTime,
-			datid,
-			datname,
+			blkReadTimeMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		blkWriteTimeMetric := 0.0
+		if blkWriteTime.Valid {
+			blkWriteTimeMetric = blkWriteTime.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statDatabaseBlkWriteTime,
 			prometheus.CounterValue,
-			blkWriteTime,
-			datid,
-			datname,
+			blkWriteTimeMetric,
+			datidLabel,
+			datnameLabel,
 		)
 
+		statsResetMetric := 0.0
 		if statsReset.Valid {
-			ch <- prometheus.MustNewConstMetric(
-				statDatabaseStatsReset,
-				prometheus.CounterValue,
-				float64(statsReset.Time.Unix()),
-				datid,
-				datname,
-			)
-		} else {
-			ch <- prometheus.MustNewConstMetric(
-				statDatabaseStatsReset,
-				prometheus.CounterValue,
-				0,
-				datid,
-				datname,
-			)
+			statsResetMetric = float64(statsReset.Time.Unix())
 		}
+		ch <- prometheus.MustNewConstMetric(
+			statDatabaseStatsReset,
+			prometheus.CounterValue,
+			statsResetMetric,
+			datidLabel,
+			datnameLabel,
+		)
 	}
 	return nil
 }
diff --git a/collector/pg_stat_database_test.go b/collector/pg_stat_database_test.go
new file mode 100644
index 00000000..0c2b5ea8
--- /dev/null
+++ b/collector/pg_stat_database_test.go
@@ -0,0 +1,359 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package collector
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/DATA-DOG/go-sqlmock"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/smartystreets/goconvey/convey"
+)
+
+func TestPGStatDatabaseCollector(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"datid",
+		"datname",
+		"numbackends",
+		"xact_commit",
+		"xact_rollback",
+		"blks_read",
+		"blks_hit",
+		"tup_returned",
+		"tup_fetched",
+		"tup_inserted",
+		"tup_updated",
+		"tup_deleted",
+		"conflicts",
+		"temp_files",
+		"temp_bytes",
+		"deadlocks",
+		"blk_read_time",
+		"blk_write_time",
+		"stats_reset",
+	}
+
+	srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07")
+	if err != nil {
+		t.Fatalf("Error parsing time: %s", err)
+	}
+
+	rows := sqlmock.NewRows(columns).
+		AddRow(
+			"pid",
+			"postgres",
+			354,
+			4945,
+			289097744,
+			1242257,
+			3275602074,
+			89320867,
+			450139,
+			2034563757,
+			0,
+			2725688749,
+			23,
+			52,
+			74,
+			925,
+			16,
+			823,
+			srT)
+
+	mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatDatabaseCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"datid",
+		"datname",
+		"numbackends",
+		"xact_commit",
+		"xact_rollback",
+		"blks_read",
+		"blks_hit",
+		"tup_returned",
+		"tup_fetched",
+		"tup_inserted",
+		"tup_updated",
+		"tup_deleted",
+		"conflicts",
+		"temp_files",
+		"temp_bytes",
+		"deadlocks",
+		"blk_read_time",
+		"blk_write_time",
+		"stats_reset",
+	}
+
+	rows := sqlmock.NewRows(columns).
+		AddRow(
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+		)
+	mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatDatabaseCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"datid",
+		"datname",
+		"numbackends",
+		"xact_commit",
+		"xact_rollback",
+		"blks_read",
+		"blks_hit",
+		"tup_returned",
+		"tup_fetched",
+		"tup_inserted",
+		"tup_updated",
+		"tup_deleted",
+		"conflicts",
+		"temp_files",
+		"temp_bytes",
+		"deadlocks",
+		"blk_read_time",
+		"blk_write_time",
+		"stats_reset",
+	}
+
+	srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07")
+	if err != nil {
+		t.Fatalf("Error parsing time: %s", err)
+	}
+
+	rows := sqlmock.NewRows(columns).
+		AddRow(
+			"pid",
+			"postgres",
+			354,
+			4945,
+			289097744,
+			1242257,
+			3275602074,
+			89320867,
+			450139,
+			2034563757,
+			0,
+			2725688749,
+			23,
+			52,
+			74,
+			925,
+			16,
+			823,
+			srT).
+		AddRow(
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+		)
+
+	mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatDatabaseCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
+		{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datid": "unknown", "datname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go
index eb629c38..bbfee1a2 100644
--- a/collector/pg_stat_statements.go
+++ b/collector/pg_stat_statements.go
@@ -15,6 +15,7 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -101,48 +102,80 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance,
 	}
 	defer rows.Close()
 	for rows.Next() {
-		var user string
-		var datname string
-		var queryid string
-		var callsTotal int64
-		var secondsTotal float64
-		var rowsTotal int64
-		var blockReadSecondsTotal float64
-		var blockWriteSecondsTotal float64
+		var user, datname, queryid sql.NullString
+		var callsTotal, rowsTotal sql.NullInt64
+		var secondsTotal, blockReadSecondsTotal, blockWriteSecondsTotal sql.NullFloat64
 
 		if err := rows.Scan(&user, &datname, &queryid, &callsTotal, &secondsTotal, &rowsTotal, &blockReadSecondsTotal, &blockWriteSecondsTotal); err != nil {
 			return err
 		}
 
+		userLabel := "unknown"
+		if user.Valid {
+			userLabel = user.String
+		}
+		datnameLabel := "unknown"
+		if datname.Valid {
+			datnameLabel = datname.String
+		}
+		queryidLabel := "unknown"
+		if queryid.Valid {
+			queryidLabel = queryid.String
+		}
+
+		callsTotalMetric := 0.0
+		if callsTotal.Valid {
+			callsTotalMetric = float64(callsTotal.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statSTatementsCallsTotal,
 			prometheus.CounterValue,
-			float64(callsTotal),
-			user, datname, queryid,
+			callsTotalMetric,
+			userLabel, datnameLabel, queryidLabel,
 		)
+
+		secondsTotalMetric := 0.0
+		if secondsTotal.Valid {
+			secondsTotalMetric = secondsTotal.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statStatementsSecondsTotal,
 			prometheus.CounterValue,
-			secondsTotal,
-			user, datname, queryid,
+			secondsTotalMetric,
+			userLabel, datnameLabel, queryidLabel,
 		)
+
+		rowsTotalMetric := 0.0
+		if rowsTotal.Valid {
+			rowsTotalMetric = float64(rowsTotal.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statStatementsRowsTotal,
 			prometheus.CounterValue,
-			float64(rowsTotal),
-			user, datname, queryid,
+			rowsTotalMetric,
+			userLabel, datnameLabel, queryidLabel,
 		)
+
+		blockReadSecondsTotalMetric := 0.0
+		if blockReadSecondsTotal.Valid {
+			blockReadSecondsTotalMetric = blockReadSecondsTotal.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statStatementsBlockReadSecondsTotal,
 			prometheus.CounterValue,
-			blockReadSecondsTotal,
-			user, datname, queryid,
+			blockReadSecondsTotalMetric,
+			userLabel, datnameLabel, queryidLabel,
 		)
+
+		blockWriteSecondsTotalMetric := 0.0
+		if blockWriteSecondsTotal.Valid {
+			blockWriteSecondsTotalMetric = blockWriteSecondsTotal.Float64
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statStatementsBlockWriteSecondsTotal,
 			prometheus.CounterValue,
-			blockWriteSecondsTotal,
-			user, datname, queryid,
+			blockWriteSecondsTotalMetric,
+			userLabel, datnameLabel, queryidLabel,
 		)
 	}
 	if err := rows.Err(); err != nil {
diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go
index 241699ad..c4f89a60 100644
--- a/collector/pg_stat_statements_test.go
+++ b/collector/pg_stat_statements_test.go
@@ -64,3 +64,46 @@ func TestPGStateStatementsCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+func TestPGStateStatementsCollectorNull(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"}
+	rows := sqlmock.NewRows(columns).
+		AddRow(nil, nil, nil, nil, nil, nil, nil, nil)
+	mock.ExpectQuery(sanitizeQuery(pgStatStatementsQuery)).WillReturnRows(rows)
+
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatStatementsCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go
index 48ae96eb..949a0ea2 100644
--- a/collector/pg_stat_user_tables.go
+++ b/collector/pg_stat_user_tables.go
@@ -15,7 +15,7 @@ package collector
 
 import (
 	"context"
-	"time"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -189,146 +189,235 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan
 	defer rows.Close()
 
 	for rows.Next() {
-		var datname string
-		var schemaname string
-		var relname string
-		var seqScan int64
-		var seqTupRead int64
-		var idxScan int64
-		var idxTupFetch int64
-		var nTupIns int64
-		var nTupUpd int64
-		var nTupDel int64
-		var nTupHotUpd int64
-		var nLiveTup int64
-		var nDeadTup int64
-		var nModSinceAnalyze int64
-		var lastVacuum time.Time
-		var lastAutovacuum time.Time
-		var lastAnalyze time.Time
-		var lastAutoanalyze time.Time
-		var vacuumCount int64
-		var autovacuumCount int64
-		var analyzeCount int64
-		var autoanalyzeCount int64
+		var datname, schemaname, relname sql.NullString
+		var seqScan, seqTupRead, idxScan, idxTupFetch, nTupIns, nTupUpd, nTupDel, nTupHotUpd, nLiveTup, nDeadTup,
+			nModSinceAnalyze, vacuumCount, autovacuumCount, analyzeCount, autoanalyzeCount sql.NullInt64
+		var lastVacuum, lastAutovacuum, lastAnalyze, lastAutoanalyze sql.NullTime
 
 		if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, &nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount); err != nil {
 			return err
 		}
 
+		datnameLabel := "unknown"
+		if datname.Valid {
+			datnameLabel = datname.String
+		}
+		schemanameLabel := "unknown"
+		if schemaname.Valid {
+			schemanameLabel = schemaname.String
+		}
+		relnameLabel := "unknown"
+		if relname.Valid {
+			relnameLabel = relname.String
+		}
+
+		seqScanMetric := 0.0
+		if seqScan.Valid {
+			seqScanMetric = float64(seqScan.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesSeqScan,
 			prometheus.CounterValue,
-			float64(seqScan),
-			datname, schemaname, relname,
+			seqScanMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		seqTupReadMetric := 0.0
+		if seqTupRead.Valid {
+			seqTupReadMetric = float64(seqTupRead.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesSeqTupRead,
 			prometheus.CounterValue,
-			float64(seqTupRead),
-			datname, schemaname, relname,
+			seqTupReadMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		idxScanMetric := 0.0
+		if idxScan.Valid {
+			idxScanMetric = float64(idxScan.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesIdxScan,
 			prometheus.CounterValue,
-			float64(idxScan),
-			datname, schemaname, relname,
+			idxScanMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		idxTupFetchMetric := 0.0
+		if idxTupFetch.Valid {
+			idxTupFetchMetric = float64(idxTupFetch.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesIdxTupFetch,
 			prometheus.CounterValue,
-			float64(idxTupFetch),
-			datname, schemaname, relname,
+			idxTupFetchMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nTupInsMetric := 0.0
+		if nTupIns.Valid {
+			nTupInsMetric = float64(nTupIns.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNTupIns,
 			prometheus.CounterValue,
-			float64(nTupIns),
-			datname, schemaname, relname,
+			nTupInsMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nTupUpdMetric := 0.0
+		if nTupUpd.Valid {
+			nTupUpdMetric = float64(nTupUpd.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNTupUpd,
 			prometheus.CounterValue,
-			float64(nTupUpd),
-			datname, schemaname, relname,
+			nTupUpdMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nTupDelMetric := 0.0
+		if nTupDel.Valid {
+			nTupDelMetric = float64(nTupDel.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNTupDel,
 			prometheus.CounterValue,
-			float64(nTupDel),
-			datname, schemaname, relname,
+			nTupDelMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nTupHotUpdMetric := 0.0
+		if nTupHotUpd.Valid {
+			nTupHotUpdMetric = float64(nTupHotUpd.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNTupHotUpd,
 			prometheus.CounterValue,
-			float64(nTupHotUpd),
-			datname, schemaname, relname,
+			nTupHotUpdMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nLiveTupMetric := 0.0
+		if nLiveTup.Valid {
+			nLiveTupMetric = float64(nLiveTup.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNLiveTup,
 			prometheus.GaugeValue,
-			float64(nLiveTup),
-			datname, schemaname, relname,
+			nLiveTupMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nDeadTupMetric := 0.0
+		if nDeadTup.Valid {
+			nDeadTupMetric = float64(nDeadTup.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNDeadTup,
 			prometheus.GaugeValue,
-			float64(nDeadTup),
-			datname, schemaname, relname,
+			nDeadTupMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		nModSinceAnalyzeMetric := 0.0
+		if nModSinceAnalyze.Valid {
+			nModSinceAnalyzeMetric = float64(nModSinceAnalyze.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesNModSinceAnalyze,
 			prometheus.GaugeValue,
-			float64(nModSinceAnalyze),
-			datname, schemaname, relname,
+			nModSinceAnalyzeMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		lastVacuumMetric := 0.0
+		if lastVacuum.Valid {
+			lastVacuumMetric = float64(lastVacuum.Time.Unix())
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesLastVacuum,
 			prometheus.GaugeValue,
-			float64(lastVacuum.Unix()),
-			datname, schemaname, relname,
+			lastVacuumMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		lastAutovacuumMetric := 0.0
+		if lastAutovacuum.Valid {
+			lastAutovacuumMetric = float64(lastAutovacuum.Time.Unix())
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesLastAutovacuum,
 			prometheus.GaugeValue,
-			float64(lastAutovacuum.Unix()),
-			datname, schemaname, relname,
+			lastAutovacuumMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		lastAnalyzeMetric := 0.0
+		if lastAnalyze.Valid {
+			lastAnalyzeMetric = float64(lastAnalyze.Time.Unix())
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesLastAnalyze,
 			prometheus.GaugeValue,
-			float64(lastAnalyze.Unix()),
-			datname, schemaname, relname,
+			lastAnalyzeMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		lastAutoanalyzeMetric := 0.0
+		if lastAutoanalyze.Valid {
+			lastAutoanalyzeMetric = float64(lastAutoanalyze.Time.Unix())
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesLastAutoanalyze,
 			prometheus.GaugeValue,
-			float64(lastAutoanalyze.Unix()),
-			datname, schemaname, relname,
+			lastAutoanalyzeMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		vacuumCountMetric := 0.0
+		if vacuumCount.Valid {
+			vacuumCountMetric = float64(vacuumCount.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesVacuumCount,
 			prometheus.CounterValue,
-			float64(vacuumCount),
-			datname, schemaname, relname,
+			vacuumCountMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		autovacuumCountMetric := 0.0
+		if autovacuumCount.Valid {
+			autovacuumCountMetric = float64(autovacuumCount.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesAutovacuumCount,
 			prometheus.CounterValue,
-			float64(autovacuumCount),
-			datname, schemaname, relname,
+			autovacuumCountMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		analyzeCountMetric := 0.0
+		if analyzeCount.Valid {
+			analyzeCountMetric = float64(analyzeCount.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesAnalyzeCount,
 			prometheus.CounterValue,
-			float64(analyzeCount),
-			datname, schemaname, relname,
+			analyzeCountMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		autoanalyzeCountMetric := 0.0
+		if autoanalyzeCount.Valid {
+			autoanalyzeCountMetric = float64(autoanalyzeCount.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statUserTablesAutoanalyzeCount,
 			prometheus.CounterValue,
-			float64(autoanalyzeCount),
-			datname, schemaname, relname,
+			autoanalyzeCountMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
 	}
 
diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go
index 8bb9bc31..e592fa5e 100644
--- a/collector/pg_stat_user_tables_test.go
+++ b/collector/pg_stat_user_tables_test.go
@@ -138,3 +138,102 @@ func TestPGStatUserTablesCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+func TestPGStatUserTablesCollectorNullValues(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"datname",
+		"schemaname",
+		"relname",
+		"seq_scan",
+		"seq_tup_read",
+		"idx_scan",
+		"idx_tup_fetch",
+		"n_tup_ins",
+		"n_tup_upd",
+		"n_tup_del",
+		"n_tup_hot_upd",
+		"n_live_tup",
+		"n_dead_tup",
+		"n_mod_since_analyze",
+		"last_vacuum",
+		"last_autovacuum",
+		"last_analyze",
+		"last_autoanalyze",
+		"vacuum_count",
+		"autovacuum_count",
+		"analyze_count",
+		"autoanalyze_count"}
+	rows := sqlmock.NewRows(columns).
+		AddRow("postgres",
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil)
+	mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows)
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatUserTablesCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go
index 03d54161..89fdec53 100644
--- a/collector/pg_statio_user_tables.go
+++ b/collector/pg_statio_user_tables.go
@@ -15,6 +15,7 @@ package collector
 
 import (
 	"context"
+	"database/sql"
 
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -110,69 +111,111 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc
 	defer rows.Close()
 
 	for rows.Next() {
-		var datname string
-		var schemaname string
-		var relname string
-		var heapBlksRead int64
-		var heapBlksHit int64
-		var idxBlksRead int64
-		var idxBlksHit int64
-		var toastBlksRead int64
-		var toastBlksHit int64
-		var tidxBlksRead int64
-		var tidxBlksHit int64
+		var datname, schemaname, relname sql.NullString
+		var heapBlksRead, heapBlksHit, idxBlksRead, idxBlksHit, toastBlksRead, toastBlksHit, tidxBlksRead, tidxBlksHit sql.NullInt64
 
 		if err := rows.Scan(&datname, &schemaname, &relname, &heapBlksRead, &heapBlksHit, &idxBlksRead, &idxBlksHit, &toastBlksRead, &toastBlksHit, &tidxBlksRead, &tidxBlksHit); err != nil {
 			return err
 		}
+		datnameLabel := "unknown"
+		if datname.Valid {
+			datnameLabel = datname.String
+		}
+		schemanameLabel := "unknown"
+		if schemaname.Valid {
+			schemanameLabel = schemaname.String
+		}
+		relnameLabel := "unknown"
+		if relname.Valid {
+			relnameLabel = relname.String
+		}
 
+		heapBlksReadMetric := 0.0
+		if heapBlksRead.Valid {
+			heapBlksReadMetric = float64(heapBlksRead.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesHeapBlksRead,
 			prometheus.CounterValue,
-			float64(heapBlksRead),
-			datname, schemaname, relname,
+			heapBlksReadMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		heapBlksHitMetric := 0.0
+		if heapBlksHit.Valid {
+			heapBlksHitMetric = float64(heapBlksHit.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesHeapBlksHit,
 			prometheus.CounterValue,
-			float64(heapBlksHit),
-			datname, schemaname, relname,
+			heapBlksHitMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		idxBlksReadMetric := 0.0
+		if idxBlksRead.Valid {
+			idxBlksReadMetric = float64(idxBlksRead.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesIdxBlksRead,
 			prometheus.CounterValue,
-			float64(idxBlksRead),
-			datname, schemaname, relname,
+			idxBlksReadMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		idxBlksHitMetric := 0.0
+		if idxBlksHit.Valid {
+			idxBlksHitMetric = float64(idxBlksHit.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesIdxBlksHit,
 			prometheus.CounterValue,
-			float64(idxBlksHit),
-			datname, schemaname, relname,
+			idxBlksHitMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		toastBlksReadMetric := 0.0
+		if toastBlksRead.Valid {
+			toastBlksReadMetric = float64(toastBlksRead.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesToastBlksRead,
 			prometheus.CounterValue,
-			float64(toastBlksRead),
-			datname, schemaname, relname,
+			toastBlksReadMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		toastBlksHitMetric := 0.0
+		if toastBlksHit.Valid {
+			toastBlksHitMetric = float64(toastBlksHit.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesToastBlksHit,
 			prometheus.CounterValue,
-			float64(toastBlksHit),
-			datname, schemaname, relname,
+			toastBlksHitMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		tidxBlksReadMetric := 0.0
+		if tidxBlksRead.Valid {
+			tidxBlksReadMetric = float64(tidxBlksRead.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesTidxBlksRead,
 			prometheus.CounterValue,
-			float64(tidxBlksRead),
-			datname, schemaname, relname,
+			tidxBlksReadMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
+
+		tidxBlksHitMetric := 0.0
+		if tidxBlksHit.Valid {
+			tidxBlksHitMetric = float64(tidxBlksHit.Int64)
+		}
 		ch <- prometheus.MustNewConstMetric(
 			statioUserTablesTidxBlksHit,
 			prometheus.CounterValue,
-			float64(tidxBlksHit),
-			datname, schemaname, relname,
+			tidxBlksHitMetric,
+			datnameLabel, schemanameLabel, relnameLabel,
 		)
 	}
 	if err := rows.Err(); err != nil {
diff --git a/collector/pg_statio_user_tables_test.go b/collector/pg_statio_user_tables_test.go
index d57cab9f..c7304a38 100644
--- a/collector/pg_statio_user_tables_test.go
+++ b/collector/pg_statio_user_tables_test.go
@@ -88,3 +88,70 @@ func TestPGStatIOUserTablesCollector(t *testing.T) {
 		t.Errorf("there were unfulfilled exceptions: %s", err)
 	}
 }
+
+func TestPGStatIOUserTablesCollectorNullValues(t *testing.T) {
+	db, mock, err := sqlmock.New()
+	if err != nil {
+		t.Fatalf("Error opening a stub db connection: %s", err)
+	}
+	defer db.Close()
+
+	inst := &instance{db: db}
+
+	columns := []string{
+		"datname",
+		"schemaname",
+		"relname",
+		"heap_blks_read",
+		"heap_blks_hit",
+		"idx_blks_read",
+		"idx_blks_hit",
+		"toast_blks_read",
+		"toast_blks_hit",
+		"tidx_blks_read",
+		"tidx_blks_hit",
+	}
+	rows := sqlmock.NewRows(columns).
+		AddRow(nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil,
+			nil)
+	mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows)
+	ch := make(chan prometheus.Metric)
+	go func() {
+		defer close(ch)
+		c := PGStatIOUserTablesCollector{}
+
+		if err := c.Update(context.Background(), inst, ch); err != nil {
+			t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err)
+		}
+	}()
+
+	expected := []MetricResult{
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
+	}
+
+	convey.Convey("Metrics comparison", t, func() {
+		for _, expect := range expected {
+			m := readMetric(<-ch)
+			convey.So(expect, convey.ShouldResemble, m)
+		}
+	})
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}