PMM-10278 postgres_exporter integration tests (#71)

PMM-10278 add compatibility tests for exporter upgrade
Taras Shcherban 2022-10-03 15:30:44 +02:00 committed by GitHub
parent d5e6046ec6
commit a6e78dc07a
17 changed files with 1431 additions and 3 deletions

6
.gitignore vendored

@@ -20,3 +20,9 @@
/.metrics.*.removed
/tools/src
/vendor
/percona_tests/assets/postgres_exporter
/percona_tests/assets/postgres_exporter_percona
/percona_tests/assets/metrics.new.txt
/percona_tests/assets/metrics.old.txt
/percona_tests/assets/metrics.names.new.txt
/percona_tests/assets/metrics.names.old.txt

7
go.mod

@@ -6,11 +6,15 @@ require (
github.com/blang/semver v3.5.1+incompatible
github.com/go-kit/log v0.2.0
github.com/lib/pq v1.10.4
github.com/montanaflynn/stats v0.6.6
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.32.1
github.com/prometheus/exporter-toolkit v0.7.1
github.com/stretchr/testify v1.4.0
github.com/tklauser/go-sysconf v0.3.10
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v2 v2.4.0
@@ -29,13 +33,12 @@ require (
github.com/kr/text v0.1.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.6 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/protobuf v1.26.0 // indirect

9
go.sum

@@ -162,6 +162,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ=
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -206,6 +208,10 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -335,8 +341,9 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

48
percona_tests/Makefile Normal file

@@ -0,0 +1,48 @@
#########################
### tests

# measures avg scrape time and compares old vs new exporters
test-performance:
	go test -v -run '^TestPerformance$$' -args -doRun=true

extraMetrics = false
multipleLabels = false
dumpMetrics = false

test-metrics:
	go test -v -run '^TestMissingMetrics$$' -args -doRun=true

test-labels:
	go test -v -run '^TestMissingLabels$$' -args -doRun=true

test-resolutions-duplicates:
	go test -v -run '^TestResolutionsMetricDuplicates$$' -args -doRun=true

test-resolutions:
	go test -v -run '^TestResolutions$$' -args -doRun=true

dump-metrics:
	go test -v -run '^TestDumpMetrics$$' -args -doRun=true -extraMetrics=$(extraMetrics) -multipleLabels=$(multipleLabels) -dumpMetrics=$(dumpMetrics)

test-consistency: test-metrics test-resolutions test-resolutions-duplicates

#########################
### env preparation

# download exporter from provided feature build's client binary url
prepare-exporter-from-fb:
	go test -v -run '^TestPrepareUpdatedExporter$$' -args -doRun=true -url=$(url)

prepare-exporter-from-repo:
	make -C ../ build && cp ../postgres_exporter assets/postgres_exporter

prepare-base-exporter:
	tar -xf assets/postgres_exporter_percona.tar.xz -C assets/

start-postgres-db:
	docker-compose -f assets/postgres-compose.yml up -d --force-recreate --renew-anon-volumes --remove-orphans

stop-postgres-db:
	docker-compose -f assets/postgres-compose.yml down

prepare-env-from-repo: prepare-exporter-from-repo prepare-base-exporter start-postgres-db
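Each target passes `-args -doRun=true`, so the flags after `-args` reach the compiled test binary rather than `go test` itself; the tests gate on that flag and skip themselves on a plain `go test ./...` run. A sketch of the guard pattern used in the test files further down:

```go
// doRun (and url for the prepare-exporter-from-fb target) are ordinary
// flag-package flags, flipped by the "-args -doRun=true" part of each target.
var doRun = flag.Bool("doRun", false, "")

func TestPerformance(t *testing.T) {
	if doRun == nil || !*doRun {
		t.Skip("For manual runs only through make")
		return
	}
	// ... actual test body ...
}
```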


@@ -0,0 +1,30 @@
---
version: '3.7'

services:
  postgres:
    image: ${POSTGRES_IMAGE:-postgres:11}
    container_name: postgres-test-srv
    command: >
      -c shared_preload_libraries='${PG_PRELOADED_LIBS:-pg_stat_statements}'
      -c track_activity_query_size=2048
      -c pg_stat_statements.max=10000
      -c pg_stat_monitor.pgsm_query_max_len=10000
      -c pg_stat_statements.track=all
      -c pg_stat_statements.save=off
      -c track_io_timing=on
    ports:
      - "127.0.0.1:5432:5432"
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    volumes:
      - postgres-test-srv-vol:/docker-entrypoint-initdb.d/
    networks:
      - postgres-test-srv-net

volumes:
  postgres-test-srv-vol:

networks:
  postgres-test-srv-net:
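Before running the suite it can be worth checking that the container above actually accepts connections. A minimal standalone sketch (not part of the test suite), reusing the `lib/pq` driver already required in go.mod and the credentials and port published by this compose file:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver, already a go.mod dependency
)

func main() {
	// Matches POSTGRES_USER/POSTGRES_PASSWORD and the 127.0.0.1:5432 port mapping above.
	dsn := "postgresql://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		log.Fatalf("postgres-test-srv is not reachable: %v", err)
	}
	log.Println("postgres-test-srv is up")
}
```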

Binary file not shown.


@@ -0,0 +1,6 @@
--auto-discover-databases
--collect.custom_query.hr
--collect.custom_query.lr
--collect.custom_query.mr
--exclude-databases=template0,template1,postgres,cloudsqladmin,pmm-managed-dev,azure_maintenance
--log.level=warn


@@ -0,0 +1,11 @@
## ######################################################
## WARNING: This is an example. Do not edit this file.
## To create your own Custom Queries - create a new file
## ######################################################
## Custom query example.
#pg_replication:
# query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag"
# metrics:
# - lag:
# usage: "GAUGE"
# description: "Replication lag behind master in seconds"


@@ -0,0 +1,7 @@
pg_postmaster_uptime:
  query: "select extract(epoch from current_timestamp - pg_postmaster_start_time()) as seconds"
  master: true
  metrics:
    - seconds:
        usage: "GAUGE"
        description: "Service uptime"
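The exporter builds the metric name from the query name plus the column name, so once this file is picked up a scrape should expose a gauge along these lines (value and label set are illustrative):

```
# TYPE pg_postmaster_uptime_seconds gauge
pg_postmaster_uptime_seconds 86400
```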


@@ -0,0 +1,11 @@
## ######################################################
## WARNING: This is an example. Do not edit this file.
## To create your own Custom Queries - create a new file
## ######################################################
## Custom query example.
#pg_replication:
# query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag"
# metrics:
# - lag:
# usage: "GAUGE"
# description: "Replication lag behind master in seconds"


@@ -0,0 +1,11 @@
## ######################################################
## WARNING: This is an example. Do not edit this file.
## To create your own Custom Queries - create a new file
## ######################################################
## Custom query example.
#pg_replication:
# query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag"
# metrics:
# - lag:
# usage: "GAUGE"
# description: "Replication lag behind master in seconds"


@@ -0,0 +1,231 @@
#### Queries are commented due to PMM-8859
pg_replication:
query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag"
master: true
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind master in seconds"
pg_postmaster:
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
master: true
metrics:
- start_time_seconds:
usage: "GAUGE"
description: "Time at which postmaster started"
pg_stat_user_tables:
query: |
SELECT
current_database() datname,
schemaname,
relname,
seq_scan,
seq_tup_read,
idx_scan,
idx_tup_fetch,
n_tup_ins,
n_tup_upd,
n_tup_del,
n_tup_hot_upd,
n_live_tup,
n_dead_tup,
n_mod_since_analyze,
COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum,
COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum,
COALESCE(last_analyze, '1970-01-01Z') as last_analyze,
COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze,
vacuum_count,
autovacuum_count,
analyze_count,
autoanalyze_count
FROM
pg_stat_user_tables
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- seq_scan:
usage: "COUNTER"
description: "Number of sequential scans initiated on this table"
- seq_tup_read:
usage: "COUNTER"
description: "Number of live rows fetched by sequential scans"
- idx_scan:
usage: "COUNTER"
description: "Number of index scans initiated on this table"
- idx_tup_fetch:
usage: "COUNTER"
description: "Number of live rows fetched by index scans"
- n_tup_ins:
usage: "COUNTER"
description: "Number of rows inserted"
- n_tup_upd:
usage: "COUNTER"
description: "Number of rows updated"
- n_tup_del:
usage: "COUNTER"
description: "Number of rows deleted"
- n_tup_hot_upd:
usage: "COUNTER"
description: "Number of rows HOT updated (i.e., with no separate index update required)"
- n_live_tup:
usage: "GAUGE"
description: "Estimated number of live rows"
- n_dead_tup:
usage: "GAUGE"
description: "Estimated number of dead rows"
- n_mod_since_analyze:
usage: "GAUGE"
description: "Estimated number of rows changed since last analyze"
- last_vacuum:
usage: "GAUGE"
description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)"
- last_autovacuum:
usage: "GAUGE"
description: "Last time at which this table was vacuumed by the autovacuum daemon"
- last_analyze:
usage: "GAUGE"
description: "Last time at which this table was manually analyzed"
- last_autoanalyze:
usage: "GAUGE"
description: "Last time at which this table was analyzed by the autovacuum daemon"
- vacuum_count:
usage: "COUNTER"
description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)"
- autovacuum_count:
usage: "COUNTER"
description: "Number of times this table has been vacuumed by the autovacuum daemon"
- analyze_count:
usage: "COUNTER"
description: "Number of times this table has been manually analyzed"
- autoanalyze_count:
usage: "COUNTER"
description: "Number of times this table has been analyzed by the autovacuum daemon"
pg_statio_user_tables:
query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- heap_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table"
- heap_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table"
- idx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from all indexes on this table"
- idx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in all indexes on this table"
- toast_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table (if any)"
- toast_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table (if any)"
- tidx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table indexes (if any)"
- tidx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
pg_database:
query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database"
master: true
cache_seconds: 30
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
####
#pg_stat_statements:
# query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 JOIN pg_roles t2 ON (t1.userid=t2.oid) JOIN pg_database t3 ON (t1.dbid=t3.oid) WHERE t2.rolname != 'rdsadmin'"
# master: true
# metrics:
# - rolname:
# usage: "LABEL"
# description: "Name of user"
# - datname:
# usage: "LABEL"
# description: "Name of database"
# - queryid:
# usage: "LABEL"
# description: "Query ID"
# - calls:
# usage: "COUNTER"
# description: "Number of times executed"
# - total_time_seconds:
# usage: "COUNTER"
# description: "Total time spent in the statement, in milliseconds"
# - min_time_seconds:
# usage: "GAUGE"
# description: "Minimum time spent in the statement, in milliseconds"
# - max_time_seconds:
# usage: "GAUGE"
# description: "Maximum time spent in the statement, in milliseconds"
# - mean_time_seconds:
# usage: "GAUGE"
# description: "Mean time spent in the statement, in milliseconds"
# - stddev_time_seconds:
# usage: "GAUGE"
# description: "Population standard deviation of time spent in the statement, in milliseconds"
# - rows:
# usage: "COUNTER"
# description: "Total number of rows retrieved or affected by the statement"
# - shared_blks_hit:
# usage: "COUNTER"
# description: "Total number of shared block cache hits by the statement"
# - shared_blks_read:
# usage: "COUNTER"
# description: "Total number of shared blocks read by the statement"
# - shared_blks_dirtied:
# usage: "COUNTER"
# description: "Total number of shared blocks dirtied by the statement"
# - shared_blks_written:
# usage: "COUNTER"
# description: "Total number of shared blocks written by the statement"
# - local_blks_hit:
# usage: "COUNTER"
# description: "Total number of local block cache hits by the statement"
# - local_blks_read:
# usage: "COUNTER"
# description: "Total number of local blocks read by the statement"
# - local_blks_dirtied:
# usage: "COUNTER"
# description: "Total number of local blocks dirtied by the statement"
# - local_blks_written:
# usage: "COUNTER"
# description: "Total number of local blocks written by the statement"
# - temp_blks_read:
# usage: "COUNTER"
# description: "Total number of temp blocks read by the statement"
# - temp_blks_written:
# usage: "COUNTER"
# description: "Total number of temp blocks written by the statement"
# - blk_read_time_seconds:
# usage: "COUNTER"
# description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"
# - blk_write_time_seconds:
# usage: "COUNTER"
# description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"


@@ -0,0 +1,89 @@
package percona_tests
import (
"archive/tar"
"compress/gzip"
"io"
"log"
"net/http"
"os"
"os/exec"
"strings"
"testing"
)
// TestPrepareUpdatedExporter extracts the exporter from the client binary's tar.gz
func TestPrepareUpdatedExporter(t *testing.T) {
if doRun == nil || !*doRun {
t.Skip("For manual runs only through make")
return
}
if url == nil || *url == "" {
t.Error("URL not defined")
return
}
prepareExporter(*url, updatedExporterFileName)
}
func extractExporter(gzipStream io.Reader, fileName string) {
uncompressedStream, err := gzip.NewReader(gzipStream)
if err != nil {
log.Fatal("ExtractTarGz: NewReader failed")
}
tarReader := tar.NewReader(uncompressedStream)
exporterFound := false
for !exporterFound {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("ExtractTarGz: Next() failed: %s", err.Error())
}
switch header.Typeflag {
case tar.TypeDir:
continue
case tar.TypeReg:
if strings.HasSuffix(header.Name, "postgres_exporter") {
outFile, err := os.Create(fileName)
if err != nil {
log.Fatalf("ExtractTarGz: Create() failed: %s", err.Error())
}
defer outFile.Close()
if _, err := io.Copy(outFile, tarReader); err != nil {
log.Fatalf("ExtractTarGz: Copy() failed: %s", err.Error())
}
exporterFound = true
}
default:
log.Fatalf(
"ExtractTarGz: unknown type: %d in %s",
header.Typeflag,
header.Name)
}
}
}
func prepareExporter(url, fileName string) {
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
extractExporter(resp.Body, fileName)
err = exec.Command("chmod", "+x", fileName).Run()
if err != nil {
log.Fatal(err)
}
}
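A side note on the final chmod step: shelling out to `chmod` works on the Linux runners this targets, but the same effect could be achieved in-process, e.g. (a sketch, not what the test currently does):

```go
if err := os.Chmod(fileName, 0o755); err != nil {
	log.Fatal(err)
}
```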


@@ -0,0 +1,507 @@
package percona_tests
import (
"flag"
"fmt"
"os"
"sort"
"strings"
"testing"
"github.com/pkg/errors"
)
var dumpMetricsFlag = flag.Bool("dumpMetrics", false, "")
var printExtraMetrics = flag.Bool("extraMetrics", false, "")
var printMultipleLabels = flag.Bool("multipleLabels", false, "")
type Metric struct {
name string
labels string
}
type MetricsCollection struct {
RawMetricStr string
RawMetricStrArr []string
MetricNamesWithLabels []string
MetricsData []Metric
LabelsByMetric map[string][]string
}
func TestMissingMetrics(t *testing.T) {
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
newMetrics, err := getMetrics(updatedExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetrics, err := getMetrics(oldExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetricsCollection := parseMetricsCollection(oldMetrics)
newMetricsCollection := parseMetricsCollection(newMetrics)
if ok, msg := testForMissingMetrics(oldMetricsCollection, newMetricsCollection); !ok {
t.Error(msg)
}
}
func TestMissingLabels(t *testing.T) {
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
newMetrics, err := getMetrics(updatedExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetrics, err := getMetrics(oldExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetricsCollection := parseMetricsCollection(oldMetrics)
newMetricsCollection := parseMetricsCollection(newMetrics)
if ok, msg := testForMissingMetricsLabels(oldMetricsCollection, newMetricsCollection); !ok {
t.Error(msg)
}
}
func TestDumpMetrics(t *testing.T) {
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
newMetrics, err := getMetrics(updatedExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetrics, err := getMetrics(oldExporterFileName)
if err != nil {
t.Error(err)
return
}
oldMetricsCollection := parseMetricsCollection(oldMetrics)
newMetricsCollection := parseMetricsCollection(newMetrics)
dumpMetricsInfo(oldMetricsCollection, newMetricsCollection)
}
const highResolutionEndpoint = "metrics?collect%5B%5D=custom_query.hr&collect%5B%5D=exporter&collect%5B%5D=standard.go&collect%5B%5D=standard.process"
const medResolutionEndpoint = "metrics?collect%5B%5D=custom_query.mr"
const lowResolutionEndpoint = "metrics?collect%5B%5D=custom_query.lr"
func TestResolutionsMetricDuplicates(t *testing.T) {
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
hrMetrics, err := getMetricsFrom(updatedExporterFileName, highResolutionEndpoint)
if err != nil {
t.Error(err)
return
}
mrMetrics, err := getMetricsFrom(updatedExporterFileName, medResolutionEndpoint)
if err != nil {
t.Error(err)
return
}
lrMetrics, err := getMetricsFrom(updatedExporterFileName, lowResolutionEndpoint)
if err != nil {
t.Error(err)
return
}
hrMetricsColl := parseMetricsCollection(hrMetrics)
mrMetricsColl := parseMetricsCollection(mrMetrics)
lrMetricsColl := parseMetricsCollection(lrMetrics)
ms := make(map[string][]string)
addMetrics(ms, hrMetricsColl.MetricNamesWithLabels, "HR")
addMetrics(ms, mrMetricsColl.MetricNamesWithLabels, "MR")
addMetrics(ms, lrMetricsColl.MetricNamesWithLabels, "LR")
count := 0
msg := ""
for metric, resolutions := range ms {
if len(resolutions) > 1 {
count++
msg += fmt.Sprintf("'%s' is duplicated in %s\n", metric, resolutions)
}
}
if count > 0 {
t.Errorf("Found %d duplicated metrics:\n%s", count, msg)
}
}
func addMetrics(ms map[string][]string, metrics []string, resolution string) {
for _, m := range metrics {
if m == "" || strings.HasPrefix(m, "# ") {
continue
}
ms[m] = append(ms[m], resolution)
}
}
func TestResolutions(t *testing.T) {
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
t.Run("TestLowResolution", func(t *testing.T) {
testResolution(t, lowResolutionEndpoint, "Low")
})
t.Run("TestMediumResolution", func(t *testing.T) {
testResolution(t, medResolutionEndpoint, "Medium")
})
t.Run("TestHighResolution", func(t *testing.T) {
testResolution(t, highResolutionEndpoint, "High")
})
}
func testResolution(t *testing.T, resolutionEp, resolutionName string) {
newMetrics, err := getMetricsFrom(updatedExporterFileName, resolutionEp)
if err != nil {
t.Error(err)
return
}
oldMetrics, err := getMetricsFrom(oldExporterFileName, resolutionEp)
if err != nil {
t.Error(err)
return
}
oldMetricsCollection := parseMetricsCollection(oldMetrics)
newMetricsCollection := parseMetricsCollection(newMetrics)
missingCount := 0
missingMetrics := ""
for _, metric := range oldMetricsCollection.MetricNamesWithLabels {
if metric == "" || strings.HasPrefix(metric, "# ") {
continue
}
if !contains(newMetricsCollection.MetricNamesWithLabels, metric) {
missingCount++
missingMetrics += fmt.Sprintf("%s\n", metric)
}
}
if missingCount > 0 {
t.Errorf("%d metrics are missing in new exporter for %s resolution:\n%s", missingCount, resolutionName, missingMetrics)
}
extraCount := 0
extraMetrics := ""
for _, metric := range newMetricsCollection.MetricNamesWithLabels {
if metric == "" || strings.HasPrefix(metric, "# ") {
continue
}
if !contains(oldMetricsCollection.MetricNamesWithLabels, metric) {
extraCount++
extraMetrics += fmt.Sprintf("%s\n", metric)
}
}
if extraCount > 0 {
fmt.Printf("[WARN] %d metrics are redundant in new exporter for %s resolution\n%s", extraCount, resolutionName, extraMetrics)
}
}
func dumpMetricsInfo(oldMetricsCollection, newMetricsCollection MetricsCollection) {
if getBool(dumpMetricsFlag) {
dumpMetrics(oldMetricsCollection, newMetricsCollection)
}
if getBool(printExtraMetrics) {
dumpExtraMetrics(newMetricsCollection, oldMetricsCollection)
}
if getBool(printMultipleLabels) {
dumpMetricsWithMultipleLabelSets(newMetricsCollection)
}
}
func testForMissingMetricsLabels(oldMetricsCollection, newMetricsCollection MetricsCollection) (bool, string) {
missingMetricLabels := make(map[string]string)
missingMetricLabelsNames := make([]string, 0)
for metric, labels := range oldMetricsCollection.LabelsByMetric {
// skip version info label mismatch
if metric == "postgres_exporter_build_info" || metric == "go_info" {
continue
}
if _, ok := newMetricsCollection.LabelsByMetric[metric]; ok {
newLabels := newMetricsCollection.LabelsByMetric[metric]
if !arrIsSubsetOf(labels, newLabels) {
missingMetricLabels[metric] = fmt.Sprintf(" expected: %s\n actual: %s", labels, newLabels)
missingMetricLabelsNames = append(missingMetricLabelsNames, metric)
}
}
}
sort.Strings(missingMetricLabelsNames)
if len(missingMetricLabelsNames) > 0 {
ll := make([]string, 0)
for _, metric := range missingMetricLabelsNames {
labels := missingMetricLabels[metric]
ll = append(ll, metric+"\n"+labels)
}
return false, fmt.Sprintf("Missing metric's labels (%d metrics):\n%s", len(missingMetricLabelsNames), strings.Join(ll, "\n"))
}
return true, ""
}
func testForMissingMetrics(oldMetricsCollection, newMetricsCollection MetricsCollection) (bool, string) {
missingMetrics := make([]string, 0)
for metricName := range oldMetricsCollection.LabelsByMetric {
if _, ok := newMetricsCollection.LabelsByMetric[metricName]; !ok {
missingMetrics = append(missingMetrics, metricName)
}
}
sort.Strings(missingMetrics)
if len(missingMetrics) > 0 {
return false, fmt.Sprintf("Missing metrics (%d items):\n%s", len(missingMetrics), strings.Join(missingMetrics, "\n"))
}
return true, ""
}
func dumpMetricsWithMultipleLabelSets(newMetricsCollection MetricsCollection) {
metricsWithMultipleLabels := make(map[string][]string)
for metricName, newMetricLabels := range newMetricsCollection.LabelsByMetric {
if len(newMetricLabels) > 1 {
found := false
for i := 0; !found && i < len(newMetricLabels); i++ {
lbl := newMetricLabels[i]
for j := 0; j < len(newMetricLabels); j++ {
if i == j {
continue
}
lbl1 := newMetricLabels[j]
if lbl == "" || lbl1 == "" {
continue
}
if strings.Contains(lbl, lbl1) || strings.Contains(lbl1, lbl) {
found = true
break
}
}
}
if found {
metricsWithMultipleLabels[metricName] = newMetricLabels
}
}
}
if len(metricsWithMultipleLabels) > 0 {
ss := make([]string, 0, len(metricsWithMultipleLabels))
for k, v := range metricsWithMultipleLabels {
ss = append(ss, fmt.Sprintf("%s\n %s", k, strings.Join(v, "\n ")))
}
fmt.Printf("Some metrics were collected multiple times with extra labels (%d items):\n %s\n\n", len(metricsWithMultipleLabels), strings.Join(ss, "\n "))
}
}
func dumpExtraMetrics(newMetricsCollection, oldMetricsCollection MetricsCollection) {
extraMetrics := make([]string, 0)
for metricName := range newMetricsCollection.LabelsByMetric {
if _, ok := oldMetricsCollection.LabelsByMetric[metricName]; !ok {
extraMetrics = append(extraMetrics, metricName)
}
}
sort.Strings(extraMetrics)
if len(extraMetrics) > 0 {
fmt.Printf("Extra metrics (%d items):\n %s\n\n", len(extraMetrics), strings.Join(extraMetrics, "\n "))
}
}
func parseMetricsCollection(metricRaw string) MetricsCollection {
rawMetricsArr := strings.Split(metricRaw, "\n")
metricNamesArr := getMetricNames(rawMetricsArr)
metrics := parseMetrics(metricNamesArr)
labelsByMetrics := groupByMetrics(metrics)
return MetricsCollection{
MetricNamesWithLabels: metricNamesArr,
MetricsData: metrics,
RawMetricStr: metricRaw,
RawMetricStrArr: rawMetricsArr,
LabelsByMetric: labelsByMetrics,
}
}
func arrIsSubsetOf(a, b []string) bool {
if len(a) == 0 {
return len(b) == 0
}
for _, x := range a {
if !contains(b, x) {
return false
}
}
return true
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// groupByMetrics returns labels grouped by metric
func groupByMetrics(metrics []Metric) map[string][]string {
mtr := make(map[string][]string)
for i := 0; i < len(metrics); i++ {
metric := metrics[i]
if _, ok := mtr[metric.name]; ok {
labels := mtr[metric.name]
labels = append(labels, metric.labels)
mtr[metric.name] = labels
} else {
mtr[metric.name] = []string{metric.labels}
}
}
return mtr
}
func parseMetrics(metrics []string) []Metric {
metricsLength := len(metrics)
metricsData := make([]Metric, 0, metricsLength)
for i := 0; i < metricsLength; i++ {
metricRawStr := metrics[i]
if metricRawStr == "" || strings.HasPrefix(metricRawStr, "# ") {
continue
}
var mName, mLabels string
if strings.Contains(metricRawStr, "{") {
mName = metricRawStr[:strings.Index(metricRawStr, "{")]
mLabels = metricRawStr[strings.Index(metricRawStr, "{")+1 : len(metricRawStr)-1]
} else {
mName = metricRawStr
}
metric := Metric{
name: mName,
labels: mLabels,
}
metricsData = append(metricsData, metric)
}
return metricsData
}
func dumpMetrics(oldMetrics, newMetrics MetricsCollection) {
f, _ := os.Create("assets/metrics.old.txt")
for _, s := range oldMetrics.RawMetricStrArr {
f.WriteString(s)
f.WriteString("\n")
}
f.Close()
f, _ = os.Create("assets/metrics.new.txt")
for _, s := range newMetrics.RawMetricStrArr {
f.WriteString(s)
f.WriteString("\n")
}
f.Close()
f, _ = os.Create("assets/metrics.names.old.txt")
for _, s := range oldMetrics.MetricNamesWithLabels {
f.WriteString(s)
f.WriteString("\n")
}
f.Close()
f, _ = os.Create("assets/metrics.names.new.txt")
for _, s := range newMetrics.MetricNamesWithLabels {
f.WriteString(s)
f.WriteString("\n")
}
f.Close()
}
func getMetricNames(metrics []string) []string {
length := len(metrics)
ret := make([]string, length)
for i := 0; i < length; i++ {
str := metrics[i]
if str == "" || strings.HasPrefix(str, "# ") {
ret[i] = str
continue
}
idx := strings.LastIndex(str, " ")
if idx >= 0 {
str1 := str[:idx]
ret[i] = str1
} else {
ret[i] = str
}
}
return ret
}
func getMetrics(fileName string) (string, error) {
return getMetricsFrom(fileName, "metrics")
}
func getMetricsFrom(fileName, endpoint string) (string, error) {
cmd, port, collectOutput, err := launchExporter(fileName)
if err != nil {
return "", errors.Wrap(err, "Failed to launch exporter")
}
metrics, err := tryGetMetricsFrom(port, endpoint)
if err != nil {
return "", errors.Wrap(err, "Failed to get metrics")
}
err = stopExporter(cmd, collectOutput)
if err != nil {
return "", errors.Wrap(err, "Failed to stop exporter")
}
return metrics, nil
}
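To make the parsing pipeline above concrete, a sketch with a hypothetical two-line scrape (the helpers are the ones defined in this file):

```go
raw := "# HELP pg_up whether the server is up\n" +
	`pg_up{server="127.0.0.1:5432"} 1`
coll := parseMetricsCollection(raw)
// getMetricNames drops the sample value after the last space (comment lines
// pass through unchanged), parseMetrics splits on '{', and groupByMetrics
// groups the label strings per metric name:
fmt.Println(coll.LabelsByMetric["pg_up"]) // [server="127.0.0.1:5432"]
```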


@@ -0,0 +1,219 @@
package percona_tests
import (
"flag"
"fmt"
"io/ioutil"
"strconv"
"strings"
"testing"
"time"
"github.com/montanaflynn/stats"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/tklauser/go-sysconf"
)
const (
repeatCount = 5
scrapesCount = 50
)
var doRun = flag.Bool("doRun", false, "")
var url = flag.String("url", "", "")
type StatsData struct {
meanMs float64
stdDevMs float64
stdDevPerc float64
meanHwm float64
stdDevHwmBytes float64
stdDevHwmPerc float64
meanData float64
stdDevDataBytes float64
stdDevDataPerc float64
}
func TestPerformance(t *testing.T) {
// put postgres_exporter and postgres_exporter_percona files in the 'assets' folder
// or use TestPrepareUpdatedExporter to download the updated exporter from a feature build
if !getBool(doRun) {
t.Skip("For manual runs only through make")
return
}
var updated, original *StatsData
t.Run("upstream exporter", func(t *testing.T) {
updated = doTestStats(t, repeatCount, scrapesCount, updatedExporterFileName)
})
t.Run("percona exporter", func(t *testing.T) {
original = doTestStats(t, repeatCount, scrapesCount, oldExporterFileName)
})
printStats(original, updated)
}
func calculatePerc(base, updated float64) float64 {
diff := base - updated
diffPerc := float64(100) / base * diff
diffPerc = diffPerc * -1
return diffPerc
}
func doTestStats(t *testing.T, cnt int, size int, fileName string) *StatsData {
var durations []float64
var hwms []float64
var datas []float64
for i := 0; i < cnt; i++ {
d, hwm, data, err := doTest(size, fileName)
if !assert.NoError(t, err) {
return nil
}
durations = append(durations, float64(d))
hwms = append(hwms, float64(hwm))
datas = append(datas, float64(data))
}
mean, _ := stats.Mean(durations)
stdDev, _ := stats.StandardDeviation(durations)
stdDev = float64(100) / mean * stdDev
clockTicks, err := sysconf.Sysconf(sysconf.SC_CLK_TCK)
if err != nil {
panic(err)
}
mean = mean * float64(1000) / float64(clockTicks) / float64(size)
stdDevMs := stdDev / float64(100) * mean
meanHwm, _ := stats.Mean(hwms)
stdDevHwm, _ := stats.StandardDeviation(hwms)
stdDevHwmPerc := float64(100) / meanHwm * stdDevHwm
meanData, _ := stats.Mean(datas)
stdDevData, _ := stats.StandardDeviation(datas)
stdDevDataPerc := float64(100) / meanData * stdDevData
st := StatsData{
meanMs: mean,
stdDevMs: stdDevMs,
stdDevPerc: stdDev,
meanHwm: meanHwm,
stdDevHwmBytes: stdDevHwm,
stdDevHwmPerc: stdDevHwmPerc,
meanData: meanData,
stdDevDataBytes: stdDevData,
stdDevDataPerc: stdDevDataPerc,
}
//fmt.Printf("loop %dx%d: sample time: %.2fms [deviation ±%.2fms, %.1f%%]\n", cnt, scrapesCount, st.meanMs, st.stdDevMs, st.stdDevPerc)
fmt.Printf("running %d scrapes %d times\n", size, cnt)
fmt.Printf("CPU\t%.1fms [±%.1fms, %.1f%%]\n", st.meanMs, st.stdDevMs, st.stdDevPerc)
fmt.Printf("HWM\t%.1fkB [±%.1f kB, %.1f%%]\n", st.meanHwm, st.stdDevHwmBytes, st.stdDevHwmPerc)
fmt.Printf("Data\t%.1fkB [±%.1f kB, %.1f%%]\n", st.meanData, st.stdDevDataBytes, st.stdDevDataPerc)
return &st
}
func doTest(iterations int, fileName string) (cpu, hwm, data int64, _ error) {
cmd, port, collectOutput, err := launchExporter(fileName)
if err != nil {
return 0, 0, 0, err
}
total1 := getCPUTime(cmd.Process.Pid)
for i := 0; i < iterations; i++ {
_, err = tryGetMetrics(port)
if err != nil {
return 0, 0, 0, errors.Wrapf(err, "Failed to perform test iteration %d.%s", i, collectOutput())
}
time.Sleep(1 * time.Millisecond)
}
total2 := getCPUTime(cmd.Process.Pid)
hwm, data = getCPUMem(cmd.Process.Pid)
err = stopExporter(cmd, collectOutput)
if err != nil {
return 0, 0, 0, err
}
return total2 - total1, hwm, data, nil
}
func getCPUMem(pid int) (hwm, data int64) {
contents, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
if err != nil {
return 0, 0
}
lines := strings.Split(string(contents), "\n")
for _, v := range lines {
if strings.HasPrefix(v, "VmHWM") {
val := strings.ReplaceAll(strings.ReplaceAll(strings.Split(v, ":\t")[1], " kB", ""), " ", "")
hwm, _ = strconv.ParseInt(val, 10, 64)
continue
}
if strings.HasPrefix(v, "VmData") {
val := strings.ReplaceAll(strings.ReplaceAll(strings.Split(v, ":\t")[1], " kB", ""), " ", "")
data, _ = strconv.ParseInt(val, 10, 64)
continue
}
}
return hwm, data
}
func getCPUTime(pid int) (total int64) {
contents, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
if err != nil {
return
}
lines := strings.Split(string(contents), "\n")
for _, line := range lines {
fields := strings.Fields(line)
numFields := len(fields)
if numFields > 3 {
i, err := strconv.ParseInt(fields[13], 10, 64)
if err != nil {
panic(err)
}
totalTime := i
i, err = strconv.ParseInt(fields[14], 10, 64)
if err != nil {
panic(err)
}
totalTime += i
total = totalTime
return
}
}
return
}
func printStats(original, updated *StatsData) {
fmt.Println()
fmt.Println(" \told\tnew\tdiff")
fmt.Printf("CPU, ms \t%.1f\t%.1f\t%+.0f%%\n", original.meanMs, updated.meanMs, calculatePerc(original.meanMs, updated.meanMs))
fmt.Printf("HWM, kB \t%.1f\t%.1f\t%+.0f%%\n", original.meanHwm, updated.meanHwm, calculatePerc(original.meanHwm, updated.meanHwm))
fmt.Printf("DATA, kB\t%.1f\t%.1f\t%+.0f%%\n", original.meanData, updated.meanData, calculatePerc(original.meanData, updated.meanData))
fmt.Println()
}
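For orientation on the units: doTest returns the CPU delta in clock ticks over one batch of scrapes, and doTestStats converts the mean to milliseconds per scrape as mean * 1000 / CLK_TCK / size. With illustrative numbers (CLK_TCK is typically 100 on Linux), a mean delta of 25 ticks over 50 scrapes works out to 25 * 1000 / 100 / 50 = 5 ms of CPU per scrape; the standard deviation is first expressed as a percentage of the mean and then scaled back onto the same millisecond axis. HWM and Data come straight from /proc/&lt;pid&gt;/status and are therefore in kilobytes.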

40
percona_tests/readme.md Normal file

@@ -0,0 +1,40 @@
## Integration tests for exporter update

### Fast start

Run `make prepare-env-from-repo`, then run any of the `make test-*` targets.

### A bit more detail

1. Unpack the original exporter: `make prepare-base-exporter`
2. Get the updated exporter, either
   - from a specific feature build: `make prepare-exporter-from-fb url="<feature build client binary url>"`
   - or from the current repo: `make prepare-exporter-from-repo`
3. Start the test Postgres server: `make start-postgres-db`
4. Run the basic performance comparison test: `make test-performance`
5. Run the metrics list compatibility test: `make test-metrics`

202
percona_tests/utils_test.go Normal file

@@ -0,0 +1,202 @@
package percona_tests
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
const (
postgresHost = "127.0.0.1"
postgresPort = 5432
postgresUser = "postgres"
postgresPassword = "postgres"
portRangeStart = 20000 // exporter web interface listening port
portRangeEnd = 20100 // exporter web interface listening port
exporterWaitTimeoutMs = 3000 // time to wait for exporter process start
updatedExporterFileName = "assets/postgres_exporter"
oldExporterFileName = "assets/postgres_exporter_percona"
)
func getBool(val *bool) bool {
return val != nil && *val
}
func launchExporter(fileName string) (cmd *exec.Cmd, port int, collectOutput func() string, _ error) {
lines, err := os.ReadFile("assets/test.exporter-flags.txt")
if err != nil {
return nil, 0, nil, errors.Wrapf(err, "Unable to read exporter args file")
}
port = -1
for i := portRangeStart; i < portRangeEnd; i++ {
if checkPort(i) {
port = i
break
}
}
if port == -1 {
return nil, 0, nil, errors.Errorf("Failed to find free port in range [%d..%d]", portRangeStart, portRangeEnd)
}
linesStr := string(lines)
linesStr += fmt.Sprintf("\n--web.listen-address=127.0.0.1:%d", port)
absolutePath, _ := filepath.Abs("custom-queries")
linesStr += fmt.Sprintf("\n--collect.custom_query.hr.directory=%s/high-resolution", absolutePath)
linesStr += fmt.Sprintf("\n--collect.custom_query.mr.directory=%s/medium-resolution", absolutePath)
linesStr += fmt.Sprintf("\n--collect.custom_query.lr.directory=%s/low-resolution", absolutePath)
linesArr := strings.Split(linesStr, "\n")
dsn := fmt.Sprintf("DATA_SOURCE_NAME=postgresql://%s:%s@%s:%d/postgres?sslmode=disable", postgresUser, postgresPassword, postgresHost, postgresPort)
cmd = exec.Command(fileName, linesArr...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, dsn)
var outBuffer, errorBuffer bytes.Buffer
cmd.Stdout = &outBuffer
cmd.Stderr = &errorBuffer
collectOutput = func() string {
result := ""
outStr := outBuffer.String()
if outStr == "" {
result = "Process stdOut was empty. "
} else {
result = fmt.Sprintf("Process stdOut:\n%s\n", outStr)
}
errStr := errorBuffer.String()
if errStr == "" {
result += "Process stdErr was empty."
} else {
result += fmt.Sprintf("Process stdErr:\n%s\n", errStr)
}
return result
}
err = cmd.Start()
if err != nil {
return nil, 0, nil, errors.Wrapf(err, "Failed to start exporter.%s", collectOutput())
}
err = waitForExporter(port)
if err != nil {
return nil, 0, nil, errors.Wrapf(err, "Failed to wait for exporter.%s", collectOutput())
}
return cmd, port, collectOutput, nil
}
func stopExporter(cmd *exec.Cmd, collectOutput func() string) error {
err := cmd.Process.Signal(unix.SIGINT)
if err != nil {
return errors.Wrapf(err, "Failed to send SIGINT to exporter process.%s\n", collectOutput())
}
err = cmd.Wait()
if err != nil && err.Error() != "signal: interrupt" {
return errors.Wrapf(err, "Failed to wait for exporter process termination.%s\n", collectOutput())
}
return nil
}
func tryGetMetrics(port int) (string, error) {
return tryGetMetricsFrom(port, "metrics")
}
func tryGetMetricsFrom(port int, endpoint string) (string, error) {
uri := fmt.Sprintf("http://127.0.0.1:%d/%s", port, endpoint)
client := new(http.Client)
request, err := http.NewRequest("GET", uri, nil)
if err != nil {
return "", err
}
request.Header.Add("Accept-Encoding", "gzip")
response, err := client.Do(request)
if err != nil {
return "", fmt.Errorf("failed to get response from the exporter's web interface: %w", err)
}
if response.StatusCode != http.StatusOK {
return "", fmt.Errorf("got HTTP status %d from the exporter's web interface", response.StatusCode)
}
// Check that the server actually sent compressed data
var reader io.ReadCloser
enc := response.Header.Get("Content-Encoding")
switch enc {
case "gzip":
reader, err = gzip.NewReader(response.Body)
if err != nil {
return "", fmt.Errorf("failed to create gzip reader: %w", err)
}
defer reader.Close()
default:
reader = response.Body
}
buf := new(strings.Builder)
_, err = io.Copy(buf, reader)
if err != nil {
return "", err
}
rr := buf.String()
if rr == "" {
return "", fmt.Errorf("failed to read response")
}
err = response.Body.Close()
if err != nil {
return "", fmt.Errorf("failed to close response: %w", err)
}
return rr, nil
}
func checkPort(port int) bool {
ln, err := net.Listen("tcp", ":"+fmt.Sprint(port))
if err != nil {
return false
}
_ = ln.Close()
return true
}
func waitForExporter(port int) error {
watchdog := exporterWaitTimeoutMs
_, e := tryGetMetrics(port)
for ; e != nil && watchdog > 0; watchdog-- {
time.Sleep(1 * time.Millisecond)
_, e = tryGetMetrics(port)
}
if e != nil {
return fmt.Errorf("failed to wait for exporter (on port %d): %w", port, e)
}
return nil
}
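Putting the helpers together: for the flags file shown earlier and a hypothetical free port of 20000, launchExporter effectively starts the binary roughly as

```
DATA_SOURCE_NAME="postgresql://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable" \
assets/postgres_exporter \
    --auto-discover-databases \
    --collect.custom_query.hr \
    --collect.custom_query.lr \
    --collect.custom_query.mr \
    --exclude-databases=template0,template1,postgres,cloudsqladmin,pmm-managed-dev,azure_maintenance \
    --log.level=warn \
    --web.listen-address=127.0.0.1:20000 \
    --collect.custom_query.hr.directory=<abs path>/custom-queries/high-resolution \
    --collect.custom_query.mr.directory=<abs path>/custom-queries/medium-resolution \
    --collect.custom_query.lr.directory=<abs path>/custom-queries/low-resolution
```

and then polls the /metrics endpoint (for up to roughly exporterWaitTimeoutMs milliseconds) before handing the running process back to the test.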