Fix the incrementing of prometheus_target_scrapes_exceeded_sample_limit_total (#3669)

Once a scrape target has reached the sample_limit, any further samples
from that target will continue to result in errSampleLimit.

This means that after the append loop completes, `err` will still be
errSampleLimit, so the `err == nil` check fails and the metric was never incremented.
Adam authored on 2018-01-09 15:43:28 +00:00, committed by Brian Brazil
parent 97464236c7
commit f64014f70b
2 changed files with 54 additions and 2 deletions

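To make the failure mode concrete: once the limit is hit, every later append returns errSampleLimit, so `err` leaves the loop non-nil. The following is a self-contained sketch of why the old guard could never fire; the loop and variable names are simplified stand-ins for the real scrape loop, not the actual code.

package main

import (
	"errors"
	"fmt"
)

var errSampleLimit = errors.New("sample limit exceeded")

func main() {
	// Pretend the first sample fits and every later append hits the limit.
	appendResults := []error{nil, errSampleLimit, errSampleLimit}

	var err, sampleLimitErr error
	for _, appendErr := range appendResults {
		err = appendErr
		if err == errSampleLimit {
			// The real loop keeps parsing so the scraped-samples total stays
			// correct; it remembers the error instead of breaking out.
			sampleLimitErr = err
			continue
		}
	}

	// Old guard: err is still errSampleLimit from the last iteration, so this
	// branch is unreachable and the counter was never incremented.
	if err == nil && sampleLimitErr != nil {
		fmt.Println("counter incremented (never reached)")
	}

	// Fixed guard: depends only on sampleLimitErr, so it fires exactly once.
	if sampleLimitErr != nil {
		fmt.Println("counter incremented once for this scrape")
	}
}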

@@ -904,9 +904,9 @@ loop:
 	if err == nil {
 		err = p.Err()
 	}
-	if err == nil && sampleLimitErr != nil {
+	if sampleLimitErr != nil {
+		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
 		targetScrapeSampleLimit.Inc()
-		err = sampleLimitErr
 	}
 	if numOutOfOrder > 0 {
 		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", numOutOfOrder)

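The new comment records the design choice: prometheus_target_scrapes_exceeded_sample_limit_total counts scrapes that exceeded the limit, not individual rejected samples, which is why it is Inc'd once after the loop rather than per sample. For reference, a counter like targetScrapeSampleLimit is declared and registered with client_golang roughly as below; the package name and Help text here are illustrative, not the repository's exact source.

package scrape

import "github.com/prometheus/client_golang/prometheus"

// Illustrative declaration; the real variable is defined alongside the scrape loop.
var targetScrapeSampleLimit = prometheus.NewCounter(
	prometheus.CounterOpts{
		Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
		Help: "Total number of scrapes that hit the sample limit and were rejected.",
	},
)

func init() {
	// Registering the collector is what exposes it on /metrics.
	prometheus.MustRegister(targetScrapeSampleLimit)
}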

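The test added below drives the scrape loop through a limitAppender with a limit of one sample, so the second and third samples are rejected with errSampleLimit while the first still reaches the underlying appender. Here is a self-contained sketch of that wrapping pattern, using a simplified interface instead of the real storage.Appender; the type and method names are illustrative only.

package main

import (
	"errors"
	"fmt"
)

var errSampleLimit = errors.New("sample limit exceeded")

// appender is a deliberately simplified stand-in for storage.Appender.
type appender interface {
	Add(metric string, t int64, v float64) error
}

// memAppender records what was appended, similar in spirit to collectResultAppender.
type memAppender struct{ metrics []string }

func (m *memAppender) Add(metric string, t int64, v float64) error {
	m.metrics = append(m.metrics, metric)
	return nil
}

// limitAppender wraps another appender and rejects every sample past limit.
type limitAppender struct {
	appender
	limit int
	count int
}

func (app *limitAppender) Add(metric string, t int64, v float64) error {
	app.count++
	if app.count > app.limit {
		// Every sample past the limit fails with the same error, which is why
		// err is still errSampleLimit after the append loop finishes.
		return errSampleLimit
	}
	return app.appender.Add(metric, t, v)
}

func main() {
	res := &memAppender{}
	app := &limitAppender{appender: res, limit: 1}
	for _, m := range []string{"metric_a", "metric_b", "metric_c"} {
		if err := app.Add(m, 0, 1); err != nil {
			fmt.Println(m, "rejected:", err)
		}
	}
	fmt.Println("stored:", res.metrics) // only metric_a fits under the limit
}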
@@ -32,6 +32,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
+	dto "github.com/prometheus/client_model/go"
+
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/timestamp"
@@ -627,6 +629,56 @@ func TestScrapeLoopAppend(t *testing.T) {
 	}
 }
 
+func TestScrapeLoopAppendSampleLimit(t *testing.T) {
+	resApp := &collectResultAppender{}
+	app := &limitAppender{Appender: resApp, limit: 1}
+
+	sl := newScrapeLoop(context.Background(),
+		nil, nil, nil,
+		nopMutator,
+		nopMutator,
+		func() storage.Appender { return app },
+	)
+
+	// Get the value of the Counter before performing the append.
+	beforeMetric := dto.Metric{}
+	err := targetScrapeSampleLimit.Write(&beforeMetric)
+	if err != nil {
+		t.Fatal(err)
+	}
+	beforeMetricValue := beforeMetric.GetCounter().GetValue()
+
+	now := time.Now()
+	_, _, err = sl.append([]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), now)
+	if err != errSampleLimit {
+		t.Fatalf("Did not see expected sample limit error: %s", err)
+	}
+
+	// Check that the Counter has been incremented a single time for the scrape,
+	// not multiple times for each sample.
+	metric := dto.Metric{}
+	err = targetScrapeSampleLimit.Write(&metric)
+	if err != nil {
+		t.Fatal(err)
+	}
+	value := metric.GetCounter().GetValue()
+	if (value - beforeMetricValue) != 1 {
+		t.Fatalf("Unexpected change of sample limit metric: %f", value-beforeMetricValue)
+	}
+
+	// And verify that we got the samples that fit under the limit.
+	want := []sample{
+		{
+			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			t:      timestamp.FromTime(now),
+			v:      1,
+		},
+	}
+	if !reflect.DeepEqual(want, resApp.result) {
+		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, resApp.result)
+	}
+}
+
 func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 	// This is a regression test for the scrape loop cache not properly maintaining
 	// IDs when the string representation of a metric changes across a scrape. Thus