diff --git a/retrieval/scrape.go b/retrieval/scrape.go
index 8d1173cd5..7a2129c59 100644
--- a/retrieval/scrape.go
+++ b/retrieval/scrape.go
@@ -904,9 +904,9 @@ loop:
 	if err == nil {
 		err = p.Err()
 	}
-	if err == nil && sampleLimitErr != nil {
+	if sampleLimitErr != nil {
+		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
 		targetScrapeSampleLimit.Inc()
-		err = sampleLimitErr
 	}
 	if numOutOfOrder > 0 {
 		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", numOutOfOrder)
diff --git a/retrieval/scrape_test.go b/retrieval/scrape_test.go
index b3a0466ab..0e34bf629 100644
--- a/retrieval/scrape_test.go
+++ b/retrieval/scrape_test.go
@@ -32,6 +32,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
+	dto "github.com/prometheus/client_model/go"
+
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/timestamp"
@@ -627,6 +629,56 @@ func TestScrapeLoopAppend(t *testing.T) {
 	}
 }
 
+func TestScrapeLoopAppendSampleLimit(t *testing.T) {
+	resApp := &collectResultAppender{}
+	app := &limitAppender{Appender: resApp, limit: 1}
+
+	sl := newScrapeLoop(context.Background(),
+		nil, nil, nil,
+		nopMutator,
+		nopMutator,
+		func() storage.Appender { return app },
+	)
+
+	// Get the value of the Counter before performing the append.
+	beforeMetric := dto.Metric{}
+	err := targetScrapeSampleLimit.Write(&beforeMetric)
+	if err != nil {
+		t.Fatal(err)
+	}
+	beforeMetricValue := beforeMetric.GetCounter().GetValue()
+
+	now := time.Now()
+	_, _, err = sl.append([]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), now)
+	if err != errSampleLimit {
+		t.Fatalf("Did not see expected sample limit error: %s", err)
+	}
+
+	// Check that the Counter has been incremented a single time for the scrape,
+	// not multiple times for each sample.
+	metric := dto.Metric{}
+	err = targetScrapeSampleLimit.Write(&metric)
+	if err != nil {
+		t.Fatal(err)
+	}
+	value := metric.GetCounter().GetValue()
+	if (value - beforeMetricValue) != 1 {
+		t.Fatalf("Unexpected change of sample limit metric: %f", (value - beforeMetricValue))
+	}
+
+	// And verify that we got the samples that fit under the limit.
+	want := []sample{
+		{
+			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			t:      timestamp.FromTime(now),
+			v:      1,
+		},
+	}
+	if !reflect.DeepEqual(want, resApp.result) {
+		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, resApp.result)
+	}
+}
+
 func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 	// This is a regression test for the scrape loop cache not properly maintaining
 	// IDs when the string representation of a metric changes across a scrape. Thus
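
Not part of the patch above: a minimal standalone sketch of the counter read-back pattern the new test relies on, i.e. writing a client_golang counter into a client_model dto.Metric before and after the code under test and asserting on the delta. The package, counter name, and test name here are illustrative placeholders, not identifiers from scrape.go or scrape_test.go.

package demo

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// demoCounter stands in for a package-level counter such as
// targetScrapeSampleLimit; the name is made up for this sketch.
var demoCounter = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "demo_scrapes_exceeded_sample_limit_total",
	Help: "Illustrative counter used to show reading a value back in a test.",
})

func TestCounterReadBack(t *testing.T) {
	// Snapshot the counter value before the code under test runs.
	before := dto.Metric{}
	if err := demoCounter.Write(&before); err != nil {
		t.Fatal(err)
	}

	demoCounter.Inc() // stands in for the code path being exercised

	// Read the value again and assert on the delta, mirroring the
	// before/after comparison used in TestScrapeLoopAppendSampleLimit.
	after := dto.Metric{}
	if err := demoCounter.Write(&after); err != nil {
		t.Fatal(err)
	}
	if delta := after.GetCounter().GetValue() - before.GetCounter().GetValue(); delta != 1 {
		t.Fatalf("expected counter to increase by 1, got %f", delta)
	}
}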