Mirror of https://github.com/prometheus/prometheus, synced 2024-12-26 00:23:18 +00:00
Treat a failed scrape as an empty scrape for staleness.
If a target has died but is still present in service discovery (SD), we want the previously scraped values to go stale. This also applies to brief blips.
Commit: c0c7e32e61
Parent: 850ea412ad
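For context on the assertions in the test further down: Prometheus marks a series stale by appending a sample whose value is a specific NaN bit pattern, and value.IsStaleNaN recognizes it by comparing raw bits, since any NaN compares unequal to everything, including itself. Below is a minimal, self-contained sketch of that idea; the constant and helper names are local to the sketch, and the bit pattern is illustrative rather than a promise about what pkg/value defines.

package main

import (
	"fmt"
	"math"
)

// staleNaN is a sentinel NaN bit pattern standing in for the staleness marker.
// Exponent bits all set plus a non-zero mantissa make it a valid NaN.
const staleNaN uint64 = 0x7ff0000000000002

// StaleNaN returns the sentinel as a float64 sample value.
func StaleNaN() float64 { return math.Float64frombits(staleNaN) }

// IsStaleNaN reports whether v carries the staleness sentinel. NaN == NaN is
// always false, so the check must compare the raw bits.
func IsStaleNaN(v float64) bool { return math.Float64bits(v) == staleNaN }

func main() {
	v := StaleNaN()
	fmt.Println(math.IsNaN(v))          // true: still a NaN to ordinary float code
	fmt.Println(v == v)                 // false: NaN never equals itself
	fmt.Println(IsStaleNaN(v))          // true: identified by its bit pattern
	fmt.Println(IsStaleNaN(math.NaN())) // false: an ordinary NaN is not a stale marker
}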
@@ -487,14 +487,16 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
 		err := sl.scraper.scrape(scrapeCtx, buf)
 		cancel()
+		var b []byte
 		if err == nil {
-			b := buf.Bytes()
-
-			if total, added, err = sl.append(b, start); err != nil {
-				log.With("err", err).Error("append failed")
-			}
+			b = buf.Bytes()
 		} else if errc != nil {
 			errc <- err
 		}
+		// A failed scrape is the same as an empty scrape,
+		// we still call sl.append to trigger stale markers.
+		if total, added, err = sl.append(b, start); err != nil {
+			log.With("err", err).Error("append failed")
+		}
 
 		sl.report(start, time.Since(start), total, added, err)
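The hunk above only changes the call site; sl.append itself is not part of this diff. As a rough mental model of why handing it an empty payload after a failed scrape produces stale markers, the sketch below tracks which series were seen in the previous scrape and appends a stale sample for any series missing from the current one. Every name here (appendScrape, the maps, the add callback, the bit pattern) is invented for illustration and does not claim to match the real implementation.

package main

import (
	"fmt"
	"math"
)

// staleNaN stands in for the staleness sentinel (illustrative bit pattern).
var staleNaN = math.Float64frombits(0x7ff0000000000002)

// appendScrape is a toy model of the staleness bookkeeping around sl.append:
// current holds the samples parsed from this scrape (empty when the scrape
// failed), prevSeen holds the series observed in the previous scrape. Any
// series that was seen before but is absent now gets a stale marker appended.
func appendScrape(current map[string]float64, prevSeen map[string]bool, add func(series string, v float64)) map[string]bool {
	seen := make(map[string]bool, len(current))
	for series, v := range current {
		add(series, v)
		seen[series] = true
	}
	for series := range prevSeen {
		if !seen[series] {
			add(series, staleNaN) // series disappeared, or the whole scrape failed
		}
	}
	return seen
}

func main() {
	add := func(series string, v float64) { fmt.Printf("append %s = %v\n", series, v) }

	// First scrape succeeds and yields one series.
	seen := appendScrape(map[string]float64{"metric_a": 42}, nil, add)

	// Second scrape failed; treating it as empty marks metric_a stale.
	appendScrape(nil, seen, add)
}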
@@ -437,6 +437,55 @@ func TestScrapeLoopRun(t *testing.T) {
 	}
 }
 
+func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
+	appender := &collectResultAppender{}
+	var (
+		signal = make(chan struct{})
+
+		scraper    = &testScraper{}
+		app        = func() storage.Appender { return appender }
+		reportApp  = func() storage.Appender { return &nopAppender{} }
+		numScrapes = 0
+	)
+	defer close(signal)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sl := newScrapeLoop(ctx, scraper, app, reportApp)
+
+	// Succeed once, several failures, then stop.
+	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
+		numScrapes += 1
+		if numScrapes == 1 {
+			w.Write([]byte("metric_a 42\n"))
+			return nil
+		} else if numScrapes == 5 {
+			cancel()
+		}
+		return fmt.Errorf("Scrape failed.")
+	}
+
+	go func() {
+		sl.run(10*time.Millisecond, time.Hour, nil)
+		signal <- struct{}{}
+	}()
+
+	select {
+	case <-signal:
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Scrape wasn't stopped.")
+	}
+
+	if len(appender.result) != 2 {
+		t.Fatalf("Appended samples not as expected. Wanted: %d samples Got: %d", 2, len(appender.result))
+	}
+	if appender.result[0].v != 42.0 {
+		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", 42.0, appender.result[0].v)
+	}
+	if !value.IsStaleNaN(appender.result[1].v) {
+		t.Fatalf("Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[1].v))
+	}
+}
+
 func TestScrapeLoopAppend(t *testing.T) {
 	app := &collectResultAppender{}
 	sl := &scrapeLoop{
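The test relies on collectResultAppender, a helper defined elsewhere in scrape_test.go and not shown in this diff. Conceptually it just records every appended sample so the assertions can inspect appender.result[i].v. A hedged sketch of that shape follows; the field names are chosen to match the test's usage, not the real helper, which must also satisfy this branch's storage.Appender interface.

// sample mirrors what the test reads: the value v, plus the series identity
// and timestamp of each appended sample.
type sample struct {
	metric string // series identity, simplified to a plain string here
	t      int64  // timestamp in milliseconds
	v      float64
}

// collectResultAppender records appended samples in order of arrival.
type collectResultAppender struct {
	result []sample
}

// record appends one sample to the result slice; the real helper does this
// from within its storage.Appender methods.
func (a *collectResultAppender) record(metric string, t int64, v float64) {
	a.result = append(a.result, sample{metric: metric, t: t, v: v})
}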
@@ -686,7 +735,6 @@ type testScraper struct {
 	lastDuration time.Duration
 	lastError    error
 
-	samples    samples
 	scrapeErr  error
 	scrapeFunc func(context.Context, io.Writer) error
 }
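The hunk above shows only the testScraper struct, dropping its now unused samples field. Its scrape method, defined nearby in scrape_test.go but not part of this diff, presumably delegates to the per-test scrapeFunc when one is set, which is how the new test injects its succeed-then-fail behaviour. A sketch of that dispatch, offered as an assumption rather than the verbatim method:

// scrape delegates to the per-test hook if present, otherwise returns the
// canned error. This is a sketch of the likely shape, not the exact method.
func (ts *testScraper) scrape(ctx context.Context, w io.Writer) error {
	if ts.scrapeFunc != nil {
		return ts.scrapeFunc(ctx, w)
	}
	return ts.scrapeErr
}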