diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 5c004d660..0be3dfbc4 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -239,6 +239,9 @@ func main() {
 	a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
 		Default("1m").SetValue(&cfg.resendDelay)
 
+	a.Flag("scrape.timestamp-tolerance", "Tolerance applied to scrapes timestamp to improve timestamp compression. Experimental.").
+		Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)
+
 	a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)
 
diff --git a/scrape/scrape.go b/scrape/scrape.go
index de248d876..53d573fcc 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -50,7 +50,7 @@ import (
 // Temporary tolerance for scrape appends timestamps alignment, to enable better
 // compression at the TSDB level.
 // See https://github.com/prometheus/prometheus/issues/7846
-const scrapeTimestampTolerance = 2 * time.Millisecond
+var ScrapeTimestampTolerance = 2 * time.Millisecond
 
 var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)
 
@@ -1009,14 +1009,14 @@ mainLoop:
 		// increase in TSDB.
 		// See https://github.com/prometheus/prometheus/issues/7846
 		scrapeTime := time.Now()
-		if interval > 100*scrapeTimestampTolerance {
+		if interval > 100*ScrapeTimestampTolerance {
 			// For some reason, a tick might have been skipped, in which case we
 			// would call alignedScrapeTime.Add(interval) multiple times.
 			for scrapeTime.Sub(alignedScrapeTime) >= interval {
 				alignedScrapeTime = alignedScrapeTime.Add(interval)
 			}
 			// Align the scrape time if we are in the tolerance boundaries.
-			if scrapeTime.Sub(alignedScrapeTime) <= scrapeTimestampTolerance {
+			if scrapeTime.Sub(alignedScrapeTime) <= ScrapeTimestampTolerance {
				scrapeTime = alignedScrapeTime
 			}
 		}
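
A minimal, self-contained sketch of the alignment behaviour the scrape.go hunk above keeps, with the tolerance now coming from the exported ScrapeTimestampTolerance variable. The helper alignScrapeTimestamp and the example timestamps in main are illustrative only and are not part of the patch; only the conditions inside mirror the patched code.

package main

import (
	"fmt"
	"time"
)

// Mirrors the variable exported by the patch; the flag default is 2ms.
var ScrapeTimestampTolerance = 2 * time.Millisecond

// alignScrapeTimestamp (hypothetical helper) applies the same rules as the
// scrape loop: skip alignment for very short intervals, catch up over any
// skipped ticks, then snap to the aligned time only inside the tolerance.
func alignScrapeTimestamp(scrapeTime, alignedScrapeTime time.Time, interval time.Duration) time.Time {
	if interval <= 100*ScrapeTimestampTolerance {
		return scrapeTime
	}
	// A tick might have been skipped; advance the aligned time until it is
	// within one interval of the actual scrape time.
	for scrapeTime.Sub(alignedScrapeTime) >= interval {
		alignedScrapeTime = alignedScrapeTime.Add(interval)
	}
	// Snap to the aligned timestamp only inside the tolerance window, so
	// successive samples get identical deltas and compress well in TSDB.
	if scrapeTime.Sub(alignedScrapeTime) <= ScrapeTimestampTolerance {
		return alignedScrapeTime
	}
	return scrapeTime
}

func main() {
	aligned := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	interval := 15 * time.Second

	// 1.2ms of jitter is inside the 2ms tolerance: the timestamp snaps back.
	jittered := aligned.Add(interval).Add(1200 * time.Microsecond)
	fmt.Println(alignScrapeTimestamp(jittered, aligned, interval)) // 2024-01-01 00:00:15 +0000 UTC

	// 5ms of jitter exceeds the tolerance: the raw timestamp is kept.
	late := aligned.Add(interval).Add(5 * time.Millisecond)
	fmt.Println(alignScrapeTimestamp(late, aligned, interval)) // 2024-01-01 00:00:15.005 +0000 UTC
}

The interval > 100*ScrapeTimestampTolerance guard means alignment is skipped for very short scrape intervals, where nudging a timestamp by up to the tolerance would be a meaningful fraction of the interval.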