diff --git a/notifier/notifier.go b/notifier/notifier.go
index 551b166e0..b54ccd1fb 100644
--- a/notifier/notifier.go
+++ b/notifier/notifier.go
@@ -293,7 +293,7 @@ func (n *Notifier) sendAll(alerts ...*model.Alert) int {
 				n.errors.WithLabelValues(u).Inc()
 				atomic.AddUint64(&numErrors, 1)
 			}
-			n.latency.WithLabelValues(u).Observe(float64(time.Since(begin)) / float64(time.Second))
+			n.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
 			n.sent.WithLabelValues(u).Add(float64(len(alerts)))

 			wg.Done()
diff --git a/retrieval/scrape.go b/retrieval/scrape.go
index 3df04b616..74010fae8 100644
--- a/retrieval/scrape.go
+++ b/retrieval/scrape.go
@@ -201,7 +201,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	wg.Wait()
 	targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
-		float64(time.Since(start)) / float64(time.Second),
+		time.Since(start).Seconds(),
 	)
 }
@@ -256,7 +256,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 	// be inserting a previous sample set.
 	wg.Wait()
 	targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
-		float64(time.Since(start)) / float64(time.Second),
+		time.Since(start).Seconds(),
 	)
 	targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
 }
@@ -413,7 +413,7 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
 		// Only record after the first scrape.
 		if !last.IsZero() {
 			targetIntervalLength.WithLabelValues(interval.String()).Observe(
-				float64(time.Since(last)) / float64(time.Second), // Sub-second precision.
+				time.Since(last).Seconds(),
 			)
 		}
@@ -493,7 +493,7 @@ func (sl *scrapeLoop) report(start time.Time, duration time.Duration, err error)
 			model.MetricNameLabel: scrapeDurationMetricName,
 		},
 		Timestamp: ts,
-		Value:     model.SampleValue(float64(duration) / float64(time.Second)),
+		Value:     model.SampleValue(duration.Seconds()),
 	}

 	if err := sl.reportAppender.Append(healthSample); err != nil {
diff --git a/rules/manager.go b/rules/manager.go
index 449e874ea..025c53d7e 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -153,7 +153,7 @@ func (g *Group) run() {
 		start := time.Now()
 		g.eval()

-		iterationDuration.Observe(float64(time.Since(start)) / float64(time.Second))
+		iterationDuration.Observe(time.Since(start).Seconds())
 	}
 	iter()
@@ -252,7 +252,7 @@ func (g *Group) eval() {
 			defer wg.Done()

 			defer func(t time.Time) {
-				evalDuration.WithLabelValues(rtyp).Observe(float64(time.Since(t)) / float64(time.Second))
+				evalDuration.WithLabelValues(rtyp).Observe(time.Since(t).Seconds())
 			}(time.Now())

 			evalTotal.WithLabelValues(rtyp).Inc()
diff --git a/storage/local/persistence.go b/storage/local/persistence.go
index 25db3eb24..ba15e7a2f 100644
--- a/storage/local/persistence.go
+++ b/storage/local/persistence.go
@@ -559,7 +559,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(fingerprintToSeries *seriesMap
 		}
 		err = os.Rename(p.headsTempFileName(), p.headsFileName())
 		duration := time.Since(begin)
-		p.checkpointDuration.Set(float64(duration) / float64(time.Second))
+		p.checkpointDuration.Set(duration.Seconds())
 		log.Infof("Done checkpointing in-memory metrics and chunks in %v.", duration)
 	}()
@@ -1238,9 +1238,7 @@ func (p *persistence) processIndexingQueue() {
 	commitBatch := func() {
 		p.indexingBatchSizes.Observe(float64(batchSize))
 		defer func(begin time.Time) {
-			p.indexingBatchDuration.Observe(
-				float64(time.Since(begin)) / float64(time.Second),
-			)
+			p.indexingBatchDuration.Observe(time.Since(begin).Seconds())
 		}(time.Now())

 		if err := p.labelPairToFingerprints.IndexBatch(pairToFPs); err != nil {
diff --git a/storage/local/storage.go b/storage/local/storage.go
index 35e88349a..fd5b6e026 100644
--- a/storage/local/storage.go
+++ b/storage/local/storage.go
@@ -1119,7 +1119,7 @@ func (s *MemorySeriesStorage) maintainMemorySeries(
 ) (becameDirty bool) {
 	defer func(begin time.Time) {
 		s.maintainSeriesDuration.WithLabelValues(maintainInMemory).Observe(
-			float64(time.Since(begin)) / float64(time.Second),
+			time.Since(begin).Seconds(),
 		)
 	}(time.Now())
@@ -1272,7 +1272,7 @@ func (s *MemorySeriesStorage) writeMemorySeries(
 func (s *MemorySeriesStorage) maintainArchivedSeries(fp model.Fingerprint, beforeTime model.Time) {
 	defer func(begin time.Time) {
 		s.maintainSeriesDuration.WithLabelValues(maintainArchived).Observe(
-			float64(time.Since(begin)) / float64(time.Second),
+			time.Since(begin).Seconds(),
 		)
 	}(time.Now())
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index be82ba8bc..d89e9406d 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -190,7 +190,7 @@ func (t *StorageQueueManager) sendSamples(s model.Samples) {
 		// floor.
 		begin := time.Now()
 		err := t.tsdb.Store(s)
-		duration := time.Since(begin) / time.Second
+		duration := time.Since(begin).Seconds()

 		labelValue := success
 		if err != nil {
@@ -200,7 +200,7 @@ func (t *StorageQueueManager) sendSamples(s model.Samples) {
 			t.failedSamples.Add(float64(len(s)))
 		}
 		t.samplesCount.WithLabelValues(labelValue).Add(float64(len(s)))
-		t.sendLatency.Observe(float64(duration))
+		t.sendLatency.Observe(duration)
 	}()
 }
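
Note on the pattern being replaced (not part of the patch): time.Duration.Seconds() returns the duration as a float64 number of seconds, so it is equivalent to the manual float64(d) / float64(time.Second) conversion used throughout the old code. The one behavioral change is in storage/remote/queue_manager.go, where the old time.Since(begin) / time.Second performed integer division on Durations and so truncated sub-second latency before it reached the summary metric. A minimal standalone Go sketch illustrating both points:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond

	// Pattern removed by the diff: manual conversion to floating-point seconds.
	manual := float64(d) / float64(time.Second)

	// Replacement: Duration.Seconds() performs the same conversion.
	helper := d.Seconds()

	// Old queue_manager.go behavior: dividing two Durations truncates to
	// whole seconds, losing sub-second precision for the observed latency.
	truncated := float64(d / time.Second)

	fmt.Println(manual, helper, truncated) // 1.5 1.5 1
}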