Merge pull request #1797 from prometheus/beorn7/storage
Consistently use the `Seconds()` method for conversion of durations
Commit b4660a550c
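The change is mechanical: every hand-rolled conversion of a time.Duration to floating-point seconds is replaced by the standard library's Seconds() method. As a quick illustration (a standalone sketch using only the standard library, not code from this commit), both forms produce the same value:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond

	// Hand-rolled conversion used by the old code paths.
	manual := float64(d) / float64(time.Second)

	// The Seconds() helper this commit switches to.
	helper := d.Seconds()

	fmt.Println(manual, helper) // 1.5 1.5
}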
@@ -293,7 +293,7 @@ func (n *Notifier) sendAll(alerts ...*model.Alert) int {
 n.errors.WithLabelValues(u).Inc()
 atomic.AddUint64(&numErrors, 1)
 }
-n.latency.WithLabelValues(u).Observe(float64(time.Since(begin)) / float64(time.Second))
+n.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
 n.sent.WithLabelValues(u).Add(float64(len(alerts)))
 
 wg.Done()
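The pattern in this hunk (take a timestamp, do the work, observe the elapsed seconds on a metric) is the standard client_golang idiom. Below is a minimal, self-contained sketch of that idiom; the metric name, help string, and label are illustrative placeholders, not the definitions used in the Prometheus source:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// A per-Alertmanager latency summary, analogous to the notifier's latency
// metric above. Name and label are hypothetical.
var latency = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "example_notification_latency_seconds",
		Help: "Latency of sending alert notifications, in seconds.",
	},
	[]string{"alertmanager"},
)

func send(u string) {
	begin := time.Now()
	// ... send the notification to u ...
	latency.WithLabelValues(u).Observe(time.Since(begin).Seconds())
}

func main() {
	prometheus.MustRegister(latency)
	send("http://localhost:9093")
}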
@@ -201,7 +201,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 
 wg.Wait()
 targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
-float64(time.Since(start)) / float64(time.Second),
+time.Since(start).Seconds(),
 )
 }
 
@@ -256,7 +256,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 // be inserting a previous sample set.
 wg.Wait()
 targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
-float64(time.Since(start)) / float64(time.Second),
+time.Since(start).Seconds(),
 )
 targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
 }
@@ -413,7 +413,7 @@ func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
 // Only record after the first scrape.
 if !last.IsZero() {
 targetIntervalLength.WithLabelValues(interval.String()).Observe(
-float64(time.Since(last)) / float64(time.Second), // Sub-second precision.
+time.Since(last).Seconds(),
 )
 }
 
@@ -493,7 +493,7 @@ func (sl *scrapeLoop) report(start time.Time, duration time.Duration, err error)
 model.MetricNameLabel: scrapeDurationMetricName,
 },
 Timestamp: ts,
-Value: model.SampleValue(float64(duration) / float64(time.Second)),
+Value: model.SampleValue(duration.Seconds()),
 }
 
 if err := sl.reportAppender.Append(healthSample); err != nil {
@@ -153,7 +153,7 @@ func (g *Group) run() {
 start := time.Now()
 g.eval()
 
-iterationDuration.Observe(float64(time.Since(start)) / float64(time.Second))
+iterationDuration.Observe(time.Since(start).Seconds())
 }
 iter()
 
@@ -252,7 +252,7 @@ func (g *Group) eval() {
 defer wg.Done()
 
 defer func(t time.Time) {
-evalDuration.WithLabelValues(rtyp).Observe(float64(time.Since(t)) / float64(time.Second))
+evalDuration.WithLabelValues(rtyp).Observe(time.Since(t).Seconds())
 }(time.Now())
 
 evalTotal.WithLabelValues(rtyp).Inc()
@@ -559,7 +559,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(fingerprintToSeries *seriesMap
 }
 err = os.Rename(p.headsTempFileName(), p.headsFileName())
 duration := time.Since(begin)
-p.checkpointDuration.Set(float64(duration) / float64(time.Second))
+p.checkpointDuration.Set(duration.Seconds())
 log.Infof("Done checkpointing in-memory metrics and chunks in %v.", duration)
 }()
 
@@ -1238,9 +1238,7 @@ func (p *persistence) processIndexingQueue() {
 commitBatch := func() {
 p.indexingBatchSizes.Observe(float64(batchSize))
 defer func(begin time.Time) {
-p.indexingBatchDuration.Observe(
-float64(time.Since(begin)) / float64(time.Second),
-)
+p.indexingBatchDuration.Observe(time.Since(begin).Seconds())
 }(time.Now())
 
 if err := p.labelPairToFingerprints.IndexBatch(pairToFPs); err != nil {
@@ -1119,7 +1119,7 @@ func (s *MemorySeriesStorage) maintainMemorySeries(
 ) (becameDirty bool) {
 defer func(begin time.Time) {
 s.maintainSeriesDuration.WithLabelValues(maintainInMemory).Observe(
-float64(time.Since(begin)) / float64(time.Second),
+time.Since(begin).Seconds(),
 )
 }(time.Now())
 
@@ -1272,7 +1272,7 @@ func (s *MemorySeriesStorage) writeMemorySeries(
 func (s *MemorySeriesStorage) maintainArchivedSeries(fp model.Fingerprint, beforeTime model.Time) {
 defer func(begin time.Time) {
 s.maintainSeriesDuration.WithLabelValues(maintainArchived).Observe(
-float64(time.Since(begin)) / float64(time.Second),
+time.Since(begin).Seconds(),
 )
 }(time.Now())
 
@@ -190,7 +190,7 @@ func (t *StorageQueueManager) sendSamples(s model.Samples) {
 // floor.
 begin := time.Now()
 err := t.tsdb.Store(s)
-duration := time.Since(begin) / time.Second
+duration := time.Since(begin).Seconds()
 
 labelValue := success
 if err != nil {
@@ -200,7 +200,7 @@ func (t *StorageQueueManager) sendSamples(s model.Samples) {
 t.failedSamples.Add(float64(len(s)))
 }
 t.samplesCount.WithLabelValues(labelValue).Add(float64(len(s)))
-t.sendLatency.Observe(float64(duration))
+t.sendLatency.Observe(duration)
 }()
 }
 
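The last two hunks deserve a closer look: the old code computed duration := time.Since(begin) / time.Second, which is integer division on a time.Duration and therefore truncates to whole seconds, while the new duration := time.Since(begin).Seconds() keeps sub-second precision and is already a float64, so the later Observe call no longer needs a float64() conversion. A small standard-library-only sketch of the difference:

package main

import (
	"fmt"
	"time"
)

func main() {
	elapsed := 2700 * time.Millisecond

	// Old approach: Duration / Duration truncates to whole seconds.
	truncated := elapsed / time.Second
	fmt.Println(float64(truncated)) // 2

	// New approach: sub-second precision, already a float64.
	fmt.Println(elapsed.Seconds()) // 2.7
}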