Fix calculation of chunks to persist after restart

Since we are not overestimating the number of chunks to persist
anymore, this commit also adjusts the default value for
-storage.local.max-chunks-to-persist. An update of the documentation
will follow.
beorn7 2016-01-25 19:33:51 +01:00
parent 972d94433a
commit a2cd479058
2 changed files with 8 additions and 2 deletions
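
For context, here is a minimal sketch of the corrected accounting, assuming (as the second hunk below suggests) that the loader tallies numChunkDescs - persistWatermark chunks per series; the names series and countChunksToPersist are illustrative stand-ins, not the actual storage API:

package main

import "fmt"

// series is a simplified stand-in for a memorySeries as loaded from the
// checkpoint: persistWatermark chunks are already on disk, numChunkDescs
// chunks exist for the series in total.
type series struct {
	persistWatermark int
	numChunkDescs    int
}

// countChunksToPersist mirrors the fixed accounting: every chunk beyond
// the persist watermark is waiting for persistence, except an open head
// chunk, which cannot be persisted yet and must not be counted.
func countChunksToPersist(all []series) (chunksToPersist int) {
	for _, s := range all {
		chunksToPersist += s.numChunkDescs - s.persistWatermark
		headChunkClosed := s.persistWatermark >= s.numChunkDescs
		if !headChunkClosed {
			// The head chunk is still open, so take it back out
			// of the count.
			chunksToPersist--
		}
	}
	return chunksToPersist
}

func main() {
	// Three chunks, one persisted, head chunk still open: only the one
	// closed, unpersisted chunk counts (prints 1; the old calculation
	// would have reported 2).
	fmt.Println(countChunksToPersist([]series{{persistWatermark: 1, numChunkDescs: 3}}))
}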

@@ -113,7 +113,7 @@ func init() {
 		"How long to retain samples in the local storage.",
 	)
 	cfg.fs.IntVar(
-		&cfg.storage.MaxChunksToPersist, "storage.local.max-chunks-to-persist", 1024*1024,
+		&cfg.storage.MaxChunksToPersist, "storage.local.max-chunks-to-persist", 512*1024,
		"How many chunks can be waiting for persistence before sample ingestion will stop. Many chunks waiting to be persisted will increase the checkpoint size.",
 	)
 	cfg.fs.DurationVar(

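With the overestimate gone (the old count included the open head chunk of each affected series), the same flag value now tolerates a larger real backlog, which is presumably why the default is halved from 1024*1024 to 512*1024. Operators who want the old ceiling back can set it explicitly; a hypothetical invocation using this generation's single-dash flag syntax (1048576 = 1024*1024):

	prometheus -storage.local.max-chunks-to-persist=1048576
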
@@ -861,6 +861,12 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
 			}
 		}
+		headChunkClosed := persistWatermark >= numChunkDescs
+		if !headChunkClosed {
+			// Head chunk is not ready for persisting yet.
+			chunksToPersist--
+		}
 		fingerprintToSeries[model.Fingerprint(fp)] = &memorySeries{
 			metric:           model.Metric(metric),
 			chunkDescs:       chunkDescs,
@@ -869,7 +875,7 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
 			chunkDescsOffset: int(chunkDescsOffset),
 			savedFirstTime:   model.Time(savedFirstTime),
 			lastTime:         chunkDescs[len(chunkDescs)-1].lastTime(),
-			headChunkClosed:  persistWatermark >= numChunkDescs,
+			headChunkClosed:  headChunkClosed,
 		}
 	}
 	return sm, chunksToPersist, nil