comments about the 120samples const and link to Gorilla papers. (#423)
Signed-off-by: Krasi Georgiev <kgeorgie@redhat.com>
This commit is contained in:
parent 66b6b87cd4
commit 1dd9a6bd29
README.md | 2 ++

@@ -7,6 +7,8 @@ This repository contains the Prometheus storage layer that is used in its 2.x re
 
 A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/).
 
+Based on the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
+
 Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/).
 
 See also the [format documentation](docs/format/README.md).
head.go | 3 +++
@@ -1427,6 +1427,9 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) {
 
 // append adds the sample (t, v) to the series.
 func (s *memSeries) append(t int64, v float64) (success, chunkCreated bool) {
+	// Based on the Gorilla white paper, this offers a near-optimal compression ratio,
+	// so anything bigger than this has diminishing returns and increases
+	// the time range within which we have to decompress all samples.
 	const samplesPerChunk = 120
 
 	c := s.head()
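For readers outside the codebase, the added comment is the whole rationale: Gorilla-style XOR compression gets close to its best ratio after roughly 120 samples, so larger chunks mostly add decompression cost. Below is a minimal, self-contained Go sketch of that cutting policy; the `series`, `chunk`, and `sample` types and this `append` are illustrative stand-ins, not the actual tsdb implementation, which compresses samples rather than storing them raw.

```go
package main

import "fmt"

// sample is a single (timestamp, value) pair, like the (t, v) arguments above.
type sample struct {
	t int64
	v float64
}

// chunk stands in for a compressed Gorilla-style chunk; it holds raw samples
// here so the cutting policy stays visible.
type chunk struct {
	samples []sample
}

// series illustrates the policy implied by samplesPerChunk: once the head
// chunk holds 120 samples, further appends go to a fresh chunk. Bigger chunks
// would compress only marginally better (diminishing returns) while forcing
// readers to decompress a longer time range to reach any single sample.
type series struct {
	chunks []chunk
}

const samplesPerChunk = 120

func (s *series) append(t int64, v float64) (chunkCreated bool) {
	// Cut a new head chunk when there is none yet or the current one is full.
	if len(s.chunks) == 0 || len(s.chunks[len(s.chunks)-1].samples) >= samplesPerChunk {
		s.chunks = append(s.chunks, chunk{})
		chunkCreated = true
	}
	head := &s.chunks[len(s.chunks)-1]
	head.samples = append(head.samples, sample{t, v})
	return chunkCreated
}

func main() {
	var s series
	created := 0
	for i := 0; i < 1000; i++ {
		if s.append(int64(i), float64(i)) {
			created++
		}
	}
	// 1000 samples at 120 per chunk => ceil(1000/120) = 9 chunks.
	fmt.Printf("chunks created: %d, chunks held: %d\n", created, len(s.chunks))
}
```

As a rough sense of scale: at Prometheus's common 15s scrape interval, 120 samples cover about 30 minutes of data per chunk.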