Reduce test data allocations

Fabian Reinartz 2016-12-08 10:04:24 +01:00
parent ce82bdb71a
commit b845f8d3a1
4 changed files with 39 additions and 13 deletions


@@ -41,8 +41,7 @@ func testChunk(c Chunk) error {
 		ts = int64(1234123324)
 		v  = 1243535.123
 	)
-	i := 0
-	for {
+	for i := 0; i < 300; i++ {
 		ts += int64(rand.Intn(10000) + 1)
 		// v = rand.Float64()
 		if i%2 == 0 {
@@ -68,7 +67,6 @@ func testChunk(c Chunk) error {
 			return err
 		}
 		exp = append(exp, pair{t: ts, v: v})
-		i++
 		// fmt.Println("appended", len(c.Bytes()), c.Bytes())
 	}


@@ -16,7 +16,7 @@ type XORChunk struct {
 // NewXORChunk returns a new chunk with XOR encoding of the given size.
 func NewXORChunk(size int) *XORChunk {
-	b := make([]byte, 3, 64)
+	b := make([]byte, 3, 128)
 	b[0] = byte(EncXOR)
 	return &XORChunk{
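As a side note on the capacity change above: in Go, make([]byte, 3, 128) returns a slice of length 3 backed by a 128-byte array, so appends stay allocation-free until that capacity is exhausted. A minimal standalone sketch (not part of this commit) illustrating the behavior:

    package main

    import "fmt"

    func main() {
    	// Length 3 (room for the header bytes), capacity 128 preallocated up front.
    	b := make([]byte, 3, 128)
    	fmt.Println(len(b), cap(b)) // 3 128

    	first := &b[0]
    	for i := 0; i < 100; i++ {
    		// Appends stay within the preallocated capacity, so the backing
    		// array is not reallocated and no further allocation happens.
    		b = append(b, byte(i))
    	}
    	fmt.Println(len(b), cap(b), &b[0] == first) // 103 128 true
    }

Doubling the initial capacity trades a little up-front memory for fewer grow-and-copy steps while a chunk fills up.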


@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 	"runtime"
 	"runtime/pprof"
+	"sort"
 	"sync"
 	"time"
@@ -172,16 +173,33 @@ func (b *writeBenchmark) ingestScrapesShard(metrics []model.Metric, scrapeCount
 	var sc tsdb.Vector
 	ts := int64(model.Now())
 
+	type sample struct {
+		labels tsdb.Labels
+		value  int64
+	}
+
+	scrape := make(map[uint64]*sample, len(metrics))
+
+	for _, m := range metrics {
+		lset := make(tsdb.Labels, 0, len(m))
+		for k, v := range m {
+			lset = append(lset, tsdb.Label{Name: string(k), Value: string(v)})
+		}
+		sort.Sort(lset)
+
+		scrape[lset.Hash()] = &sample{
+			labels: lset,
+			value:  123456789,
+		}
+	}
+
 	for i := 0; i < scrapeCount; i++ {
 		ts = ts + int64(i*10000)
 		sc.Reset()
 
-		for _, m := range metrics {
-			lset := make([]tsdb.Label, 0, len(m))
-			for k, v := range m {
-				lset = append(lset, tsdb.Label{Name: string(k), Value: string(v)})
-			}
-			sc.Add(lset, float64(rand.Int63()))
+		for _, s := range scrape {
+			s.value += rand.Int63n(1000)
+			sc.Add(s.labels, float64(s.value))
 		}
 
 		if err := b.storage.ingestScrape(ts, &sc); err != nil {
 			return err
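Changes like the ones in this commit are typically verified with Go's built-in benchmarks, where b.ReportAllocs (or the -benchmem flag) prints allocations per operation. Below is a hypothetical sketch of such a benchmark; the import path and the exercised Labels.Hash API are assumptions based on the diffs in this commit, not code it ships with:

    // labels_bench_test.go — hypothetical benchmark sketch.
    package tsdb_test

    import (
    	"strconv"
    	"testing"

    	"github.com/fabxc/tsdb" // assumed import path
    )

    func BenchmarkLabelsHash(b *testing.B) {
    	// Build a representative label set once, outside the timed loop.
    	lset := make(tsdb.Labels, 0, 10)
    	for i := 0; i < 10; i++ {
    		lset = append(lset, tsdb.Label{
    			Name:  "name" + strconv.Itoa(i),
    			Value: "value" + strconv.Itoa(i),
    		})
    	}

    	b.ReportAllocs()
    	b.ResetTimer()
    	for i := 0; i < b.N; i++ {
    		_ = lset.Hash()
    	}
    }

Running go test -bench BenchmarkLabelsHash -benchmem before and after a change then shows the difference in allocs/op and B/op.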

db.go

@@ -70,7 +70,7 @@ func (db *DB) Close() error {
 		fmt.Println(" num samples", shard.head.samples)
 	}
 
-	return fmt.Errorf("not implemented")
+	return nil
 }
 
 // Querier returns a new querier over the database.
@@ -154,7 +154,8 @@ func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
 // Hash returns a hash value for the label set.
 func (ls Labels) Hash() uint64 {
-	b := make([]byte, 0, 512)
+	b := make([]byte, 0, 1024)
+
 	for _, v := range ls {
 		b = append(b, v.Name...)
 		b = append(b, sep)
@@ -222,6 +223,7 @@ func LabelsFromMap(m map[string]string) Labels {
 // Label sets and values must have equal length.
 type Vector struct {
 	Buckets map[uint16][]Sample
+	reused  int
 }
 
 type Sample struct {
@@ -232,7 +234,15 @@ type Sample struct {
 // Reset the vector but keep resources allocated.
 func (v *Vector) Reset() {
-	v.Buckets = make(map[uint16][]Sample, len(v.Buckets))
+	// Do a full reset every n-th reusage to avoid memory leaks.
+	if v.Buckets == nil || v.reused > 100 {
+		v.Buckets = make(map[uint16][]Sample, 0)
+		return
+	}
+	for x, bkt := range v.Buckets {
+		v.Buckets[x] = bkt[:0]
+	}
+	v.reused++
 }
 
 // Add a sample to the vector.
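The new Reset relies on re-slicing each bucket to bkt[:0], which drops the length but keeps the backing array and its capacity, so the next scrape appends into memory that is already allocated; the periodic full reset keeps one unusually large scrape from pinning that memory forever. A minimal, self-contained sketch of the same pattern, using placeholder types rather than the tsdb ones:

    package main

    import "fmt"

    // vector mimics the reuse pattern from Vector.Reset above.
    type vector struct {
    	buckets map[uint16][]float64
    	reused  int
    }

    func (v *vector) reset() {
    	// Every n-th reuse, drop everything so buckets that grew very large
    	// once do not keep their memory allocated indefinitely.
    	if v.buckets == nil || v.reused > 100 {
    		v.buckets = make(map[uint16][]float64)
    		v.reused = 0
    		return
    	}
    	// Truncate in place: length goes to zero, capacity is retained.
    	for k, bkt := range v.buckets {
    		v.buckets[k] = bkt[:0]
    	}
    	v.reused++
    }

    func main() {
    	v := &vector{}
    	v.reset()
    	v.buckets[1] = append(v.buckets[1], 1, 2, 3)
    	fmt.Println(len(v.buckets[1]), cap(v.buckets[1])) // 3 and capacity >= 3

    	v.reset()
    	// The backing array survives the reset, so these lengths are 0 but the
    	// capacity is unchanged and later appends reuse the same memory.
    	fmt.Println(len(v.buckets[1]), cap(v.buckets[1]))
    }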