From 80affd98a8a186f1f6e8a2a7563c85b2eae58120 Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Wed, 11 Jan 2017 12:54:18 +0100
Subject: [PATCH] Add barrier to benchmark writer

This adds a barrier to avoid issues with unfair goroutine scheduling
that causes some fake scrapers to run away from the other ones.
---
 cmd/tsdb/Makefile |  2 +-
 cmd/tsdb/main.go  | 89 ++++++++++++++++++++++++++---------------------
 2 files changed, 50 insertions(+), 41 deletions(-)

diff --git a/cmd/tsdb/Makefile b/cmd/tsdb/Makefile
index cbfb83b95..71fc993fd 100644
--- a/cmd/tsdb/Makefile
+++ b/cmd/tsdb/Makefile
@@ -3,7 +3,7 @@ build:
 
 bench: build
 	@echo ">> running benchmark"
-	@./tsdb bench write --out=$(OUT) --metrics=$(NUM_METRICS) testdata.100k
+	@./tsdb bench write --metrics=$(NUM_METRICS) testdata.100k
 	@go tool pprof -svg ./tsdb benchout/cpu.prof > benchout/cpuprof.svg
 	@go tool pprof -svg ./tsdb benchout/mem.prof > benchout/memprof.svg
 	@go tool pprof -svg ./tsdb benchout/block.prof > benchout/blockprof.svg
diff --git a/cmd/tsdb/main.go b/cmd/tsdb/main.go
index 1f2f06a3c..07b51e36c 100644
--- a/cmd/tsdb/main.go
+++ b/cmd/tsdb/main.go
@@ -138,43 +138,7 @@ func (b *writeBenchmark) ingestScrapes(metrics []model.Metric, scrapeCount int)
 	var mu sync.Mutex
 	var total uint64
 
-	for len(metrics) > 0 {
-		l := 1000
-		if len(metrics) < 1000 {
-			l = len(metrics)
-		}
-		batch := metrics[:l]
-		metrics = metrics[l:]
-
-		wg.Add(1)
-		go func(batch []model.Metric) {
-			n, err := b.ingestScrapesShard(batch, scrapeCount)
-			if err != nil {
-				// exitWithError(err)
-				fmt.Println(" err", err)
-			}
-			mu.Lock()
-			total += n
-			mu.Unlock()
-			wg.Done()
-		}(batch)
-	}
-	wg.Wait()
-
-	fmt.Println("> total samples:", total)
-	return nil
-}
-
-func (b *writeBenchmark) ingestScrapesShard(metrics []model.Metric, scrapeCount int) (uint64, error) {
-	app := b.storage.Appender()
-	ts := int64(model.Now())
-
-	type sample struct {
-		labels labels.Labels
-		value  int64
-	}
-
-	scrape := make(map[uint64]*sample, len(metrics))
+	lbls := make([]labels.Labels, 0, len(metrics))
 
 	for _, m := range metrics {
 		lset := make(labels.Labels, 0, len(m))
@@ -183,10 +147,55 @@ func (b *writeBenchmark) ingestScrapesShard(metrics []model.Metric, scrapeCount
 		}
 		sort.Sort(lset)
 
-		scrape[lset.Hash()] = &sample{
-			labels: lset,
-			value:  123456789,
+		lbls = append(lbls, lset)
+	}
+
+	for i := 0; i < scrapeCount; i += 25 {
+		lbls := lbls
+		for len(lbls) > 0 {
+			l := 1000
+			if len(lbls) < 1000 {
+				l = len(lbls)
+			}
+			batch := lbls[:l]
+			lbls = lbls[l:]
+
+			wg.Add(1)
+			go func() {
+				n, err := b.ingestScrapesShard(batch, 25, int64(30000*i))
+				if err != nil {
+					// exitWithError(err)
+					fmt.Println(" err", err)
+				}
+				mu.Lock()
+				total += n
+				mu.Unlock()
+				wg.Done()
+			}()
 		}
+		wg.Wait()
+	}
+
+	fmt.Println("> total samples:", total)
+	return nil
+}
+
+func (b *writeBenchmark) ingestScrapesShard(metrics []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
+	app := b.storage.Appender()
+	ts := baset
+
+	type sample struct {
+		labels labels.Labels
+		value  int64
+	}
+
+	scrape := make([]*sample, 0, len(metrics))
+
+	for _, m := range metrics {
+		scrape = append(scrape, &sample{
+			labels: m,
+			value:  123456789,
+		})
 	}
 
 	total := uint64(0)
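
The barrier referred to in the subject is the wg.Wait() that now sits inside the new outer loop: writers are spawned in rounds of 25 scrapes per shard, and the next round only starts once every shard has finished the current one, so an unfairly scheduled goroutine cannot run many scrapes ahead of the others. Below is a minimal, self-contained Go sketch of the same round-based barrier pattern; the names (runRound, shards, rounds) are illustrative only and are not taken from the benchmark code.

package main

import (
	"fmt"
	"sync"
)

// runRound stands in for ingesting one round of scrapes for a single shard.
func runRound(shard, round int) int {
	return 25 // pretend we wrote 25 samples for this shard in this round
}

func main() {
	const shards, rounds = 4, 10

	var (
		mu    sync.Mutex
		total int
	)

	for r := 0; r < rounds; r++ {
		var wg sync.WaitGroup
		for s := 0; s < shards; s++ {
			wg.Add(1)
			go func(s int) {
				defer wg.Done()
				// r is stable here: wg.Wait() below runs before r changes.
				n := runRound(s, r)
				mu.Lock()
				total += n
				mu.Unlock()
			}(s)
		}
		// Barrier: no shard may begin round r+1 until all shards have finished round r.
		wg.Wait()
	}
	fmt.Println("total samples:", total)
}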