prometheus/cmd/tsdb/main.go

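// Command tsdb is a CLI tool for the tsdb storage library. It currently
// exposes a single write benchmark, invoked for example as:
//
//	tsdb bench write --out=benchout/ --metrics=10000 <file>
//
// where <file> contains series in the Prometheus text exposition format.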
package main

import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"sync"
"time"
"unsafe"
"github.com/fabxc/tsdb"
"github.com/fabxc/tsdb/labels"
promlabels "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/textparse"
"github.com/spf13/cobra"
)

func main() {
root := &cobra.Command{
Use: "tsdb",
Short: "CLI tool for tsdb",
}
root.AddCommand(
NewBenchCommand(),
)
flag.CommandLine.Set("log.level", "debug")
root.Execute()
}
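
// NewBenchCommand returns the parent "bench" command grouping the benchmark
// subcommands.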
func NewBenchCommand() *cobra.Command {
c := &cobra.Command{
Use: "bench",
Short: "run benchmarks",
}
c.AddCommand(NewBenchWriteCommand())
return c
}
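
// writeBenchmark bundles the options and state of a single write benchmark
// run: the output path, the open storage, and any active profile files.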
type writeBenchmark struct {
outPath string
cleanup bool
numMetrics int
storage *tsdb.PartitionedDB
cpuprof *os.File
memprof *os.File
blockprof *os.File
}
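
// NewBenchWriteCommand returns the "bench write <file>" subcommand along with
// its --out and --metrics flags.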
func NewBenchWriteCommand() *cobra.Command {
var wb writeBenchmark
c := &cobra.Command{
Use: "write <file>",
Short: "run a write performance benchmark",
Run: wb.run,
}
c.PersistentFlags().StringVar(&wb.outPath, "out", "benchout/", "set the output path")
c.PersistentFlags().IntVar(&wb.numMetrics, "metrics", 10000, "number of metrics to read")
return c
}
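
// run executes the write benchmark: it (re)creates the output directory,
// opens the storage, reads series labels from the input file, ingests
// synthetic scrapes under profiling, and finally reports timings and sizes.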
func (b *writeBenchmark) run(cmd *cobra.Command, args []string) {
if len(args) != 1 {
exitWithError(fmt.Errorf("missing file argument"))
}
if b.outPath == "" {
dir, err := ioutil.TempDir("", "tsdb_bench")
if err != nil {
exitWithError(err)
}
b.outPath = dir
b.cleanup = true
}
if err := os.RemoveAll(b.outPath); err != nil {
exitWithError(err)
}
if err := os.MkdirAll(b.outPath, 0777); err != nil {
exitWithError(err)
}
dir := filepath.Join(b.outPath, "storage")
st, err := tsdb.OpenPartitioned(dir, 1, nil, &tsdb.Options{
WALFlushInterval: 200 * time.Millisecond,
RetentionDuration: 1 * 24 * 60 * 60 * 1000, // 1 day in milliseconds
MinBlockDuration: 3 * 60 * 60 * 1000, // 3 hours in milliseconds
MaxBlockDuration: 24 * 60 * 60 * 1000, // 1 day in milliseconds
AppendableBlocks: 2,
})
if err != nil {
exitWithError(err)
}
b.storage = st
var metrics []labels.Labels
measureTime("readData", func() {
f, err := os.Open(args[0])
if err != nil {
exitWithError(err)
}
defer f.Close()
metrics, err = readPrometheusLabels(f, b.numMetrics)
if err != nil {
exitWithError(err)
}
})
defer func() {
reportSize(dir)
if b.cleanup {
os.RemoveAll(b.outPath)
}
}()
var total uint64
dur := measureTime("ingestScrapes", func() {
b.startProfiling()
total, err = b.ingestScrapes(metrics, 3000)
if err != nil {
exitWithError(err)
}
})
fmt.Println(" > total samples:", total)
fmt.Println(" > samples/sec:", float64(total)/dur.Seconds())
measureTime("stopStorage", func() {
if err := b.storage.Close(); err != nil {
exitWithError(err)
}
b.stopProfiling()
})
}
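
// ingestScrapes appends scrapeCount scrapes per series for every series in
// lbls, sharding the series across goroutines, and returns the total number
// of samples written.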
func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
var mu sync.Mutex
var total uint64
// Ingest the scrapes in rounds of 100 per series.
for i := 0; i < scrapeCount; i += 100 {
var wg sync.WaitGroup
lbls := lbls // shadow so the inner loop can consume the slice
// Shard the series into batches of up to 1000 and ingest each batch concurrently.
for len(lbls) > 0 {
l := 1000
if len(lbls) < 1000 {
l = len(lbls)
}
batch := lbls[:l]
lbls = lbls[l:]
wg.Add(1)
go func() {
n, err := b.ingestScrapesShard(batch, 100, int64(30000*i))
if err != nil {
// exitWithError(err)
fmt.Println(" err", err)
}
mu.Lock()
total += n
mu.Unlock()
wg.Done()
}()
}
wg.Wait()
}
return total, nil
}
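
// ingestScrapesShard appends scrapeCount scrapes for every series in metrics,
// spacing samples 30 seconds apart starting at baset. The series reference
// returned by the first Add is cached and reused via AddFast; if the cached
// reference is rejected as not found, the series is re-added with its labels.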
func (b *writeBenchmark) ingestScrapesShard(metrics []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
ts := baset
type sample struct {
labels labels.Labels
value int64
ref *uint64
}
scrape := make([]*sample, 0, len(metrics))
for _, m := range metrics {
scrape = append(scrape, &sample{
labels: m,
value: 123456789,
})
}
total := uint64(0)
for i := 0; i < scrapeCount; i++ {
app := b.storage.Appender()
// Advance the timestamp by the simulated 30s scrape interval.
ts += int64(30000)
for _, s := range scrape {
s.value += 1000
// First sample for this series: add with full labels and cache the returned reference.
if s.ref == nil {
ref, err := app.Add(s.labels, ts, float64(s.value))
if err != nil {
panic(err)
}
s.ref = &ref
} else if err := app.AddFast(*s.ref, ts, float64(s.value)); err != nil {
// The cached reference may become stale; if it is reported as not found, re-add with labels.
if err.Error() != "not found" {
panic(err)
}
ref, err := app.Add(s.labels, ts, float64(s.value))
if err != nil {
panic(err)
}
s.ref = &ref
}
total++
}
if err := app.Commit(); err != nil {
return total, err
}
}
return total, nil
}
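
// startProfiling starts CPU and block profiling and raises the memory
// profiling rate; all profiles are written below the benchmark's output path.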
func (b *writeBenchmark) startProfiling() {
var err error
// Start CPU profiling.
b.cpuprof, err = os.Create(filepath.Join(b.outPath, "cpu.prof"))
if err != nil {
exitWithError(fmt.Errorf("bench: could not create cpu profile: %v\n", err))
}
pprof.StartCPUProfile(b.cpuprof)
// Start memory profiling.
b.memprof, err = os.Create(filepath.Join(b.outPath, "mem.prof"))
if err != nil {
exitWithError(fmt.Errorf("bench: could not create memory profile: %v\n", err))
}
runtime.MemProfileRate = 4096
// Start fatal profiling.
b.blockprof, err = os.Create(filepath.Join(b.outPath, "block.prof"))
if err != nil {
exitWithError(fmt.Errorf("bench: could not create block profile: %v\n", err))
}
runtime.SetBlockProfileRate(1)
}
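
// stopProfiling stops the profiles started by startProfiling and flushes
// them to their files.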
func (b *writeBenchmark) stopProfiling() {
if b.cpuprof != nil {
pprof.StopCPUProfile()
b.cpuprof.Close()
b.cpuprof = nil
}
if b.memprof != nil {
pprof.Lookup("heap").WriteTo(b.memprof, 0)
b.memprof.Close()
b.memprof = nil
}
if b.blockprof != nil {
pprof.Lookup("block").WriteTo(b.blockprof, 0)
b.blockprof.Close()
b.blockprof = nil
runtime.SetBlockProfileRate(0)
}
}
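
// reportSize walks dir and prints the size of every file of at least 10 MiB.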
func reportSize(dir string) {
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil || path == dir {
return err
}
if info.Size() < 10*1024*1024 {
return nil
}
fmt.Printf(" > file=%s size=%.04fGiB\n", path[len(dir):], float64(info.Size())/1024/1024/1024)
return nil
})
if err != nil {
exitWithError(err)
}
}
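
// measureTime runs f, logs when the stage starts and completes, and returns
// its duration.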
func measureTime(stage string, f func()) time.Duration {
fmt.Printf(">> start stage=%s\n", stage)
start := time.Now()
f()
dur := time.Since(start)
fmt.Printf(">> completed stage=%s duration=%s\n", stage, dur)
return dur
}
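
// readPrometheusLabels parses Prometheus text exposition format from r and
// returns the label sets of up to n distinct series, deduplicated by hash.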
func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
p := textparse.New(b)
i := 0
var mets []labels.Labels
hashes := map[uint64]struct{}{}
for p.Next() && i < n {
m := make(labels.Labels, 0, 10)
// Both label types share the same underlying layout, so cast rather than copy.
p.Metric((*promlabels.Labels)(unsafe.Pointer(&m)))
h := m.Hash()
if _, ok := hashes[h]; ok {
continue
}
mets = append(mets, m)
hashes[h] = struct{}{}
i++
}
return mets, p.Err()
}
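
// exitWithError prints err to stderr and exits with a non-zero status code.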
func exitWithError(err error) {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}