prometheus/db.go

// Package tsdb implements a time series storage for float64 sample data.
package tsdb

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"
	"unsafe"

	"golang.org/x/sync/errgroup"

	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/fabxc/tsdb/labels"
	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)

// DefaultOptions used for the DB. They are sane for setups using
// millisecond precision timestamps.
var DefaultOptions = &Options{
	WALFlushInterval: 5 * time.Second,
	MinBlockDuration: 2 * 60 * 60 * 1000,  // 2 hours in milliseconds
	MaxBlockDuration: 48 * 60 * 60 * 1000, // 48 hours in milliseconds
	AppendableBlocks: 2,
}

// Options of the DB storage.
type Options struct {
	// The interval at which the write ahead log is flushed to disc.
	WALFlushInterval time.Duration

	// The timestamp range of head blocks after which they get persisted.
	// It's the minimum duration of any persisted block.
	MinBlockDuration uint64

	// The maximum timestamp range of compacted blocks.
	MaxBlockDuration uint64

	// Number of head blocks that can be appended to.
	// Should be two or higher to prevent write errors in general scenarios.
	//
	// After a new block is started for timestamp t0 or higher, appends with
	// timestamps as early as t0 - (n-1) * MinBlockDuration are valid.
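	// For example, with MinBlockDuration of 2 hours and AppendableBlocks
	// set to 2, a block cut at t0 keeps appends valid down to t0 - 2h.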
	AppendableBlocks int
}

// Appender allows appending a batch of data. It must be completed with a
// call to Commit or Rollback and must not be reused afterwards.
type Appender interface {
	// Add adds a sample pair for the given series. A reference number is
	// returned which can be used to add further samples in the same or later
	// transactions.
	// Returned reference numbers are ephemeral and may be rejected in calls
	// to AddFast() at any point. Adding the sample via Add() returns a new
	// reference number.
	Add(l labels.Labels, t int64, v float64) (uint64, error)

	// AddFast adds a sample pair for the referenced series. It is generally
	// faster than adding a sample by providing its full label set.
	AddFast(ref uint64, t int64, v float64) error

	// Commit submits the collected samples and purges the batch.
	Commit() error

	// Rollback rolls back all modifications made in the appender so far.
	Rollback() error
}
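
// A minimal usage sketch of the Appender contract; exampleAppend is
// illustrative only and not part of the package API. Add resolves a series
// by its full label set, AddFast reuses the returned reference, and Commit
// finishes the batch.
func exampleAppend(db *DB) error {
	app := db.Appender()

	// Label sets must be sorted by label name.
	lset := labels.Labels{
		{Name: "__name__", Value: "http_requests_total"},
		{Name: "job", Value: "api"},
	}

	// The first sample of a series is added with its full label set.
	ref, err := app.Add(lset, 1000, 1)
	if err != nil {
		return err
	}
	// Later samples can reuse the returned reference. A stale reference
	// yields ErrNotFound, in which case the caller falls back to Add.
	if err := app.AddFast(ref, 2000, 2); err == ErrNotFound {
		if _, err := app.Add(lset, 2000, 2); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	return app.Commit()
}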

const sep = '\xff'

// DB handles reads and writes of time series falling into
// a hashed partition of the series set.
type DB struct {
	dir     string
	logger  log.Logger
	metrics *dbMetrics
	opts    *Options

	mtx       sync.RWMutex
	persisted []*persistedBlock
	heads     []*headBlock
	headGen   uint8

	compactor *compactor

	compactc chan struct{}
	donec    chan struct{}
	stopc    chan struct{}
}

type dbMetrics struct {
	samplesAppended      prometheus.Counter
	compactionsTriggered prometheus.Counter
}

func newDBMetrics(r prometheus.Registerer) *dbMetrics {
	m := &dbMetrics{}

	m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tsdb_samples_appended_total",
		Help: "Total number of appended samples.",
	})
	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tsdb_compactions_triggered_total",
		Help: "Total number of triggered compactions for the partition.",
	})

	if r != nil {
		r.MustRegister(
			m.samplesAppended,
			m.compactionsTriggered,
		)
	}
	return m
}

// Open returns a new DB in the given directory.
func Open(dir string, logger log.Logger, opts *Options) (db *DB, err error) {
	if !fileutil.Exist(dir) {
		if err := os.MkdirAll(dir, 0777); err != nil {
			return nil, err
		}
	}
	var r prometheus.Registerer
	// r := prometheus.DefaultRegisterer

	if opts == nil {
		opts = DefaultOptions
	}
	if opts.AppendableBlocks < 1 {
		return nil, errors.New("AppendableBlocks must be greater than 0")
	}

	db = &DB{
		dir:      dir,
		logger:   logger,
		metrics:  newDBMetrics(r),
		opts:     opts,
		compactc: make(chan struct{}, 1),
		donec:    make(chan struct{}),
		stopc:    make(chan struct{}),
	}
	db.compactor = newCompactor(r, &compactorOptions{
		maxBlockRange: opts.MaxBlockDuration,
	})

	if err := db.initBlocks(); err != nil {
		return nil, err
	}
	go db.run()

	return db, nil
}

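// run is the DB's background loop. It reacts to triggered compactions and
// runs them until the DB is closed.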
func (db *DB) run() {
	defer close(db.donec)

	for {
		select {
		case <-db.compactc:
			db.metrics.compactionsTriggered.Inc()

			var infos []compactionInfo
			for _, b := range db.compactable() {
				m := b.Meta()

				infos = append(infos, compactionInfo{
					generation: m.Compaction.Generation,
					mint:       m.MinTime,
					maxt:       m.MaxTime,
				})
			}
			i, j, ok := db.compactor.pick(infos)
			if !ok {
				continue
			}
			db.logger.Log("msg", "picked", "i", i, "j", j)
			for k := i; k < j; k++ {
				db.logger.Log("k", k, "generation", infos[k].generation)
			}

			if err := db.compact(i, j); err != nil {
				db.logger.Log("msg", "compaction failed", "err", err)
				continue
			}
			db.logger.Log("msg", "compaction completed")

			// Trigger another compaction in case there's more work to do.
			select {
			case db.compactc <- struct{}{}:
			default:
			}

		case <-db.stopc:
			return
		}
	}
}

func (db *DB) getBlock(i int) Block {
	if i < len(db.persisted) {
		return db.persisted[i]
	}
	return db.heads[i-len(db.persisted)]
}

// removeBlocks removes the blocks in range [i, j) from the list of persisted
// and head blocks. The blocks are not closed and their files not deleted.
func (db *DB) removeBlocks(i, j int) {
	for k := i; k < j; k++ {
		if i < len(db.persisted) {
			db.persisted = append(db.persisted[:i], db.persisted[i+1:]...)
		} else {
			l := i - len(db.persisted)
			db.heads = append(db.heads[:l], db.heads[l+1:]...)
		}
	}
}

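// blocks returns all persisted blocks followed by all head blocks.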
func (db *DB) blocks() (bs []Block) {
	for _, b := range db.persisted {
		bs = append(bs, b)
	}
	for _, b := range db.heads {
		bs = append(bs, b)
	}
	return bs
}

// compact compacts the blocks in range [i, j) into a temporary directory and
// atomically swaps the result in on successful completion.
func (db *DB) compact(i, j int) error {
	if j <= i {
		return errors.New("invalid compaction block range")
	}
	var blocks []Block
	for k := i; k < j; k++ {
		blocks = append(blocks, db.getBlock(k))
	}
	var (
		dir    = blocks[0].Dir()
		tmpdir = dir + ".tmp"
	)

	if err := db.compactor.compact(tmpdir, blocks...); err != nil {
		return err
	}

	pb, err := newPersistedBlock(tmpdir)
	if err != nil {
		return err
	}

	db.mtx.Lock()
	defer db.mtx.Unlock()

	if err := renameDir(tmpdir, dir); err != nil {
		return errors.Wrap(err, "rename dir")
	}
	pb.dir = dir

	db.removeBlocks(i, j)
	db.persisted = append(db.persisted, pb)

	// The first block's directory was reused for the compacted result, so
	// only the directories of the remaining source blocks are deleted.
	for i, b := range blocks {
		if err := b.Close(); err != nil {
			return errors.Wrap(err, "close old block")
		}
		if i > 0 {
			if err := os.RemoveAll(b.Dir()); err != nil {
				return errors.Wrap(err, "removing old block")
			}
		}
	}

	return nil
}

func (db *DB) initBlocks() error {
	var (
		persisted []*persistedBlock
		heads     []*headBlock
	)

	dirs, err := blockDirs(db.dir)
	if err != nil {
		return err
	}

	for _, dir := range dirs {
		if fileutil.Exist(filepath.Join(dir, walFileName)) {
			h, err := openHeadBlock(dir, db.logger)
			if err != nil {
				return err
			}
			h.generation = db.headGen
			db.headGen++
			heads = append(heads, h)
			continue
		}
		b, err := newPersistedBlock(dir)
		if err != nil {
			return err
		}
		persisted = append(persisted, b)
	}

	db.persisted = persisted
	db.heads = heads

	return nil
}

// Close closes the partition.
func (db *DB) Close() error {
	close(db.stopc)
	<-db.donec

	var merr MultiError

	db.mtx.Lock()
	defer db.mtx.Unlock()

	for _, pb := range db.persisted {
		merr.Add(pb.Close())
	}
	for _, hb := range db.heads {
		merr.Add(hb.Close())
	}

	return merr.Err()
}

// Appender returns a new Appender on the database.
func (db *DB) Appender() Appender {
	db.mtx.RLock()
	a := &dbAppender{db: db}

	for _, b := range db.appendable() {
		a.heads = append(a.heads, b.Appender().(*headAppender))
	}
	return a
}

type dbAppender struct {
	db *DB
	// gen  uint8
	// head *headAppender
	maxGen uint8
	heads  []*headAppender
}

func (a *dbAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
	h, err := a.appenderFor(t)
	if err != nil {
		return 0, err
	}
	ref, err := h.Add(lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref | (uint64(h.generation) << 40), nil
}

func (a *dbAppender) hashedAdd(hash uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
	h, err := a.appenderFor(t)
	if err != nil {
		return 0, err
	}
	ref, err := h.hashedAdd(hash, lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref | (uint64(h.generation) << 40), nil
}

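// The 64-bit references handed out above are packed as follows. This layout
// is inferred from the shifts used here and in partitionedAppender, sketched
// for orientation rather than as a formal spec:
//
//	bits 48-55: partition index (set by partitionedAppender.Add)
//	bits 40-47: head block generation
//	bits  0-39: per-head series reference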
func (a *dbAppender) AddFast(ref uint64, t int64, v float64) error {
	// We store the head generation in bits 40-47 of the reference and
	// use it to reject stale references.
	gen := uint8((ref << 16) >> 56)

	h, err := a.appenderFor(t)
	if err != nil {
		return err
	}
	// If the reference pointed into a previous block, we cannot
	// use it to append the sample.
	if h.generation != gen {
		return ErrNotFound
	}
	return h.AddFast(ref, t, v)
}

// appenderFor gets the appender for the head containing timestamp t.
// If the head block doesn't exist yet, it gets created.
func (a *dbAppender) appenderFor(t int64) (*headAppender, error) {
	// If there's no fitting head block for t, ensure it gets created.
	if len(a.heads) == 0 || t >= a.heads[len(a.heads)-1].meta.MaxTime {
		a.db.mtx.RUnlock()

		var mints []int64
		for _, h := range a.heads {
			mints = append(mints, h.meta.MinTime)
		}
		fmt.Println("ensure head", t, mints)

		if err := a.db.ensureHead(t); err != nil {
			a.db.mtx.RLock()
			return nil, err
		}
		a.db.mtx.RLock()

		if len(a.heads) == 0 {
			for _, b := range a.db.appendable() {
				a.heads = append(a.heads, b.Appender().(*headAppender))
			}
		} else {
			maxSeq := a.heads[len(a.heads)-1].meta.Sequence
			for _, b := range a.db.appendable() {
				if b.meta.Sequence > maxSeq {
					a.heads = append(a.heads, b.Appender().(*headAppender))
				}
			}
		}
	}
	for i := len(a.heads) - 1; i >= 0; i-- {
		if h := a.heads[i]; t >= h.meta.MinTime {
			return h, nil
		}
	}
	return nil, ErrNotFound
}

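// ensureHead makes sure a head block covering timestamp t exists, cutting
// new head blocks from the end of the current range until it does.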
func (db *DB) ensureHead(t int64) error {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	// Initial case for a new database: create AppendableBlocks head blocks,
	// of which the first AppendableBlocks-1 are front padding.
	if len(db.heads) == 0 {
		for i := int64(db.opts.AppendableBlocks - 1); i >= 0; i-- {
			fmt.Println("cut init for", t-i*int64(db.opts.MinBlockDuration))
			if _, err := db.cut(t - i*int64(db.opts.MinBlockDuration)); err != nil {
				return err
			}
		}
	}

	for {
		h := db.heads[len(db.heads)-1]
		// If t doesn't exceed the range of the head blocks, there's nothing to do.
		if t < h.meta.MaxTime {
			return nil
		}
		fmt.Println("cut for", h.meta.MaxTime)
		if _, err := db.cut(h.meta.MaxTime); err != nil {
			return err
		}
	}
}

func (a *dbAppender) Commit() error {
	var merr MultiError

	for _, h := range a.heads {
		merr.Add(h.Commit())
	}
	a.db.mtx.RUnlock()

	return merr.Err()
}

func (a *dbAppender) Rollback() error {
	var merr MultiError

	for _, h := range a.heads {
		merr.Add(h.Rollback())
	}
	a.db.mtx.RUnlock()

	return merr.Err()
}

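// appendable returns the head blocks that still accept samples: the most
// recent AppendableBlocks heads.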
func (db *DB) appendable() []*headBlock {
	if len(db.heads) <= db.opts.AppendableBlocks {
		return db.heads
	}
	return db.heads[len(db.heads)-db.opts.AppendableBlocks:]
}

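// compactable returns all blocks that are no longer appended to: every
// persisted block plus all but the last AppendableBlocks head blocks.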
func (db *DB) compactable() []Block {
	db.mtx.RLock()
	defer db.mtx.RUnlock()

	var blocks []Block
	for _, pb := range db.persisted {
		blocks = append(blocks, pb)
	}
	if len(db.heads) <= db.opts.AppendableBlocks {
		return blocks
	}
	for _, h := range db.heads[:len(db.heads)-db.opts.AppendableBlocks] {
		blocks = append(blocks, h)
	}
	return blocks
}

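// intervalOverlap reports whether the inclusive ranges [amin, amax] and
// [bmin, bmax] intersect.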
func intervalOverlap(amin, amax, bmin, bmax int64) bool {
	if bmin >= amin && bmin <= amax {
		return true
	}
	if amin >= bmin && amin <= bmax {
		return true
	}
	return false
}

// intervalContains reports whether t lies within the inclusive range [min, max].
func intervalContains(min, max, t int64) bool {
	return t >= min && t <= max
}

// blocksForInterval returns all blocks within the partition that may contain
// data for the given time range.
func (db *DB) blocksForInterval(mint, maxt int64) []Block {
	var bs []Block

	for _, b := range db.persisted {
		m := b.Meta()
		if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) {
			bs = append(bs, b)
		}
	}
	for _, b := range db.heads {
		m := b.Meta()
		if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) {
			bs = append(bs, b)
		}
	}

	return bs
}

// cut starts a new head block to append to. The completed head block
// will still be appendable for the configured grace period.
func (db *DB) cut(mint int64) (*headBlock, error) {
	maxt := mint + int64(db.opts.MinBlockDuration)

	dir, seq, err := nextBlockDir(db.dir)
	if err != nil {
		return nil, err
	}
	newHead, err := createHeadBlock(dir, seq, db.logger, mint, maxt)
	if err != nil {
		return nil, err
	}
	db.heads = append(db.heads, newHead)
	db.headGen++
	newHead.generation = db.headGen

	select {
	case db.compactc <- struct{}{}:
	default:
	}

	return newHead, nil
}

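// isBlockDir reports whether fi names a block directory of the form
// b-<sequence number>.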
func isBlockDir(fi os.FileInfo) bool {
	if !fi.IsDir() {
		return false
	}
	if !strings.HasPrefix(fi.Name(), "b-") {
		return false
	}
	if _, err := strconv.ParseUint(fi.Name()[2:], 10, 32); err != nil {
		return false
	}
	return true
}

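// blockDirs returns the directories of all blocks found in dir.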
func blockDirs(dir string) ([]string, error) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var dirs []string

	for _, fi := range files {
		if isBlockDir(fi) {
			dirs = append(dirs, filepath.Join(dir, fi.Name()))
		}
	}
	return dirs, nil
}

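// nextBlockDir scans dir for existing block directories and returns the
// path and sequence number for the next one.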
func nextBlockDir(dir string) (string, int, error) {
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		return "", 0, err
	}

	i := uint64(0)
	for _, n := range names {
		if !strings.HasPrefix(n, "b-") {
			continue
		}
		j, err := strconv.ParseUint(n[2:], 10, 32)
		if err != nil {
			continue
		}
		i = j
	}
	return filepath.Join(dir, fmt.Sprintf("b-%06d", i+1)), int(i + 1), nil
}

// PartitionedDB is a time series storage.
type PartitionedDB struct {
	logger log.Logger
	dir    string

	partitionPow uint
	Partitions   []*DB
}

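// isPowTwo reports whether x is a positive power of two, via the
// x&(x-1) == 0 bit trick.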
func isPowTwo(x int) bool {
	return x > 0 && (x&(x-1)) == 0
}

// OpenPartitioned opens or creates a new partitioned DB.
func OpenPartitioned(dir string, n int, l log.Logger, opts *Options) (*PartitionedDB, error) {
	if !isPowTwo(n) {
		return nil, errors.Errorf("%d is not a power of two", n)
	}
	if opts == nil {
		opts = DefaultOptions
	}
	if l == nil {
		l = log.NewLogfmtLogger(os.Stdout)
		l = log.NewContext(l).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
	}

	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}
	c := &PartitionedDB{
		logger:       l,
		dir:          dir,
		partitionPow: uint(math.Log2(float64(n))),
	}

	// Initialize vertical partitions. The partition count was validated to be
	// a power of two above, as required by the bitshift-modulo used to pick
	// a partition.
	for i := 0; i < n; i++ {
		l := log.NewContext(l).With("partition", i)
		d := partitionDir(dir, i)

		s, err := Open(d, l, opts)
		if err != nil {
			return nil, fmt.Errorf("initializing partition %q failed: %s", d, err)
		}
		c.Partitions = append(c.Partitions, s)
	}
	return c, nil
}

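// A minimal usage sketch (path and partition count are illustrative):
//
//	db, err := OpenPartitioned("data", 4, nil, DefaultOptions)
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()
//
//	app := db.Appender()
//	// ... Add/AddFast samples ...
//	if err := app.Commit(); err != nil {
//		// handle error
//	}
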
func partitionDir(base string, i int) string {
	return filepath.Join(base, fmt.Sprintf("p-%04d", i))
}

// Close closes the database.
func (db *PartitionedDB) Close() error {
	var g errgroup.Group

	for _, partition := range db.Partitions {
		g.Go(partition.Close)
	}
	return g.Wait()
}

// Appender returns a new appender against the database.
func (db *PartitionedDB) Appender() Appender {
	app := &partitionedAppender{db: db}

	for _, p := range db.Partitions {
		app.partitions = append(app.partitions, p.Appender().(*dbAppender))
	}
	return app
}

type partitionedAppender struct {
	db         *PartitionedDB
	partitions []*dbAppender
}

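// Add routes the sample to the partition selected by the top partitionPow
// bits of the label set hash and packs the partition index into bits 48-55
// of the returned reference.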
func (a *partitionedAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
	h := lset.Hash()
	p := h >> (64 - a.db.partitionPow)

	ref, err := a.partitions[p].hashedAdd(h, lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref | (p << 48), nil
}

func (a *partitionedAppender) AddFast(ref uint64, t int64, v float64) error {
	p := uint8((ref << 8) >> 56)
	return a.partitions[p].AddFast(ref, t, v)
}

func (a *partitionedAppender) Commit() error {
	var merr MultiError

	for _, p := range a.partitions {
		merr.Add(p.Commit())
	}
	return merr.Err()
}

func (a *partitionedAppender) Rollback() error {
	var merr MultiError

	for _, p := range a.partitions {
		merr.Add(p.Rollback())
	}
	return merr.Err()
}

// The MultiError type implements the error interface, and contains the
// errors used to construct it.
type MultiError []error

// Error returns a concatenated string of the contained errors.
func (es MultiError) Error() string {
	var buf bytes.Buffer

	if len(es) > 1 {
		fmt.Fprintf(&buf, "%d errors: ", len(es))
	}
	for i, err := range es {
		if i != 0 {
			buf.WriteString("; ")
		}
		buf.WriteString(err.Error())
	}
	return buf.String()
}

// Add adds the error to the error list if it is not nil.
func (es *MultiError) Add(err error) {
	if err == nil {
		return
	}
	if merr, ok := err.(MultiError); ok {
		*es = append(*es, merr...)
	} else {
		*es = append(*es, err)
	}
}

// Err returns the error list as an error or nil if it is empty.
func (es MultiError) Err() error {
	if len(es) == 0 {
		return nil
	}
	return es
}

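// yoloString converts a byte slice to a string without copying. The input
// bytes must not be mutated while the returned string is in use.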
func yoloString(b []byte) string {
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))

	h := reflect.StringHeader{
		Data: sh.Data,
		Len:  sh.Len,
	}
	return *((*string)(unsafe.Pointer(&h)))
}