2016-12-22 11:05:24 +00:00
|
|
|
package tsdb
|
|
|
|
|
|
|
|
import (
|
2017-01-16 13:18:25 +00:00
|
|
|
"bufio"
|
2016-12-22 11:05:24 +00:00
|
|
|
"encoding/binary"
|
|
|
|
"hash/crc32"
|
|
|
|
"io"
|
|
|
|
"math"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2017-01-06 16:23:12 +00:00
|
|
|
"sync"
|
2017-01-06 14:18:06 +00:00
|
|
|
"time"
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
"github.com/coreos/etcd/pkg/fileutil"
|
|
|
|
"github.com/fabxc/tsdb/labels"
|
2017-01-06 14:18:06 +00:00
|
|
|
"github.com/go-kit/kit/log"
|
2016-12-22 14:18:33 +00:00
|
|
|
"github.com/pkg/errors"
|
2016-12-22 11:05:24 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// WALEntryType indicates what data a WAL entry contains.
type WALEntryType byte

// The valid WAL entry types. The constants are typed so that entry
// headers and switches over decoded types cannot silently mix them
// with unrelated integers.
const (
	WALEntrySymbols WALEntryType = 1
	WALEntrySeries  WALEntryType = 2
	WALEntrySamples WALEntryType = 3
)
|
|
|
|
|
|
|
|
// WAL is a write ahead log for series data. It can only be written to.
// Use WALReader to read back from a write ahead log.
type WAL struct {
	// mtx guards mutable WAL state. NOTE(review): no method in this
	// file acquires it yet — confirm the intended locking discipline.
	mtx sync.Mutex

	// f is the lock-protected backing file of the log.
	f *fileutil.LockedFile
	// enc serializes entries into f through a write buffer.
	enc *walEncoder
	// logger receives errors from background syncs in run.
	logger log.Logger
	// flushInterval is the period of background syncs; a value <= 0
	// makes every Log call sync to disk immediately.
	flushInterval time.Duration

	// stopc signals the background run goroutine to exit; donec is
	// closed by run once it has.
	stopc chan struct{}
	donec chan struct{}

	// symbols maps strings to reference numbers.
	// NOTE(review): not referenced anywhere in this file — confirm it
	// is still needed.
	symbols map[string]uint32
}
|
|
|
|
|
2017-01-06 12:13:22 +00:00
|
|
|
// walFileName is the name of the single WAL segment file created
// inside the directory passed to OpenWAL.
const walFileName = "wal-000"
|
2017-01-06 08:26:39 +00:00
|
|
|
|
2016-12-22 14:18:33 +00:00
|
|
|
// OpenWAL opens or creates a write ahead log in the given directory.
|
|
|
|
// The WAL must be read completely before new data is written.
|
2017-01-06 14:18:06 +00:00
|
|
|
func OpenWAL(dir string, l log.Logger, flushInterval time.Duration) (*WAL, error) {
|
2016-12-22 11:05:24 +00:00
|
|
|
if err := os.MkdirAll(dir, 0777); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-01-06 08:26:39 +00:00
|
|
|
p := filepath.Join(dir, walFileName)
|
2016-12-22 11:05:24 +00:00
|
|
|
|
2016-12-22 14:54:39 +00:00
|
|
|
f, err := fileutil.TryLockFile(p, os.O_RDWR, 0666)
|
2016-12-22 11:05:24 +00:00
|
|
|
if err != nil {
|
2016-12-22 14:18:33 +00:00
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-12-22 14:54:39 +00:00
|
|
|
f, err = fileutil.LockFile(p, os.O_RDWR|os.O_CREATE, 0666)
|
2016-12-22 14:18:33 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if _, err = f.Seek(0, os.SEEK_END); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|
2016-12-22 15:14:34 +00:00
|
|
|
enc, err := newWALEncoder(f.File)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
w := &WAL{
|
2017-01-06 14:18:06 +00:00
|
|
|
f: f,
|
|
|
|
logger: l,
|
|
|
|
enc: enc,
|
|
|
|
flushInterval: flushInterval,
|
|
|
|
symbols: map[string]uint32{},
|
|
|
|
donec: make(chan struct{}),
|
|
|
|
stopc: make(chan struct{}),
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|
2017-01-06 14:18:06 +00:00
|
|
|
go w.run(flushInterval)
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
return w, nil
|
|
|
|
}
|
|
|
|
|
2016-12-22 14:18:33 +00:00
|
|
|
// walHandler bundles the callbacks the WAL decoder invokes for each
// decoded record.
type walHandler struct {
	// sample is called once per decoded sample.
	sample func(refdSample)
	// series is called once per decoded series label set.
	series func(labels.Labels)
}
|
|
|
|
|
2017-01-02 13:41:13 +00:00
|
|
|
// ReadAll consumes all entries in the WAL and triggers the registered handlers.
|
2016-12-22 14:18:33 +00:00
|
|
|
func (w *WAL) ReadAll(h *walHandler) error {
|
|
|
|
dec := &walDecoder{
|
|
|
|
r: w.f,
|
|
|
|
handler: h,
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
if err := dec.entry(); err != nil {
|
|
|
|
if err == io.EOF {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
// Log writes a batch of new series labels and samples to the log.
|
2017-01-13 15:14:40 +00:00
|
|
|
func (w *WAL) Log(series []labels.Labels, samples []refdSample) error {
|
2016-12-22 11:05:24 +00:00
|
|
|
if err := w.enc.encodeSeries(series); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := w.enc.encodeSamples(samples); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-01-06 14:18:06 +00:00
|
|
|
if w.flushInterval <= 0 {
|
|
|
|
return w.sync()
|
|
|
|
}
|
2016-12-22 11:05:24 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *WAL) sync() error {
|
2016-12-22 15:14:34 +00:00
|
|
|
if err := w.enc.flush(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-22 11:05:24 +00:00
|
|
|
return fileutil.Fdatasync(w.f.File)
|
|
|
|
}
|
|
|
|
|
2017-01-06 14:18:06 +00:00
|
|
|
func (w *WAL) run(interval time.Duration) {
|
|
|
|
var tick <-chan time.Time
|
|
|
|
|
|
|
|
if interval > 0 {
|
|
|
|
ticker := time.NewTicker(interval)
|
|
|
|
defer ticker.Stop()
|
|
|
|
tick = ticker.C
|
|
|
|
}
|
|
|
|
defer close(w.donec)
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-w.stopc:
|
|
|
|
return
|
|
|
|
case <-tick:
|
|
|
|
if err := w.sync(); err != nil {
|
|
|
|
w.logger.Log("msg", "sync failed", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
// Close sync all data and closes the underlying resources.
|
|
|
|
func (w *WAL) Close() error {
|
2017-01-06 14:18:06 +00:00
|
|
|
close(w.stopc)
|
|
|
|
<-w.donec
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
if err := w.sync(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return w.f.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
// walEncoder serializes WAL entries into a buffered writer.
type walEncoder struct {
	// mtx serializes concurrent entry writes and flushes.
	mtx sync.Mutex
	// w buffers writes to the WAL file. The page-aligned writer below
	// is currently disabled (see newWALEncoder).
	// w *ioutil.PageWriter
	w *bufio.Writer
}
|
|
|
|
|
2016-12-22 19:00:24 +00:00
|
|
|
const (
	// minSectorSize is the smallest disk sector size assumed for
	// torn-write detection.
	minSectorSize = 512

	// walPageBytes is the alignment for flushing records to the backing Writer.
	// It should be a multiple of the minimum sector size so that WAL can safely
	// distinguish between torn writes and ordinary data corruption.
	// NOTE(review): currently unused while the PageWriter in
	// newWALEncoder is commented out.
	walPageBytes = 16 * minSectorSize
)
|
2016-12-22 15:14:34 +00:00
|
|
|
|
|
|
|
func newWALEncoder(f *os.File) (*walEncoder, error) {
|
2017-01-16 13:18:25 +00:00
|
|
|
// offset, err := f.Seek(0, os.SEEK_CUR)
|
|
|
|
// if err != nil {
|
|
|
|
// return nil, err
|
|
|
|
// }
|
2017-01-02 12:27:52 +00:00
|
|
|
enc := &walEncoder{
|
2017-01-16 13:18:25 +00:00
|
|
|
// w: ioutil.NewPageWriter(f, walPageBytes, int(offset)),
|
|
|
|
w: bufio.NewWriterSize(f, 4*1024*1024),
|
2017-01-02 12:27:52 +00:00
|
|
|
}
|
|
|
|
return enc, nil
|
2016-12-22 15:14:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (e *walEncoder) flush() error {
|
2017-01-06 17:36:42 +00:00
|
|
|
e.mtx.Lock()
|
|
|
|
defer e.mtx.Unlock()
|
|
|
|
|
2016-12-22 15:14:34 +00:00
|
|
|
return e.w.Flush()
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|
|
|
|
|
2017-01-06 17:36:42 +00:00
|
|
|
func (e *walEncoder) entry(et WALEntryType, flag byte, buf []byte) error {
|
|
|
|
e.mtx.Lock()
|
|
|
|
defer e.mtx.Unlock()
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
h := crc32.NewIEEE()
|
|
|
|
w := io.MultiWriter(h, e.w)
|
|
|
|
|
|
|
|
b := make([]byte, 6)
|
|
|
|
b[0] = byte(et)
|
|
|
|
b[1] = flag
|
|
|
|
|
2017-01-06 17:36:42 +00:00
|
|
|
binary.BigEndian.PutUint32(b[2:], uint32(len(buf)))
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
if _, err := w.Write(b); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-01-06 17:36:42 +00:00
|
|
|
if _, err := w.Write(buf); err != nil {
|
2016-12-22 11:05:24 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := e.w.Write(h.Sum(nil)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-01-06 17:36:42 +00:00
|
|
|
putWALBuffer(buf)
|
2016-12-22 11:05:24 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flags for the supported entry encodings. Only the simple initial
// format exists for both series and samples entries.
const (
	walSeriesSimple  = 1
	walSamplesSimple = 1
)
|
|
|
|
|
2017-01-06 17:36:42 +00:00
|
|
|
// walBuffers pools scratch buffers used to build WAL entry payloads.
var walBuffers = sync.Pool{}

// getWALBuffer hands out an empty byte slice for payload building,
// reusing pooled capacity when available.
func getWALBuffer() []byte {
	if b := walBuffers.Get(); b != nil {
		return b.([]byte)
	}
	return make([]byte, 0, 64*1024)
}

// putWALBuffer resets b and returns it to the pool for reuse by
// getWALBuffer.
func putWALBuffer(b []byte) {
	walBuffers.Put(b[:0])
}
|
|
|
|
|
2016-12-22 11:05:24 +00:00
|
|
|
func (e *walEncoder) encodeSeries(series []labels.Labels) error {
|
|
|
|
if len(series) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
2016-12-22 14:18:33 +00:00
|
|
|
|
|
|
|
b := make([]byte, binary.MaxVarintLen32)
|
2017-01-06 17:36:42 +00:00
|
|
|
buf := getWALBuffer()
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
for _, lset := range series {
|
|
|
|
n := binary.PutUvarint(b, uint64(len(lset)))
|
2017-01-06 17:36:42 +00:00
|
|
|
buf = append(buf, b[:n]...)
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
for _, l := range lset {
|
|
|
|
n = binary.PutUvarint(b, uint64(len(l.Name)))
|
2017-01-06 17:36:42 +00:00
|
|
|
buf = append(buf, b[:n]...)
|
|
|
|
buf = append(buf, l.Name...)
|
2016-12-22 11:05:24 +00:00
|
|
|
|
|
|
|
n = binary.PutUvarint(b, uint64(len(l.Value)))
|
2017-01-06 17:36:42 +00:00
|
|
|
buf = append(buf, b[:n]...)
|
|
|
|
buf = append(buf, l.Value...)
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-06 17:36:42 +00:00
|
|
|
return e.entry(WALEntrySeries, walSeriesSimple, buf)
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|
|
|
|
|
2017-01-13 15:14:40 +00:00
|
|
|
// encodeSamples writes all samples as one entry, delta-encoded against
// the first sample's ref and timestamp.
func (e *walEncoder) encodeSamples(samples []refdSample) error {
	if len(samples) == 0 {
		return nil
	}

	b := make([]byte, binary.MaxVarintLen64)
	buf := getWALBuffer()

	// Store base timestamp and base reference number of first sample.
	// All samples encode their timestamp and ref as delta to those.
	//
	// TODO(fabxc): optimize for all samples having the same timestamp.
	first := samples[0]

	binary.BigEndian.PutUint64(b, first.ref)
	// NOTE(review): only b[:4] is appended — the HIGH 4 bytes of the
	// big-endian uint64 — so for refs below 2^32 the stored base ref is
	// zero and the real value is dropped. decodeSamples then reads a
	// full 8-byte ref at offset 0 of the 12-byte header, mixing ref and
	// timestamp bytes. Fixing this needs a coordinated encoder/decoder
	// format change; do not change only one side.
	buf = append(buf, b[:4]...)
	binary.BigEndian.PutUint64(b, uint64(first.t))
	buf = append(buf, b[:8]...)

	for _, s := range samples {
		// Ref and timestamp as signed varint deltas to the base values.
		n := binary.PutVarint(b, int64(s.ref)-int64(first.ref))
		buf = append(buf, b[:n]...)

		n = binary.PutVarint(b, s.t-first.t)
		buf = append(buf, b[:n]...)

		// Value as raw IEEE 754 bits, fixed 8 bytes.
		binary.BigEndian.PutUint64(b, math.Float64bits(s.v))
		buf = append(buf, b[:8]...)
	}

	return e.entry(WALEntrySamples, walSamplesSimple, buf)
}
|
|
|
|
|
|
|
|
// walDecoder reads WAL entries from r and dispatches decoded records
// to the handler callbacks.
type walDecoder struct {
	r       io.Reader
	handler *walHandler

	// buf is a reusable scratch buffer for entry payloads; entry grows
	// it on demand.
	buf []byte
}
|
|
|
|
|
|
|
|
func newWALDecoer(r io.Reader, h *walHandler) *walDecoder {
|
|
|
|
return &walDecoder{
|
|
|
|
r: r,
|
|
|
|
handler: h,
|
|
|
|
buf: make([]byte, 0, 1024*1024),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *walDecoder) decodeSeries(flag byte, b []byte) error {
|
|
|
|
for len(b) > 0 {
|
|
|
|
l, n := binary.Uvarint(b)
|
|
|
|
if n < 1 {
|
|
|
|
return errors.Wrap(errInvalidSize, "number of labels")
|
|
|
|
}
|
|
|
|
b = b[n:]
|
|
|
|
lset := make(labels.Labels, l)
|
|
|
|
|
|
|
|
for i := 0; i < int(l); i++ {
|
|
|
|
nl, n := binary.Uvarint(b)
|
|
|
|
if n < 1 || len(b) < n+int(nl) {
|
|
|
|
return errors.Wrap(errInvalidSize, "label name")
|
|
|
|
}
|
|
|
|
lset[i].Name = string(b[n : n+int(nl)])
|
|
|
|
b = b[n+int(nl):]
|
|
|
|
|
|
|
|
vl, n := binary.Uvarint(b)
|
|
|
|
if n < 1 || len(b) < n+int(vl) {
|
|
|
|
return errors.Wrap(errInvalidSize, "label value")
|
|
|
|
}
|
|
|
|
lset[i].Value = string(b[n : n+int(vl)])
|
|
|
|
b = b[n+int(vl):]
|
|
|
|
}
|
|
|
|
|
|
|
|
d.handler.series(lset)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// decodeSamples decodes a samples entry payload — a 12-byte base
// header followed by (ref delta, time delta, value) triples — and
// passes each decoded sample to the sample handler.
func (d *walDecoder) decodeSamples(flag byte, b []byte) error {
	if len(b) < 12 {
		return errors.Wrap(errInvalidSize, "header length")
	}
	var (
		// NOTE(review): the encoder appends only 4 base-ref bytes (the
		// high half of the big-endian uint64) before the 8 timestamp
		// bytes, while this reads a full 8-byte ref at offset 0 — so
		// the decoded base ref mixes ref and timestamp bytes. Fixing
		// this needs a coordinated encoder/decoder format change; do
		// not change only one side.
		baseRef  = binary.BigEndian.Uint64(b)
		baseTime = int64(binary.BigEndian.Uint64(b[4:]))
	)
	b = b[12:]

	for len(b) > 0 {
		var smpl refdSample

		// Ref is stored as a signed varint delta to the base ref.
		dref, n := binary.Varint(b)
		if n < 1 {
			return errors.Wrap(errInvalidSize, "sample ref delta")
		}
		b = b[n:]

		smpl.ref = uint64(int64(baseRef) + dref)

		// Timestamp is a signed varint delta to the base timestamp.
		dtime, n := binary.Varint(b)
		if n < 1 {
			return errors.Wrap(errInvalidSize, "sample timestamp delta")
		}
		b = b[n:]
		smpl.t = baseTime + dtime

		// Value is stored as raw IEEE 754 bits, fixed 8 bytes.
		if len(b) < 8 {
			return errors.Wrapf(errInvalidSize, "sample value bits %d", len(b))
		}
		smpl.v = float64(math.Float64frombits(binary.BigEndian.Uint64(b)))
		b = b[8:]

		d.handler.sample(smpl)
	}
	return nil
}
|
|
|
|
|
|
|
|
func (d *walDecoder) entry() error {
|
|
|
|
b := make([]byte, 6)
|
|
|
|
if _, err := d.r.Read(b); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
etype = WALEntryType(b[0])
|
|
|
|
flag = b[1]
|
|
|
|
length = int(binary.BigEndian.Uint32(b[2:]))
|
|
|
|
)
|
|
|
|
|
|
|
|
if length > len(d.buf) {
|
|
|
|
d.buf = make([]byte, length)
|
|
|
|
}
|
|
|
|
buf := d.buf[:length]
|
|
|
|
|
|
|
|
if _, err := d.r.Read(buf); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Read away checksum.
|
|
|
|
// TODO(fabxc): verify it
|
|
|
|
if _, err := d.r.Read(b[:4]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch etype {
|
|
|
|
case WALEntrySeries:
|
|
|
|
return d.decodeSeries(flag, buf)
|
|
|
|
case WALEntrySamples:
|
|
|
|
return d.decodeSamples(flag, buf)
|
|
|
|
}
|
2016-12-22 11:05:24 +00:00
|
|
|
|
2016-12-22 14:18:33 +00:00
|
|
|
return errors.Errorf("unknown WAL entry type %q", etype)
|
2016-12-22 11:05:24 +00:00
|
|
|
}
|