Export refdSample

The type was part of exported method signatures and should therefore
be exported as well.
Fabian Reinartz 2017-05-12 17:06:26 +02:00
parent 5534e6c53c
commit 535532ca02
3 changed files with 71 additions and 72 deletions
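
With RefSample and its fields exported, code outside the tsdb package can build the sample slice that WAL.Log expects. A minimal sketch of such a caller, assuming the post-commit API shown below; the import paths and the surrounding setup are illustrative assumptions, not part of this commit:

    // Hypothetical out-of-package caller: it can only be written because
    // RefSample and its Ref/T/V fields are exported.
    package main

    import (
        "log"
        "time"

        "github.com/prometheus/tsdb"        // import path assumed for the sketch
        "github.com/prometheus/tsdb/labels"
    )

    func main() {
        w, err := tsdb.OpenWAL("data/wal", nil, 5*time.Second)
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        lset := labels.Labels{{Name: "__name__", Value: "up"}}
        samples := []tsdb.RefSample{
            // Ref ties the sample to a series; 0 is only a placeholder here.
            {Ref: 0, T: time.Now().Unix() * 1000, V: 1},
        }

        // Before this change the sample type was unexported, so this call
        // could not be made from outside the package.
        if err := w.Log([]labels.Labels{lset}, samples); err != nil {
            log.Fatal(err)
        }
    }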

head.go (38 changed lines)

@@ -131,13 +131,13 @@ Outer:
             h.meta.Stats.NumSeries++
         }
         for _, s := range samples {
-            if int(s.ref) >= len(h.series) {
-                l.Log("msg", "unknown series reference, abort WAL restore", "got", s.ref, "max", len(h.series)-1)
+            if int(s.Ref) >= len(h.series) {
+                l.Log("msg", "unknown series reference, abort WAL restore", "got", s.Ref, "max", len(h.series)-1)
                 break Outer
             }
-            h.series[s.ref].append(s.t, s.v)
+            h.series[s.Ref].append(s.T, s.V)

-            if !h.inBounds(s.t) {
+            if !h.inBounds(s.T) {
                 return nil, errors.Wrap(ErrOutOfBounds, "consume WAL")
             }
             h.meta.Stats.NumSamples++
@@ -262,15 +262,15 @@ func (h *HeadBlock) Busy() bool {
 var headPool = sync.Pool{}

-func getHeadAppendBuffer() []refdSample {
+func getHeadAppendBuffer() []RefSample {
     b := headPool.Get()
     if b == nil {
-        return make([]refdSample, 0, 512)
+        return make([]RefSample, 0, 512)
     }
-    return b.([]refdSample)
+    return b.([]RefSample)
 }

-func putHeadAppendBuffer(b []refdSample) {
+func putHeadAppendBuffer(b []RefSample) {
     headPool.Put(b[:0])
 }
@@ -282,7 +282,7 @@ type headAppender struct {
     refmap    map[uint64]uint64
     newLabels []labels.Labels

-    samples []refdSample
+    samples []RefSample
 }

 type hashedLabels struct {
@@ -290,12 +290,6 @@ type hashedLabels struct {
     labels labels.Labels
 }

-type refdSample struct {
-    ref uint64
-    t   int64
-    v   float64
-}
-
 func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
     if !a.inBounds(t) {
         return 0, ErrOutOfBounds
@@ -370,10 +364,10 @@ func (a *headAppender) AddFast(ref uint64, t int64, v float64) error {
         }
     }

-    a.samples = append(a.samples, refdSample{
-        ref: ref,
-        t:   t,
-        v:   v,
+    a.samples = append(a.samples, RefSample{
+        Ref: ref,
+        T:   t,
+        V:   v,
     })
     return nil
 }
@@ -419,8 +413,8 @@ func (a *headAppender) Commit() error {
     for i := range a.samples {
         s := &a.samples[i]

-        if s.ref&(1<<32) > 0 {
-            s.ref = a.refmap[s.ref]
+        if s.Ref&(1<<32) > 0 {
+            s.Ref = a.refmap[s.Ref]
         }
     }
@@ -434,7 +428,7 @@ func (a *headAppender) Commit() error {
     total := uint64(len(a.samples))

     for _, s := range a.samples {
-        if !a.series[s.ref].append(s.t, s.v) {
+        if !a.series[s.Ref].append(s.T, s.V) {
             total--
         }
     }
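
A side note on the buffer hunks above: getHeadAppendBuffer and putHeadAppendBuffer hand out a []RefSample from a sync.Pool and return it truncated to length zero, so the backing array is reused across append transactions instead of being reallocated. A self-contained sketch of that pattern (standalone example, not the package's file):

    // Standalone sketch of the pooled append-buffer pattern above.
    package main

    import (
        "fmt"
        "sync"
    )

    type RefSample struct {
        Ref uint64
        T   int64
        V   float64
    }

    var headPool = sync.Pool{}

    func getHeadAppendBuffer() []RefSample {
        b := headPool.Get()
        if b == nil {
            return make([]RefSample, 0, 512)
        }
        return b.([]RefSample)
    }

    func putHeadAppendBuffer(b []RefSample) {
        headPool.Put(b[:0]) // keep the capacity, drop the contents
    }

    func main() {
        buf := getHeadAppendBuffer()
        buf = append(buf, RefSample{Ref: 1, T: 1000, V: 42})
        fmt.Println(len(buf), cap(buf)) // 1 512
        putHeadAppendBuffer(buf)
    }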

wal.go (71 changed lines)

@@ -50,7 +50,7 @@ const (
 )

 // WAL is a write ahead log for series data. It can only be written to.
-// Use WALReader to read back from a write ahead log.
+// Use walReader to read back from a write ahead log.
 type WAL struct {
     mtx sync.Mutex
@@ -69,6 +69,13 @@ type WAL struct {
     donec chan struct{}
 }

+// RefSample is a timestamp/value pair associated with a reference to a series.
+type RefSample struct {
+    Ref uint64
+    T   int64
+    V   float64
+}
+
 const (
     walDirName          = "wal"
     walSegmentSizeBytes = 256 * 1024 * 1024 // 256 MB
@@ -119,12 +126,12 @@ func OpenWAL(dir string, logger log.Logger, flushInterval time.Duration) (*WAL,
 // Reader returns a new reader over the the write ahead log data.
 // It must be completely consumed before writing to the WAL.
-func (w *WAL) Reader() *WALReader {
-    return NewWALReader(w.logger, w)
+func (w *WAL) Reader() WALReader {
+    return newWALReader(w, w.logger)
 }

 // Log writes a batch of new series labels and samples to the log.
-func (w *WAL) Log(series []labels.Labels, samples []refdSample) error {
+func (w *WAL) Log(series []labels.Labels, samples []RefSample) error {
     if err := w.encodeSeries(series); err != nil {
         return err
     }
@@ -395,7 +402,7 @@ func (w *WAL) encodeSeries(series []labels.Labels) error {
     return w.entry(WALEntrySeries, walSeriesSimple, buf)
 }

-func (w *WAL) encodeSamples(samples []refdSample) error {
+func (w *WAL) encodeSamples(samples []RefSample) error {
     if len(samples) == 0 {
         return nil
     }
@@ -409,27 +416,27 @@ func (w *WAL) encodeSamples(samples []refdSample) error {
     // TODO(fabxc): optimize for all samples having the same timestamp.
     first := samples[0]

-    binary.BigEndian.PutUint64(b, first.ref)
+    binary.BigEndian.PutUint64(b, first.Ref)
     buf = append(buf, b[:8]...)
-    binary.BigEndian.PutUint64(b, uint64(first.t))
+    binary.BigEndian.PutUint64(b, uint64(first.T))
     buf = append(buf, b[:8]...)

     for _, s := range samples {
-        n := binary.PutVarint(b, int64(s.ref)-int64(first.ref))
+        n := binary.PutVarint(b, int64(s.Ref)-int64(first.Ref))
         buf = append(buf, b[:n]...)
-        n = binary.PutVarint(b, s.t-first.t)
+        n = binary.PutVarint(b, s.T-first.T)
         buf = append(buf, b[:n]...)
-        binary.BigEndian.PutUint64(b, math.Float64bits(s.v))
+        binary.BigEndian.PutUint64(b, math.Float64bits(s.V))
         buf = append(buf, b[:8]...)
     }

     return w.entry(WALEntrySamples, walSamplesSimple, buf)
 }

-// WALReader decodes and emits write ahead log entries.
-type WALReader struct {
+// walReader decodes and emits write ahead log entries.
+type walReader struct {
     logger log.Logger
     wal    *WAL
@@ -439,37 +446,35 @@ type WALReader struct {
     err     error
     labels  []labels.Labels
-    samples []refdSample
+    samples []RefSample
 }

-// NewWALReader returns a new WALReader over the sequence of the given ReadClosers.
-func NewWALReader(logger log.Logger, w *WAL) *WALReader {
-    if logger == nil {
-        logger = log.NewNopLogger()
+func newWALReader(w *WAL, l log.Logger) *walReader {
+    if l == nil {
+        l = log.NewNopLogger()
     }
-    r := &WALReader{
-        logger: logger,
+    return &walReader{
+        logger: l,
         wal:    w,
         buf:    make([]byte, 0, 128*4096),
         crc32:  crc32.New(crc32.MakeTable(crc32.Castagnoli)),
     }
-    return r
 }

 // At returns the last decoded entry of labels or samples.
 // The returned slices are only valid until the next call to Next(). Their elements
 // have to be copied to preserve them.
-func (r *WALReader) At() ([]labels.Labels, []refdSample) {
+func (r *walReader) At() ([]labels.Labels, []RefSample) {
     return r.labels, r.samples
 }

 // Err returns the last error the reader encountered.
-func (r *WALReader) Err() error {
+func (r *walReader) Err() error {
     return r.err
 }

 // nextEntry retrieves the next entry. It is also used as a testing hook.
-func (r *WALReader) nextEntry() (WALEntryType, byte, []byte, error) {
+func (r *walReader) nextEntry() (WALEntryType, byte, []byte, error) {
     if r.cur >= len(r.wal.files) {
         return 0, 0, nil, io.EOF
     }
@@ -492,7 +497,7 @@ func (r *WALReader) nextEntry() (WALEntryType, byte, []byte, error) {
 // Next returns decodes the next entry pair and returns true
 // if it was succesful.
-func (r *WALReader) Next() bool {
+func (r *walReader) Next() bool {
     r.labels = r.labels[:0]
     r.samples = r.samples[:0]
@@ -549,12 +554,12 @@ func (r *WALReader) Next() bool {
     return r.err == nil
 }

-func (r *WALReader) current() *os.File {
+func (r *walReader) current() *os.File {
     return r.wal.files[r.cur]
 }

 // truncate the WAL after the last valid entry.
-func (r *WALReader) truncate(lastOffset int64) error {
+func (r *walReader) truncate(lastOffset int64) error {
     r.logger.Log("msg", "WAL corruption detected; truncating",
         "err", r.err, "file", r.current().Name(), "pos", lastOffset)
@@ -582,7 +587,7 @@ func walCorruptionErrf(s string, args ...interface{}) error {
     return walCorruptionErr(errors.Errorf(s, args...))
 }

-func (r *WALReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
+func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
     r.crc32.Reset()
     tr := io.TeeReader(cr, r.crc32)
@@ -629,7 +634,7 @@ func (r *WALReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
     return etype, flag, buf, nil
 }

-func (r *WALReader) decodeSeries(flag byte, b []byte) error {
+func (r *walReader) decodeSeries(flag byte, b []byte) error {
     for len(b) > 0 {
         l, n := binary.Uvarint(b)
         if n < 1 {
@@ -659,7 +664,7 @@ func (r *WALReader) decodeSeries(flag byte, b []byte) error {
     return nil
 }

-func (r *WALReader) decodeSamples(flag byte, b []byte) error {
+func (r *walReader) decodeSamples(flag byte, b []byte) error {
     if len(b) < 16 {
         return errors.Wrap(errInvalidSize, "header length")
     }
@@ -670,7 +675,7 @@ func (r *WALReader) decodeSamples(flag byte, b []byte) error {
     b = b[16:]

     for len(b) > 0 {
-        var smpl refdSample
+        var smpl RefSample

         dref, n := binary.Varint(b)
         if n < 1 {
@@ -678,19 +683,19 @@ func (r *WALReader) decodeSamples(flag byte, b []byte) error {
         }
         b = b[n:]
-        smpl.ref = uint64(int64(baseRef) + dref)
+        smpl.Ref = uint64(int64(baseRef) + dref)

         dtime, n := binary.Varint(b)
         if n < 1 {
             return errors.Wrap(errInvalidSize, "sample timestamp delta")
         }
         b = b[n:]
-        smpl.t = baseTime + dtime
+        smpl.T = baseTime + dtime

         if len(b) < 8 {
             return errors.Wrapf(errInvalidSize, "sample value bits %d", len(b))
         }
-        smpl.v = float64(math.Float64frombits(binary.BigEndian.Uint64(b)))
+        smpl.V = float64(math.Float64frombits(binary.BigEndian.Uint64(b)))
         b = b[8:]

         r.samples = append(r.samples, smpl)
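
The encodeSamples and decodeSamples hunks above show the batch layout for samples: an 8-byte big-endian reference and timestamp taken from the first sample, followed per sample by a varint reference delta, a varint timestamp delta, and the raw float64 bits of the value. The following standalone sketch round-trips that layout; it is an illustration only, with no entry framing, CRC, or error handling, and the encode/decode helper names are made up for the example:

    // Standalone round trip through the sample layout shown above.
    package main

    import (
        "encoding/binary"
        "fmt"
        "math"
    )

    type RefSample struct {
        Ref uint64
        T   int64
        V   float64
    }

    func encode(samples []RefSample) []byte {
        if len(samples) == 0 {
            return nil
        }
        buf := make([]byte, 0, 16+len(samples)*24)
        b := make([]byte, binary.MaxVarintLen64)

        // Base reference and timestamp come from the first sample.
        first := samples[0]
        binary.BigEndian.PutUint64(b, first.Ref)
        buf = append(buf, b[:8]...)
        binary.BigEndian.PutUint64(b, uint64(first.T))
        buf = append(buf, b[:8]...)

        for _, s := range samples {
            // Per sample: varint ref delta, varint time delta, raw value bits.
            n := binary.PutVarint(b, int64(s.Ref)-int64(first.Ref))
            buf = append(buf, b[:n]...)
            n = binary.PutVarint(b, s.T-first.T)
            buf = append(buf, b[:n]...)
            binary.BigEndian.PutUint64(b, math.Float64bits(s.V))
            buf = append(buf, b[:8]...)
        }
        return buf
    }

    func decode(b []byte) []RefSample {
        baseRef := binary.BigEndian.Uint64(b)
        baseTime := int64(binary.BigEndian.Uint64(b[8:]))
        b = b[16:]

        var out []RefSample
        for len(b) > 0 {
            dref, n := binary.Varint(b)
            b = b[n:]
            dtime, n := binary.Varint(b)
            b = b[n:]
            v := math.Float64frombits(binary.BigEndian.Uint64(b))
            b = b[8:]
            out = append(out, RefSample{
                Ref: uint64(int64(baseRef) + dref),
                T:   baseTime + dtime,
                V:   v,
            })
        }
        return out
    }

    func main() {
        in := []RefSample{{Ref: 7, T: 1000, V: 0.5}, {Ref: 8, T: 1001, V: 1.5}}
        fmt.Println(decode(encode(in))) // [{7 1000 0.5} {8 1001 1.5}]
    }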

wal_test.go

@@ -122,7 +122,7 @@ func TestWAL_cut(t *testing.T) {
     // We cannot actually check for correct pre-allocation as it is
     // optional per filesystem and handled transparently.
-    et, flag, b, err := NewWALReader(nil, nil).entry(f)
+    et, flag, b, err := newWALReader(nil, nil).entry(f)
     require.NoError(t, err)
     require.Equal(t, WALEntrySeries, et)
     require.Equal(t, flag, byte(walSeriesSimple))
@@ -148,7 +148,7 @@ func TestWAL_Log_Restore(t *testing.T) {
     var (
         recordedSeries  [][]labels.Labels
-        recordedSamples [][]refdSample
+        recordedSamples [][]RefSample
     )
     var totalSamples int
@@ -165,7 +165,7 @@ func TestWAL_Log_Restore(t *testing.T) {
         var (
             resultSeries  [][]labels.Labels
-            resultSamples [][]refdSample
+            resultSamples [][]RefSample
         )

         for r.Next() {
@@ -177,7 +177,7 @@ func TestWAL_Log_Restore(t *testing.T) {
                 resultSeries = append(resultSeries, clsets)
             }
             if len(smpls) > 0 {
-                csmpls := make([]refdSample, len(smpls))
+                csmpls := make([]RefSample, len(smpls))
                 copy(csmpls, smpls)
                 resultSamples = append(resultSamples, csmpls)
             }
@@ -191,13 +191,13 @@ func TestWAL_Log_Restore(t *testing.T) {
     // Insert in batches and generate different amounts of samples for each.
     for i := 0; i < len(series); i += stepSize {
-        var samples []refdSample
+        var samples []RefSample

         for j := 0; j < i*10; j++ {
-            samples = append(samples, refdSample{
-                ref: uint64(j % 10000),
-                t:   int64(j * 2),
-                v:   rand.Float64(),
+            samples = append(samples, RefSample{
+                Ref: uint64(j % 10000),
+                T:   int64(j * 2),
+                V:   rand.Float64(),
             })
         }
@@ -292,13 +292,13 @@ func TestWALRestoreCorrupted(t *testing.T) {
             w, err := OpenWAL(dir, nil, 0)
             require.NoError(t, err)

-            require.NoError(t, w.Log(nil, []refdSample{{t: 1, v: 2}}))
-            require.NoError(t, w.Log(nil, []refdSample{{t: 2, v: 3}}))
+            require.NoError(t, w.Log(nil, []RefSample{{T: 1, V: 2}}))
+            require.NoError(t, w.Log(nil, []RefSample{{T: 2, V: 3}}))

             require.NoError(t, w.cut())

-            require.NoError(t, w.Log(nil, []refdSample{{t: 3, v: 4}}))
-            require.NoError(t, w.Log(nil, []refdSample{{t: 5, v: 6}}))
+            require.NoError(t, w.Log(nil, []RefSample{{T: 3, V: 4}}))
+            require.NoError(t, w.Log(nil, []RefSample{{T: 5, V: 6}}))

             require.NoError(t, w.Close())
@@ -318,13 +318,13 @@ func TestWALRestoreCorrupted(t *testing.T) {
             require.True(t, r.Next())
             l, s := r.At()
             require.Equal(t, 0, len(l))
-            require.Equal(t, []refdSample{{t: 1, v: 2}}, s)
+            require.Equal(t, []RefSample{{T: 1, V: 2}}, s)

             // Truncation should happen transparently and now cause an error.
             require.False(t, r.Next())
             require.Nil(t, r.Err())

-            require.NoError(t, w2.Log(nil, []refdSample{{t: 99, v: 100}}))
+            require.NoError(t, w2.Log(nil, []RefSample{{T: 99, V: 100}}))
             require.NoError(t, w2.Close())

             files, err := fileutil.ReadDir(dir)
@@ -341,12 +341,12 @@ func TestWALRestoreCorrupted(t *testing.T) {
             require.True(t, r.Next())
             l, s = r.At()
             require.Equal(t, 0, len(l))
-            require.Equal(t, []refdSample{{t: 1, v: 2}}, s)
+            require.Equal(t, []RefSample{{T: 1, V: 2}}, s)

             require.True(t, r.Next())
             l, s = r.At()
             require.Equal(t, 0, len(l))
-            require.Equal(t, []refdSample{{t: 99, v: 100}}, s)
+            require.Equal(t, []RefSample{{T: 99, V: 100}}, s)

             require.False(t, r.Next())
             require.Nil(t, r.Err())