vendor: sync organisation migration of tsdb

This commit is contained in:
Fabian Reinartz 2017-04-04 11:33:51 +02:00
parent bbcf20ba01
commit cfb2a7f1d5
17 changed files with 865 additions and 54 deletions

@@ -4,11 +4,11 @@ import (
"time"
"unsafe"
- "github.com/fabxc/tsdb"
- tsdbLabels "github.com/fabxc/tsdb/labels"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/tsdb"
+ tsdbLabels "github.com/prometheus/tsdb/labels"
)
// adapter implements a storage.Storage around TSDB.

@@ -10,8 +10,8 @@ import (
"os"
"github.com/coreos/etcd/pkg/fileutil"
- "github.com/fabxc/tsdb/chunks"
"github.com/pkg/errors"
+ "github.com/prometheus/tsdb/chunks"
)
const (

vendor/github.com/prometheus/tsdb/chunks/bstream.go generated vendored Normal file
@@ -0,0 +1,169 @@
package chunks
import "io"
// bstream is a stream of bits
type bstream struct {
stream []byte // the data stream
count uint8 // how many bits are valid in current byte
}
func newBReader(b []byte) *bstream {
return &bstream{stream: b, count: 8}
}
func newBWriter(size int) *bstream {
return &bstream{stream: make([]byte, 0, size), count: 0}
}
func (b *bstream) clone() *bstream {
d := make([]byte, len(b.stream))
copy(d, b.stream)
return &bstream{stream: d, count: b.count}
}
func (b *bstream) bytes() []byte {
return b.stream
}
type bit bool
const (
zero bit = false
one bit = true
)
func (b *bstream) writeBit(bit bit) {
if b.count == 0 {
b.stream = append(b.stream, 0)
b.count = 8
}
i := len(b.stream) - 1
if bit {
b.stream[i] |= 1 << (b.count - 1)
}
b.count--
}
func (b *bstream) writeByte(byt byte) {
if b.count == 0 {
b.stream = append(b.stream, 0)
b.count = 8
}
i := len(b.stream) - 1
// Fill the b.count free bits of the current byte with the top bits of byt.
b.stream[i] |= byt >> (8 - b.count)
b.stream = append(b.stream, 0)
i++
b.stream[i] = byt << b.count
}
func (b *bstream) writeBits(u uint64, nbits int) {
u <<= (64 - uint(nbits))
for nbits >= 8 {
byt := byte(u >> 56)
b.writeByte(byt)
u <<= 8
nbits -= 8
}
for nbits > 0 {
b.writeBit((u >> 63) == 1)
u <<= 1
nbits--
}
}
func (b *bstream) readBit() (bit, error) {
if len(b.stream) == 0 {
return false, io.EOF
}
if b.count == 0 {
b.stream = b.stream[1:]
if len(b.stream) == 0 {
return false, io.EOF
}
b.count = 8
}
d := (b.stream[0] << (8 - b.count)) & 0x80
b.count--
return d != 0, nil
}
func (b *bstream) ReadByte() (byte, error) {
return b.readByte()
}
func (b *bstream) readByte() (byte, error) {
if len(b.stream) == 0 {
return 0, io.EOF
}
if b.count == 0 {
b.stream = b.stream[1:]
if len(b.stream) == 0 {
return 0, io.EOF
}
return b.stream[0], nil
}
if b.count == 8 {
b.count = 0
return b.stream[0], nil
}
byt := b.stream[0] << (8 - b.count)
b.stream = b.stream[1:]
if len(b.stream) == 0 {
return 0, io.EOF
}
// We just advanced the stream and can assume the shift to be 0.
byt |= b.stream[0] >> b.count
return byt, nil
}
func (b *bstream) readBits(nbits int) (uint64, error) {
var u uint64
for nbits >= 8 {
byt, err := b.readByte()
if err != nil {
return 0, err
}
u = (u << 8) | uint64(byt)
nbits -= 8
}
if nbits == 0 {
return u, nil
}
if nbits > int(b.count) {
u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count))
nbits -= int(b.count)
b.stream = b.stream[1:]
if len(b.stream) == 0 {
return 0, io.EOF
}
b.count = 8
}
u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits)))
b.count -= uint8(nbits)
return u, nil
}
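Aside for readers of this vendored file: the writer and reader are symmetric, so a short round trip shows the bit accounting best. A minimal in-package sketch (hypothetical test code, not part of this commit; bstreamRoundTrip is an invented name):

package chunks

import "fmt"

// bstreamRoundTrip writes a single bit, an unaligned byte, and a 10-bit
// value, then reads them back in the same order.
func bstreamRoundTrip() {
	w := newBWriter(8)
	w.writeBit(one)        // 1 bit
	w.writeByte(0xAB)      // 8 bits, crossing a byte boundary
	w.writeBits(0x3FF, 10) // 10 bits

	r := newBReader(w.bytes())
	b1, _ := r.readBit()
	byt, _ := r.readByte()
	v, _ := r.readBits(10)
	fmt.Println(b1 == one, byt == 0xAB, v == 0x3FF) // true true true
}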

vendor/github.com/prometheus/tsdb/chunks/chunk.go generated vendored Normal file
@@ -0,0 +1,57 @@
package chunks
import (
"encoding/binary"
"fmt"
)
// Encoding is the identifier for a chunk encoding
type Encoding uint8
func (e Encoding) String() string {
switch e {
case EncNone:
return "none"
case EncXOR:
return "XOR"
}
return "<unknown>"
}
// The different available chunk encodings.
const (
EncNone Encoding = iota
EncXOR
)
// Chunk holds a sequence of sample pairs that can be iterated over and appended to.
type Chunk interface {
Bytes() []byte
Encoding() Encoding
Appender() (Appender, error)
Iterator() Iterator
}
// FromData returns a chunk from a byte slice of chunk data.
func FromData(e Encoding, d []byte) (Chunk, error) {
switch e {
case EncXOR:
return &XORChunk{
b: &bstream{count: 0, stream: d},
num: binary.BigEndian.Uint16(d),
}, nil
}
return nil, fmt.Errorf("unknown chunk encoding: %d", e)
}
// Appender adds sample pairs to a chunk.
type Appender interface {
Append(int64, float64)
}
// Iterator is a simple iterator that can only get the next value.
type Iterator interface {
At() (int64, float64)
Err() error
Next() bool
}
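These three small interfaces are the entire chunk API surface. A hedged sketch of how they compose, rehydrating a chunk from raw bytes and draining its iterator (printAll is a hypothetical helper, not part of this commit):

package chunks

import "fmt"

// printAll decodes raw chunk data of the given encoding and prints every
// sample pair, returning the iterator's error, if any.
func printAll(e Encoding, data []byte) error {
	c, err := FromData(e, data)
	if err != nil {
		return err
	}
	it := c.Iterator()
	for it.Next() {
		t, v := it.At()
		fmt.Println(t, v)
	}
	return it.Err()
}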

vendor/github.com/prometheus/tsdb/chunks/xor.go generated vendored Normal file
@@ -0,0 +1,342 @@
package chunks
import (
"encoding/binary"
"math"
bits "github.com/dgryski/go-bits"
)
// XORChunk holds XOR encoded sample data.
type XORChunk struct {
b *bstream
num uint16
}
// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
b := make([]byte, 2, 128)
return &XORChunk{b: &bstream{stream: b, count: 0}}
}
// Encoding returns the encoding type.
func (c *XORChunk) Encoding() Encoding {
return EncXOR
}
// Bytes returns the underlying byte slice of the chunk.
func (c *XORChunk) Bytes() []byte {
return c.b.bytes()
}
// Appender implements the Chunk interface.
func (c *XORChunk) Appender() (Appender, error) {
it := c.iterator()
// To get an appender we must know the state it would have if we had
// appended all existing data from scratch.
// We iterate through the end and populate via the iterator's state.
for it.Next() {
}
if err := it.Err(); err != nil {
return nil, err
}
a := &xorAppender{
c: c,
b: c.b,
t: it.t,
v: it.val,
tDelta: it.tDelta,
leading: it.leading,
trailing: it.trailing,
}
if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
a.leading = 0xff
}
return a, nil
}
func (c *XORChunk) iterator() *xorIterator {
// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
// When using striped locks to guard access to chunks, probably yes.
// Could only copy data if the chunk is not completed yet.
return &xorIterator{
br: newBReader(c.b.bytes()[2:]),
numTotal: binary.BigEndian.Uint16(c.b.bytes()),
}
}
// Iterator implements the Chunk interface.
func (c *XORChunk) Iterator() Iterator {
return c.iterator()
}
type xorAppender struct {
c *XORChunk
b *bstream
t int64
v float64
tDelta uint64
leading uint8
trailing uint8
}
func (a *xorAppender) Append(t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
if num == 0 {
buf := make([]byte, binary.MaxVarintLen64)
for _, b := range buf[:binary.PutVarint(buf, t)] {
a.b.writeByte(b)
}
a.b.writeBits(math.Float64bits(v), 64)
} else if num == 1 {
tDelta = uint64(t - a.t)
buf := make([]byte, binary.MaxVarintLen64)
for _, b := range buf[:binary.PutUvarint(buf, tDelta)] {
a.b.writeByte(b)
}
a.writeVDelta(v)
} else {
tDelta = uint64(t - a.t)
dod := int64(tDelta - a.tDelta)
// Gorilla has a max resolution of seconds, Prometheus milliseconds.
// Thus we use higher value range steps with larger bit size.
switch {
case dod == 0:
a.b.writeBit(zero)
case bitRange(dod, 14):
a.b.writeBits(0x02, 2) // '10'
a.b.writeBits(uint64(dod), 14)
case bitRange(dod, 17):
a.b.writeBits(0x06, 3) // '110'
a.b.writeBits(uint64(dod), 17)
case bitRange(dod, 20):
a.b.writeBits(0x0e, 4) // '1110'
a.b.writeBits(uint64(dod), 20)
default:
a.b.writeBits(0x0f, 4) // '1111'
a.b.writeBits(uint64(dod), 64)
}
a.writeVDelta(v)
}
a.t = t
a.v = v
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
a.tDelta = tDelta
}
func bitRange(x int64, nbits uint8) bool {
return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1)
}
func (a *xorAppender) writeVDelta(v float64) {
vDelta := math.Float64bits(v) ^ math.Float64bits(a.v)
if vDelta == 0 {
a.b.writeBit(zero)
return
}
a.b.writeBit(one)
leading := uint8(bits.Clz(vDelta))
trailing := uint8(bits.Ctz(vDelta))
// Clamp number of leading zeros to avoid overflow when encoding.
if leading >= 32 {
leading = 31
}
if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing {
a.b.writeBit(zero)
a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing))
} else {
a.leading, a.trailing = leading, trailing
a.b.writeBit(one)
a.b.writeBits(uint64(leading), 5)
// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
// So instead we write out a 0 and adjust it back to 64 on unpacking.
sigbits := 64 - leading - trailing
a.b.writeBits(uint64(sigbits), 6)
a.b.writeBits(vDelta>>trailing, int(sigbits))
}
}
type xorIterator struct {
br *bstream
numTotal uint16
numRead uint16
t int64
val float64
leading uint8
trailing uint8
tDelta uint64
err error
}
func (it *xorIterator) At() (int64, float64) {
return it.t, it.val
}
func (it *xorIterator) Err() error {
return it.err
}
func (it *xorIterator) Next() bool {
if it.err != nil || it.numRead == it.numTotal {
return false
}
if it.numRead == 0 {
t, err := binary.ReadVarint(it.br)
if err != nil {
it.err = err
return false
}
v, err := it.br.readBits(64)
if err != nil {
it.err = err
return false
}
it.t = int64(t)
it.val = math.Float64frombits(v)
it.numRead++
return true
}
if it.numRead == 1 {
tDelta, err := binary.ReadUvarint(it.br)
if err != nil {
it.err = err
return false
}
it.tDelta = tDelta
it.t = it.t + int64(it.tDelta)
return it.readValue()
}
var d byte
// read delta-of-delta
for i := 0; i < 4; i++ {
d <<= 1
bit, err := it.br.readBit()
if err != nil {
it.err = err
return false
}
if bit == zero {
break
}
d |= 1
}
var sz uint8
var dod int64
switch d {
case 0x00:
// dod == 0
case 0x02:
sz = 14
case 0x06:
sz = 17
case 0x0e:
sz = 20
case 0x0f:
bits, err := it.br.readBits(64)
if err != nil {
it.err = err
return false
}
dod = int64(bits)
}
if sz != 0 {
bits, err := it.br.readBits(int(sz))
if err != nil {
it.err = err
return false
}
if bits > (1 << (sz - 1)) {
// Sign-extend: values above the sz-bit positive range encode negatives.
bits = bits - (1 << sz)
}
dod = int64(bits)
}
it.tDelta = uint64(int64(it.tDelta) + dod)
it.t = it.t + int64(it.tDelta)
return it.readValue()
}
func (it *xorIterator) readValue() bool {
bit, err := it.br.readBit()
if err != nil {
it.err = err
return false
}
if bit == zero {
// Control bit zero: the value is unchanged, keep it.val as is.
} else {
bit, err := it.br.readBit()
if err != nil {
it.err = err
return false
}
if bit == zero {
// reuse leading/trailing zero bits
// it.leading, it.trailing = it.leading, it.trailing
} else {
bits, err := it.br.readBits(5)
if err != nil {
it.err = err
return false
}
it.leading = uint8(bits)
bits, err = it.br.readBits(6)
if err != nil {
it.err = err
return false
}
mbits := uint8(bits)
// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
if mbits == 0 {
mbits = 64
}
it.trailing = 64 - it.leading - mbits
}
mbits := int(64 - it.leading - it.trailing)
bits, err := it.br.readBits(mbits)
if err != nil {
it.err = err
return false
}
vbits := math.Float64bits(it.val)
vbits ^= (bits << it.trailing)
it.val = math.Float64frombits(vbits)
}
it.numRead++
return true
}
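To summarize the format this file implements: the first sample is stored as a varint timestamp plus a raw 64-bit value; the second stores a uvarint timestamp delta; every later sample stores a delta-of-delta under the '0', '10', '110', '1110', '1111' prefix codes, and values are XOR-compressed against their predecessor. A minimal round-trip sketch (hypothetical in-package code, not part of this commit; xorRoundTrip is an invented name):

package chunks

import "fmt"

// xorRoundTrip appends three samples to an XOR chunk and reads them back.
func xorRoundTrip() error {
	c := NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		return err
	}
	app.Append(1000, 1.0) // varint t, raw 64-bit value
	app.Append(2000, 1.0) // uvarint t-delta; repeated value costs one bit
	app.Append(3000, 2.5) // dod == 0 costs one bit; value is XOR-encoded

	it := c.Iterator()
	for it.Next() {
		t, v := it.At()
		fmt.Println(t, v) // 1000 1, then 2000 1, then 3000 2.5
	}
	return it.Err()
}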

@@ -8,11 +8,11 @@ import (
"time"
"github.com/coreos/etcd/pkg/fileutil"
- "github.com/fabxc/tsdb/labels"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/tsdb/labels"
)
// Compactor provides compaction against an underlying storage

@@ -18,11 +18,11 @@ import (
"golang.org/x/sync/errgroup"
"github.com/coreos/etcd/pkg/fileutil"
- "github.com/fabxc/tsdb/labels"
"github.com/go-kit/kit/log"
"github.com/nightlyone/lockfile"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/tsdb/labels"
)
// DefaultOptions used for the DB. They are sane for setups using
@@ -466,18 +466,19 @@ func (db *DB) Appender() Appender {
// anyway. For now this, with combination of only having a single timestamp per batch,
// prevents opening more than one appender and hitting an unresolved deadlock (#11).
//
- // // Only instantiate appender after returning the headmtx to avoid
- // // questionable locking order.
- // db.headmtx.RLock()
- // app := db.appendable()
- // db.headmtx.RUnlock()
- // for _, b := range app {
- // 	a.heads = append(a.heads, &metaAppender{
- // 		meta: b.Meta(),
- // 		app:  b.Appender().(*headAppender),
- // 	})
- // }
+ // Only instantiate appender after returning the headmtx to avoid
+ // questionable locking order.
+ db.headmtx.RLock()
+ app := db.appendable()
+ db.headmtx.RUnlock()
+ for _, b := range app {
+ 	a.heads = append(a.heads, &metaAppender{
+ 		meta: b.Meta(),
+ 		app:  b.Appender(),
+ 	})
+ }
return a
}
@@ -556,26 +557,26 @@ func (a *dbAppender) appenderFor(t int64) (*metaAppender, error) {
a.db.headmtx.Unlock()
- // XXX(fabxc): temporary workaround. See comment on instantiating DB.Appender.
- for _, b := range newHeads {
- 	// Only get appender for the block with the specific timestamp.
- 	if t >= b.Meta().MaxTime {
- 		continue
- 	}
- 	a.heads = append(a.heads, &metaAppender{
- 		app:  b.Appender(),
- 		meta: b.Meta(),
- 	})
- 	break
- }
+ // // Instantiate appenders after returning headmtx to avoid questionable
+ // // locking order.
+ // for _, b := range newHeads {
+ // 	// Only get appender for the block with the specific timestamp.
+ // 	if t >= b.Meta().MaxTime {
+ // 		continue
+ // 	}
+ // 	a.heads = append(a.heads, &metaAppender{
+ // 		app:  b.Appender(),
+ // 		meta: b.Meta(),
+ // 	})
+ // 	break
+ // }
+ // Instantiate appenders after returning headmtx to avoid questionable
+ // locking order.
+ for _, b := range newHeads {
+ 	a.heads = append(a.heads, &metaAppender{
+ 		app:  b.Appender(),
+ 		meta: b.Meta(),
+ 	})
+ }
}
for i := len(a.heads) - 1; i >= 0; i-- {
if h := a.heads[i]; t >= h.meta.MinTime {
@@ -613,28 +614,37 @@ func (db *DB) ensureHead(t int64) error {
}
func (a *dbAppender) Commit() error {
- var merr MultiError
+ defer a.db.mtx.RUnlock()
+ // Commits to partial appenders must be concurrent as concurrent appenders
+ // may have conflicting locks on head appenders.
+ // XXX(fabxc): is this a leaky abstraction? Should make an effort to catch a multi-error?
+ var g errgroup.Group
for _, h := range a.heads {
- 	merr.Add(h.app.Commit())
+ 	g.Go(h.app.Commit)
}
- a.db.mtx.RUnlock()
- if merr.Err() == nil {
- 	a.db.metrics.samplesAppended.Add(float64(a.samples))
+ if err := g.Wait(); err != nil {
+ 	return err
}
- return merr.Err()
+ // XXX(fabxc): Push the metric down into head block to account properly
+ // for partial appends?
+ a.db.metrics.samplesAppended.Add(float64(a.samples))
+ return nil
}
func (a *dbAppender) Rollback() error {
- var merr MultiError
+ defer a.db.mtx.RUnlock()
+ var g errgroup.Group
for _, h := range a.heads {
- 	merr.Add(h.app.Rollback())
+ 	g.Go(h.app.Rollback)
}
- a.db.mtx.RUnlock()
- return merr.Err()
+ return g.Wait()
}
// appendable returns a copy of a slice of HeadBlocks that can still be appended to.
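A note on the change above: Commit previously collected per-head errors sequentially into a MultiError; it now fans the commits out through errgroup so appenders holding conflicting head locks can proceed concurrently, with Wait reporting the first failure. A standalone sketch of that pattern (hypothetical code, not from this repository; commitHead stands in for h.app.Commit):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// commitHead returns a closure standing in for one head appender's Commit.
func commitHead(id int) func() error {
	return func() error {
		fmt.Println("committing head", id)
		return nil
	}
}

func main() {
	var g errgroup.Group
	for i := 0; i < 3; i++ {
		g.Go(commitHead(i)) // each commit runs in its own goroutine
	}
	if err := g.Wait(); err != nil { // Wait returns the first non-nil error
		fmt.Println("commit failed:", err)
	}
}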

@@ -10,11 +10,11 @@ import (
"sync/atomic"
"time"
- "github.com/fabxc/tsdb/chunks"
- "github.com/fabxc/tsdb/labels"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/pkg/errors"
+ "github.com/prometheus/tsdb/chunks"
+ "github.com/prometheus/tsdb/labels"
)
var (

@@ -13,8 +13,8 @@ import (
"strings"
"github.com/coreos/etcd/pkg/fileutil"
- "github.com/fabxc/tsdb/labels"
"github.com/pkg/errors"
+ "github.com/prometheus/tsdb/labels"
)
const (

vendor/github.com/prometheus/tsdb/labels/labels.go generated vendored Normal file
@@ -0,0 +1,150 @@
package labels
import (
"bytes"
"sort"
"strconv"
"strings"
"github.com/cespare/xxhash"
)
const sep = '\xff'
// Label is a key/value pair of strings.
type Label struct {
Name, Value string
}
// Labels is a sorted set of labels. Order has to be guaranteed upon
// instantiation.
type Labels []Label
func (ls Labels) Len() int { return len(ls) }
func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
func (ls Labels) String() string {
var b bytes.Buffer
b.WriteByte('{')
for i, l := range ls {
if i > 0 {
b.WriteByte(',')
}
b.WriteString(l.Name)
b.WriteByte('=')
b.WriteString(strconv.Quote(l.Value))
}
b.WriteByte('}')
return b.String()
}
// Hash returns a hash value for the label set.
func (ls Labels) Hash() uint64 {
b := make([]byte, 0, 1024)
for _, v := range ls {
b = append(b, v.Name...)
b = append(b, sep)
b = append(b, v.Value...)
b = append(b, sep)
}
return xxhash.Sum64(b)
}
// Get returns the value for the label with the given name.
// Returns an empty string if the label doesn't exist.
func (ls Labels) Get(name string) string {
for _, l := range ls {
if l.Name == name {
return l.Value
}
}
return ""
}
// Equals returns whether the two label sets are equal.
func (ls Labels) Equals(o Labels) bool {
if len(ls) != len(o) {
return false
}
for i, l := range ls {
if l.Name != o[i].Name || l.Value != o[i].Value {
return false
}
}
return true
}
// Map returns a string map of the labels.
func (ls Labels) Map() map[string]string {
m := make(map[string]string, len(ls))
for _, l := range ls {
m[l.Name] = l.Value
}
return m
}
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
set := make(Labels, 0, len(ls))
for _, l := range ls {
set = append(set, l)
}
sort.Sort(set)
return set
}
// FromMap returns new sorted Labels from the given map.
func FromMap(m map[string]string) Labels {
l := make([]Label, 0, len(m))
for k, v := range m {
l = append(l, Label{Name: k, Value: v})
}
return New(l...)
}
// FromStrings creates new labels from pairs of strings.
func FromStrings(ss ...string) Labels {
if len(ss)%2 != 0 {
panic("invalid number of strings")
}
var res Labels
for i := 0; i < len(ss); i += 2 {
res = append(res, Label{Name: ss[i], Value: ss[i+1]})
}
sort.Sort(res)
return res
}
// Compare compares the two label sets.
// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
func Compare(a, b Labels) int {
l := len(a)
if len(b) < l {
l = len(b)
}
for i := 0; i < l; i++ {
if d := strings.Compare(a[i].Name, b[i].Name); d != 0 {
return d
}
if d := strings.Compare(a[i].Value, b[i].Value); d != 0 {
return d
}
}
// If all labels so far were in common, the set with fewer labels comes first.
return len(a) - len(b)
}
// Slice is a sortable slice of label sets.
type Slice []Labels
func (s Slice) Len() int { return len(s) }
func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
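For orientation, the helpers above compose as follows; a short sketch (hypothetical in-package code, not part of this commit; labelsDemo is an invented name):

package labels

import "fmt"

// labelsDemo builds the same label set two ways and compares them.
func labelsDemo() {
	a := FromStrings("job", "api", "instance", "one")
	b := FromMap(map[string]string{"instance": "one", "job": "api"})

	fmt.Println(a)                    // {instance="one",job="api"}, sorted by name
	fmt.Println(a.Equals(b))          // true: same pairs in the same order
	fmt.Println(Compare(a, b) == 0)   // true
	fmt.Println(a.Hash() == b.Hash()) // true: the hash covers the sorted pairs
}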

vendor/github.com/prometheus/tsdb/labels/selector.go generated vendored Normal file
@@ -0,0 +1,70 @@
package labels
import "regexp"
// Selector holds constraints for matching against a label set.
type Selector []Matcher
// Matches returns whether the labels satisfy all matchers.
func (s Selector) Matches(labels Labels) bool {
for _, m := range s {
if v := labels.Get(m.Name()); !m.Matches(v) {
return false
}
}
return true
}
// Matcher specifies a constraint for the value of a label.
type Matcher interface {
// Name returns the label name the matcher should apply to.
Name() string
// Matches checks whether a value fulfills the constraints.
Matches(v string) bool
}
// EqualMatcher matches on equality.
type EqualMatcher struct {
name, value string
}
// Name implements Matcher interface.
func (m *EqualMatcher) Name() string { return m.name }
// Matches implements Matcher interface.
func (m *EqualMatcher) Matches(v string) bool { return v == m.value }
// NewEqualMatcher returns a new matcher matching an exact label value.
func NewEqualMatcher(name, value string) Matcher {
return &EqualMatcher{name: name, value: value}
}
type regexpMatcher struct {
name string
re *regexp.Regexp
}
func (m *regexpMatcher) Name() string { return m.name }
func (m *regexpMatcher) Matches(v string) bool { return m.re.MatchString(v) }
// NewRegexpMatcher returns a new matcher verifying that a value matches
// the regular expression pattern.
func NewRegexpMatcher(name, pattern string) (Matcher, error) {
re, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
return &regexpMatcher{name: name, re: re}, nil
}
// notMatcher inverts the matching result for a matcher.
type notMatcher struct {
Matcher
}
func (m *notMatcher) Matches(v string) bool { return !m.Matcher.Matches(v) }
// Not inverts the matcher's matching result.
func Not(m Matcher) Matcher {
return &notMatcher{m}
}
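Putting the matchers together: a Selector requires every matcher to hold against the label set. A hedged sketch (hypothetical in-package code, not part of this commit; selectorDemo is an invented name):

package labels

import "fmt"

// selectorDemo composes equality, regexp, and negated matchers.
func selectorDemo() error {
	re, err := NewRegexpMatcher("job", "api.*")
	if err != nil {
		return err
	}
	sel := Selector{
		NewEqualMatcher("env", "prod"),
		re,
		Not(NewEqualMatcher("instance", "")), // instance must be non-empty
	}
	lset := FromStrings("env", "prod", "instance", "one", "job", "api-server")
	fmt.Println(sel.Matches(lset)) // true
	return nil
}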

@@ -5,8 +5,8 @@ import (
"sort"
"strings"
- "github.com/fabxc/tsdb/chunks"
- "github.com/fabxc/tsdb/labels"
+ "github.com/prometheus/tsdb/chunks"
+ "github.com/prometheus/tsdb/labels"
)
// Querier provides querying access over time series data of a fixed
@@ -258,6 +258,9 @@ func (nopSeriesSet) Next() bool { return false }
func (nopSeriesSet) At() Series { return nil }
func (nopSeriesSet) Err() error { return nil }
// mergedSeriesSet takes two series sets as a single series set. The input series sets
// must be sorted and sequential in time, i.e. if they have the same label set,
// the datapoints of a must be before the datapoints of b.
type mergedSeriesSet struct {
a, b SeriesSet
@@ -307,11 +310,9 @@ func (s *mergedSeriesSet) Next() bool {
if d > 0 {
s.cur = s.b.At()
s.bdone = !s.b.Next()
} else if d < 0 {
s.cur = s.a.At()
s.adone = !s.a.Next()
} else {
s.cur = &chainedSeries{series: []Series{s.a.At(), s.b.At()}}
s.adone = !s.a.Next()

@@ -13,9 +13,9 @@ import (
"time"
"github.com/coreos/etcd/pkg/fileutil"
- "github.com/fabxc/tsdb/labels"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
+ "github.com/prometheus/tsdb/labels"
)
// WALEntryType indicates what data a WAL entry contains.

vendor/vendor.json vendored
@@ -361,12 +361,6 @@
"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
"revisionTime": "2016-09-30T00:14:02Z"
},
- {
- "checksumSHA1": "bBLYUZL/ZfgLqOPlBCkA88QahFQ=",
- "path": "github.com/fabxc/tsdb",
- "revision": "1afd8080f78b4925d2d6824eb8b6df333e549160",
- "revisionTime": "2017-03-31T11:53:02Z"
- },
{
"checksumSHA1": "uVzWuLvF646YjiKomsc2CR1ua58=",
"path": "github.com/fabxc/tsdb/chunks",
@@ -647,6 +641,24 @@
"revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
"revisionTime": "2016-04-11T19:08:41Z"
},
+ {
+ "checksumSHA1": "yKs0vGrGUdwhgmXGWY6We0O75hY=",
+ "path": "github.com/prometheus/tsdb",
+ "revision": "10c7c9acbe0175a411bce90cd7a0d9d7a13d6a83",
+ "revisionTime": "2017-04-04T09:27:26Z"
+ },
+ {
+ "checksumSHA1": "Qwlzvcx5Lbo9Nzb75AGgiiGszZI=",
+ "path": "github.com/prometheus/tsdb/chunks",
+ "revision": "10c7c9acbe0175a411bce90cd7a0d9d7a13d6a83",
+ "revisionTime": "2017-04-04T09:27:26Z"
+ },
+ {
+ "checksumSHA1": "7ztYi9KP5X3X7+1u7PmRQRXMGZU=",
+ "path": "github.com/prometheus/tsdb/labels",
+ "revision": "10c7c9acbe0175a411bce90cd7a0d9d7a13d6a83",
+ "revisionTime": "2017-04-04T09:27:26Z"
+ },
{
"checksumSHA1": "+49Vr4Me28p3cR+gxX5SUQHbbas=",
"path": "github.com/samuel/go-zookeeper/zk",