Merge pull request #3705 from Gouthamve/update_tsdb

vendor: update tsdb

commit 4b984d2378
vendor/github.com/prometheus/tsdb/block.go (generated, vendored): 58 changed lines

@@ -151,6 +151,9 @@ type BlockMeta struct {
 
 	// Information on compactions the block was created from.
 	Compaction BlockMetaCompaction `json:"compaction"`
+
+	// Version of the index format.
+	Version int `json:"version"`
 }
 
 // BlockStats contains stats about contents of a block.
@@ -176,12 +179,6 @@ const (
 	flagStd = 1
 )
 
-type blockMeta struct {
-	Version int `json:"version"`
-
-	*BlockMeta
-}
-
 const indexFilename = "index"
 const metaFilename = "meta.json"
 
@@ -193,16 +190,16 @@ func readMetaFile(dir string) (*BlockMeta, error) {
 	if err != nil {
 		return nil, err
 	}
-	var m blockMeta
+	var m BlockMeta
 
 	if err := json.Unmarshal(b, &m); err != nil {
 		return nil, err
 	}
-	if m.Version != 1 {
+	if m.Version != 1 && m.Version != 2 {
 		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
 	}
 
-	return m.BlockMeta, nil
+	return &m, nil
 }
 
 func writeMetaFile(dir string, meta *BlockMeta) error {
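Note: the hunk above unmarshals meta.json directly into BlockMeta and accepts both index format versions. A minimal, self-contained sketch of that pattern in Go (readMeta and the sample payloads are illustrative, not the vendored code):

package main

import (
	"encoding/json"
	"fmt"
)

// BlockMeta mirrors only the field relevant to the version check; the
// real struct also carries the ULID, time range, stats, and compaction
// information.
type BlockMeta struct {
	Version int `json:"version"`
}

// readMeta decodes a meta.json payload and rejects unknown index format
// versions, as the updated readMetaFile does.
func readMeta(b []byte) (*BlockMeta, error) {
	var m BlockMeta
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	if m.Version != 1 && m.Version != 2 {
		return nil, fmt.Errorf("unexpected meta file version %d", m.Version)
	}
	return &m, nil
}

func main() {
	for _, payload := range []string{`{"version":1}`, `{"version":2}`, `{"version":3}`} {
		m, err := readMeta([]byte(payload))
		fmt.Println(m, err)
	}
}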
@@ -219,7 +216,8 @@ func writeMetaFile(dir string, meta *BlockMeta) error {
 	enc.SetIndent("", "\t")
 
 	var merr MultiError
-	if merr.Add(enc.Encode(&blockMeta{Version: 1, BlockMeta: meta})); merr.Err() != nil {
+
+	if merr.Add(enc.Encode(meta)); merr.Err() != nil {
 		merr.Add(f.Close())
 		return merr.Err()
 	}
@@ -255,7 +253,7 @@ func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) {
 	if err != nil {
 		return nil, err
 	}
-	ir, err := index.NewFileReader(filepath.Join(dir, "index"))
+	ir, err := index.NewFileReader(filepath.Join(dir, "index"), meta.Version)
 	if err != nil {
 		return nil, err
 	}
@@ -321,7 +319,7 @@ func (pb *Block) Index() (IndexReader, error) {
 	if err := pb.startRead(); err != nil {
 		return nil, err
 	}
-	return blockIndexReader{IndexReader: pb.indexr, b: pb}, nil
+	return blockIndexReader{ir: pb.indexr, b: pb}, nil
 }
 
 // Chunks returns a new ChunkReader against the block data.
@@ -346,8 +344,40 @@ func (pb *Block) setCompactionFailed() error {
 }
 
 type blockIndexReader struct {
-	IndexReader
+	ir IndexReader
 	b *Block
+}
+
+func (r blockIndexReader) Symbols() (map[string]struct{}, error) {
+	s, err := r.ir.Symbols()
+	return s, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+}
+
+func (r blockIndexReader) LabelValues(names ...string) (index.StringTuples, error) {
+	st, err := r.ir.LabelValues(names...)
+	return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+}
+
+func (r blockIndexReader) Postings(name, value string) (index.Postings, error) {
+	p, err := r.ir.Postings(name, value)
+	return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+}
+
+func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
+	return r.ir.SortedPostings(p)
+}
+
+func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
+	return errors.Wrapf(
+		r.ir.Series(ref, lset, chks),
+		"block: %s",
+		r.b.Meta().ULID,
+	)
+}
+
+func (r blockIndexReader) LabelIndices() ([][]string, error) {
+	ss, err := r.ir.LabelIndices()
+	return ss, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
 }
 
 func (r blockIndexReader) Close() error {
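Note: blockIndexReader drops the embedded IndexReader in favor of an explicit ir field, so every delegated call can annotate its error with the block's ULID. A reduced sketch of the pattern, assuming a cut-down one-method interface (the real IndexReader has more methods):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// indexReader is a stand-in for tsdb's IndexReader interface.
type indexReader interface {
	Postings(name, value string) ([]uint64, error)
}

// failingReader always errors, to show the wrapping in action.
type failingReader struct{}

func (failingReader) Postings(name, value string) ([]uint64, error) {
	return nil, errors.New("corrupt postings")
}

// blockIndexReader wraps an explicit field instead of embedding, so each
// method can add block context before returning the error.
type blockIndexReader struct {
	ir   indexReader
	ulid string
}

func (r blockIndexReader) Postings(name, value string) ([]uint64, error) {
	p, err := r.ir.Postings(name, value)
	return p, errors.Wrapf(err, "block: %s", r.ulid)
}

func main() {
	r := blockIndexReader{ir: failingReader{}, ulid: "01C4XYZ"}
	_, err := r.Postings("job", "node")
	fmt.Println(err) // block: 01C4XYZ: corrupt postings
}

errors.Wrapf returns nil for a nil error, so the happy path stays untouched; only failures gain the block prefix.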
vendor/github.com/prometheus/tsdb/compact.go (generated, vendored): 1 changed line

@@ -428,6 +428,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 	}
 
 	indexw, err := index.NewWriter(filepath.Join(tmp, indexFilename))
+	meta.Version = indexw.Version
 	if err != nil {
 		return errors.Wrap(err, "open index writer")
 	}
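Note: the single added line records which index format the writer produced, so the block's meta.json can later tell readers how to interpret the index file. A toy sketch of the handoff (the types here are illustrative stand-ins):

package main

import "fmt"

type BlockMeta struct{ Version int }

type IndexWriter struct{ Version int }

// newIndexWriter stands in for index.NewWriter, which now always
// produces format version 2.
func newIndexWriter() *IndexWriter { return &IndexWriter{Version: 2} }

func main() {
	meta := &BlockMeta{}
	indexw := newIndexWriter()
	// The compactor copies the writer's version into the block meta.
	meta.Version = indexw.Version
	fmt.Println(meta.Version) // 2
}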
vendor/github.com/prometheus/tsdb/head.go (generated, vendored): 8 changed lines

@@ -397,7 +397,7 @@ func (h *rangeHead) Tombstones() (TombstoneReader, error) {
 	return h.head.tombstones, nil
 }
 
-// initAppender is a helper to initialize the time bounds of a the head
+// initAppender is a helper to initialize the time bounds of the head
 // upon the first sample it receives.
 type initAppender struct {
 	app Appender
@@ -604,7 +604,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...labels.Matcher) error {
 	return nil
 }
 
-// gc removes data before the minimum timestmap from the head.
+// gc removes data before the minimum timestamp from the head.
 func (h *Head) gc() {
 	// Only data strictly lower than this timestamp must be deleted.
 	mint := h.MinTime()
@@ -962,7 +962,7 @@ func (m seriesHashmap) del(hash uint64, lset labels.Labels) {
 }
 
 // stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention.
-// The locks are padded to not be on the same cache line. Filling the badded space
+// The locks are padded to not be on the same cache line. Filling the padded space
 // with the maps was profiled to be slower – likely due to the additional pointer
 // dereferences.
 type stripeSeries struct {
@@ -1092,7 +1092,7 @@ type sample struct {
 }
 
 // memSeries is the in-memory representation of a series. None of its methods
-// are goroutine safe and its the callers responsibility to lock it.
+// are goroutine safe and it is the caller's responsibility to lock it.
 type memSeries struct {
 	sync.Mutex
 
vendor/github.com/prometheus/tsdb/index/index.go (generated, vendored): 93 changed lines

@@ -98,7 +98,7 @@ func newCRC32() hash.Hash32 {
 	return crc32.New(castagnoliTable)
 }
 
-// indexWriter implements the IndexWriter interface for the standard
+// Writer implements the IndexWriter interface for the standard
 // serialization format.
 type Writer struct {
 	f    *os.File
@@ -122,6 +122,8 @@ type Writer struct {
 	lastSeries labels.Labels
 
 	crc32 hash.Hash
+
+	Version int
 }
 
 type indexTOC struct {
@@ -166,6 +168,8 @@ func NewWriter(fn string) (*Writer, error) {
 		symbols:       make(map[string]uint32, 1<<13),
 		seriesOffsets: make(map[uint64]uint64, 1<<16),
 		crc32:         newCRC32(),
+
+		Version: 2,
 	}
 	if err := iw.writeMeta(); err != nil {
 		return nil, err
@@ -180,12 +184,12 @@ func (w *Writer) write(bufs ...[]byte) error {
 		if err != nil {
 			return err
 		}
-		// For now the index file must not grow beyond 4GiB. Some of the fixed-sized
+		// For now the index file must not grow beyond 64GiB. Some of the fixed-sized
 		// offset references in v1 are only 4 bytes large.
 		// Once we move to compressed/varint representations in those areas, this limitation
 		// can be lifted.
-		if w.pos > math.MaxUint32 {
-			return errors.Errorf("exceeding max size of 4GiB")
+		if w.pos > 16*math.MaxUint32 {
+			return errors.Errorf("exceeding max size of 64GiB")
 		}
 	}
 	return nil
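Note: the cap grows from 4GiB to 64GiB because v2 series references count 16-byte-aligned slots instead of raw bytes, so the same 4-byte reference addresses 16 times as much file. A quick check of the arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	const gib = 1 << 30

	// v1: a 4-byte reference holds a raw byte offset, capping the file
	// at math.MaxUint32 bytes.
	fmt.Printf("v1 cap: %.1f GiB\n", float64(math.MaxUint32)/gib)

	// v2: a reference counts 16-byte slots, so the addressable range is
	// 16 * math.MaxUint32 bytes.
	fmt.Printf("v2 cap: %.1f GiB\n", 16*float64(math.MaxUint32)/gib)
}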
@@ -250,6 +254,7 @@ func (w *Writer) writeMeta() error {
 	return w.write(w.buf1.get())
 }
 
+// AddSeries adds the series one at a time along with its chunks.
 func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta) error {
 	if err := w.ensureStage(idxStageSeries); err != nil {
 		return err
@@ -261,23 +266,25 @@ func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta
 	if _, ok := w.seriesOffsets[ref]; ok {
 		return errors.Errorf("series with reference %d already added", ref)
 	}
-	w.seriesOffsets[ref] = w.pos
+	w.addPadding(16)
+	w.seriesOffsets[ref] = w.pos / 16
 
 	w.buf2.reset()
 	w.buf2.putUvarint(len(lset))
 
 	for _, l := range lset {
-		offset, ok := w.symbols[l.Name]
+		// here we have an index for the symbol file if v2, otherwise it's an offset
+		index, ok := w.symbols[l.Name]
 		if !ok {
 			return errors.Errorf("symbol entry for %q does not exist", l.Name)
 		}
-		w.buf2.putUvarint32(offset)
+		w.buf2.putUvarint32(index)
 
-		offset, ok = w.symbols[l.Value]
+		index, ok = w.symbols[l.Value]
 		if !ok {
 			return errors.Errorf("symbol entry for %q does not exist", l.Value)
 		}
-		w.buf2.putUvarint32(offset)
+		w.buf2.putUvarint32(index)
 	}
 
 	w.buf2.putUvarint(len(chunks))
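Note: this hunk is the writer half of the v2 addressing scheme: each series entry is padded to a 16-byte boundary and the stored reference is the position divided by 16. A minimal sketch of the padding and reference computation (the writer type is a toy stand-in for index.Writer's internals):

package main

import "fmt"

// writer tracks only the output position and the series-reference map.
type writer struct {
	pos           uint64
	seriesOffsets map[uint64]uint64
}

// addPadding advances pos to the next multiple of align; the real writer
// does this by emitting zero bytes.
func (w *writer) addPadding(align uint64) {
	if rem := w.pos % align; rem != 0 {
		w.pos += align - rem
	}
}

func (w *writer) addSeries(ref uint64, entryLen uint64) {
	w.addPadding(16)
	// Store the aligned position divided by 16: a 4-byte reference can
	// then address up to 64GiB of index file.
	w.seriesOffsets[ref] = w.pos / 16
	w.pos += entryLen
}

func main() {
	w := &writer{pos: 5, seriesOffsets: map[uint64]uint64{}}
	w.addSeries(1, 23) // padded to pos 16 -> reference 1
	w.addSeries(2, 10) // padded to pos 48 -> reference 3
	fmt.Println(w.seriesOffsets) // map[1:1 2:3]
}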
@@ -335,8 +342,8 @@ func (w *Writer) AddSymbols(sym map[string]struct{}) error {
 
 	w.symbols = make(map[string]uint32, len(symbols))
 
-	for _, s := range symbols {
-		w.symbols[s] = uint32(w.pos) + headerSize + uint32(w.buf2.len())
+	for index, s := range symbols {
+		w.symbols[s] = uint32(index)
 		w.buf2.putUvarintStr(s)
 	}
 
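Note: symbol references change meaning in v2: instead of the byte offset at which a symbol was written, the table stores the symbol's position in the sorted symbol list. A reduced sketch of the v2 table construction:

package main

import (
	"fmt"
	"sort"
)

// buildSymbolTable assigns each sorted symbol its sequential index, as
// the updated AddSymbols does; v1 stored byte offsets instead.
func buildSymbolTable(sym map[string]struct{}) map[string]uint32 {
	symbols := make([]string, 0, len(sym))
	for s := range sym {
		symbols = append(symbols, s)
	}
	sort.Strings(symbols)

	table := make(map[string]uint32, len(symbols))
	for index, s := range symbols {
		table[s] = uint32(index)
	}
	return table
}

func main() {
	table := buildSymbolTable(map[string]struct{}{
		"__name__": {}, "job": {}, "instance": {},
	})
	fmt.Println(table) // map[__name__:0 instance:1 job:2]
}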
@@ -375,12 +382,13 @@ func (w *Writer) WriteLabelIndex(names []string, values []string) error {
 	w.buf2.putBE32int(len(names))
 	w.buf2.putBE32int(valt.Len())
 
+	// here we have an index for the symbol file if v2, otherwise it's an offset
 	for _, v := range valt.s {
-		offset, ok := w.symbols[v]
+		index, ok := w.symbols[v]
 		if !ok {
 			return errors.Errorf("symbol entry for %q does not exist", v)
 		}
-		w.buf2.putBE32(offset)
+		w.buf2.putBE32(index)
 	}
 
 	w.buf1.reset()
@@ -531,9 +539,11 @@ type Reader struct {
 	// the block has been unmapped.
 	symbols map[uint32]string
 
-	dec *DecoderV1
+	dec *Decoder
 
 	crc32 hash.Hash32
+
+	version int
 }
 
 var (
@@ -563,20 +573,24 @@ func (b realByteSlice) Sub(start, end int) ByteSlice {
 }
 
 // NewReader returns a new IndexReader on the given byte slice.
-func NewReader(b ByteSlice) (*Reader, error) {
-	return newReader(b, nil)
+func NewReader(b ByteSlice, version int) (*Reader, error) {
+	return newReader(b, nil, version)
 }
 
 // NewFileReader returns a new index reader against the given index file.
-func NewFileReader(path string) (*Reader, error) {
+func NewFileReader(path string, version int) (*Reader, error) {
 	f, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return nil, err
 	}
-	return newReader(realByteSlice(f.Bytes()), f)
+	return newReader(realByteSlice(f.Bytes()), f, version)
 }
 
-func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
+func newReader(b ByteSlice, c io.Closer, version int) (*Reader, error) {
+	if version != 1 && version != 2 {
+		return nil, errors.Errorf("unexpected file version %d", version)
+	}
+
 	r := &Reader{
 		b: b,
 		c: c,
@@ -584,7 +598,9 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 		labels:   map[string]uint32{},
 		postings: map[labels.Label]uint32{},
 		crc32:    newCRC32(),
+		version:  version,
 	}
 
 	// Verify magic number.
 	if b.Len() < 4 {
 		return nil, errors.Wrap(errInvalidSize, "index header")
@@ -622,7 +638,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 		return nil, errors.Wrap(err, "read postings table")
 	}
 
-	r.dec = &DecoderV1{symbols: r.symbols}
+	r.dec = &Decoder{symbols: r.symbols}
 
 	return r, nil
 }
@@ -660,7 +676,7 @@ func (r *Reader) readTOC() error {
 	d := decbuf{b: b[:len(b)-4]}
 
 	if d.crc32() != expCRC {
-		return errInvalidChecksum
+		return errors.Wrap(errInvalidChecksum, "read TOC")
 	}
 
 	r.toc.symbols = d.be64()
@@ -742,14 +758,23 @@ func (r *Reader) readSymbols(off int) error {
 		basePos = uint32(off) + 4
 		nextPos = basePos + uint32(origLen-d.len())
 	)
+
+	if r.version == 2 {
+		nextPos = 0
+	}
+
 	for d.err() == nil && d.len() > 0 && cnt > 0 {
 		s := d.uvarintStr()
 		r.symbols[uint32(nextPos)] = s
 
-		nextPos = basePos + uint32(origLen-d.len())
+		if r.version == 2 {
+			nextPos++
+		} else {
+			nextPos = basePos + uint32(origLen-d.len())
+		}
 		cnt--
 	}
-	return d.err()
+	return errors.Wrap(d.err(), "read symbols")
 }
 
 // readOffsetTable reads an offset table at the given position calls f for each
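Note: the reader mirrors the writer's two schemes: v1 keys each symbol by the byte position it was read from, v2 by a counter starting at zero. A simplified sketch over already-decoded strings (the byte-level decoding is elided; the v1 positions are hypothetical):

package main

import "fmt"

// readSymbols keys each symbol the way Reader.readSymbols does: by byte
// position for v1 index files, by sequential index for v2.
func readSymbols(symbols []string, v1Positions []uint32, version int) map[uint32]string {
	out := make(map[uint32]string, len(symbols))
	nextPos := uint32(0)
	for i, s := range symbols {
		if version == 2 {
			out[nextPos] = s
			nextPos++
		} else {
			out[v1Positions[i]] = s // v1: byte offset of the entry
		}
	}
	return out
}

func main() {
	syms := []string{"__name__", "instance", "job"}
	pos := []uint32{9, 20, 32} // hypothetical v1 byte positions
	fmt.Println(readSymbols(syms, pos, 1)) // map[9:__name__ 20:instance 32:job]
	fmt.Println(readSymbols(syms, pos, 2)) // map[0:__name__ 1:instance 2:job]
}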
@@ -852,13 +877,17 @@ func (r *Reader) LabelIndices() ([][]string, error) {
 	return res, nil
 }
 
-// Series the series with the given ID and writes its labels and chunks into lbls and chks.
+// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
 func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
-	d := r.decbufUvarintAt(int(id))
+	offset := id
+	if r.version == 2 {
+		offset = 16 * id
+	}
+	d := r.decbufUvarintAt(int(offset))
 	if d.err() != nil {
 		return d.err()
 	}
-	return r.dec.Series(d.get(), lbls, chks)
+	return errors.Wrap(r.dec.Series(d.get(), lbls, chks), "read series")
 }
 
 // Postings returns a postings list for the given label pair.
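Note: the lookup side of the 16-byte alignment: a v2 series reference is multiplied by 16 to recover the byte offset of the entry, while v1 references are already byte offsets. A minimal sketch:

package main

import "fmt"

// seriesOffset converts an index series reference into the byte offset
// of the series entry, per the logic added to Reader.Series.
func seriesOffset(id uint64, version int) uint64 {
	if version == 2 {
		return 16 * id // v2: references count 16-byte-aligned slots
	}
	return id // v1: references are raw byte offsets
}

func main() {
	fmt.Println(seriesOffset(3, 2))  // 48
	fmt.Println(seriesOffset(48, 1)) // 48
}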
@@ -955,15 +984,15 @@ func (t *serializedStringTuples) At(i int) ([]string, error) {
 	return res, nil
 }
 
-// DecoderV1 provides decoding methods for the v1 index file format.
+// Decoder provides decoding methods for the v1 and v2 index file format.
 //
 // It currently does not contain decoding methods for all entry types but can be extended
 // by them if there's demand.
-type DecoderV1 struct {
+type Decoder struct {
 	symbols map[uint32]string
 }
 
-func (dec *DecoderV1) lookupSymbol(o uint32) (string, error) {
+func (dec *Decoder) lookupSymbol(o uint32) (string, error) {
 	s, ok := dec.symbols[o]
 	if !ok {
 		return "", errors.Errorf("unknown symbol offset %d", o)
@@ -973,12 +1002,12 @@ func (dec *DecoderV1) lookupSymbol(o uint32) (string, error) {
 
 // SetSymbolTable set the symbol table to be used for lookups when decoding series
 // and label indices
-func (dec *DecoderV1) SetSymbolTable(t map[uint32]string) {
+func (dec *Decoder) SetSymbolTable(t map[uint32]string) {
 	dec.symbols = t
 }
 
 // Postings returns a postings list for b and its number of elements.
-func (dec *DecoderV1) Postings(b []byte) (int, Postings, error) {
+func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
 	d := decbuf{b: b}
 	n := d.be32int()
 	l := d.get()
@@ -986,7 +1015,7 @@ func (dec *DecoderV1) Postings(b []byte) (int, Postings, error) {
 }
 
 // Series decodes a series entry from the given byte slice into lset and chks.
-func (dec *DecoderV1) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
+func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
 	*lbls = (*lbls)[:0]
 	*chks = (*chks)[:0]
 
vendor/github.com/prometheus/tsdb/tombstones.go (generated, vendored): 2 changed lines

@@ -131,7 +131,7 @@ func readTombstones(dir string) (memTombstones, error) {
 		return nil, d.err()
 	}
 
-	// Verify checksum
+	// Verify checksum.
 	hash := newCRC32()
 	if _, err := hash.Write(d.get()); err != nil {
 		return nil, errors.Wrap(err, "write to hash")
vendor/vendor.json (vendored): 28 changed lines

@@ -800,40 +800,40 @@
 			"revisionTime": "2016-04-11T19:08:41Z"
 		},
 		{
-			"checksumSHA1": "C72p7MMwA94LrpqYD3dMcQEKHzY=",
+			"checksumSHA1": "XHNJVYnIBaFpQOOA4wTqjDbMkUo=",
 			"path": "github.com/prometheus/tsdb",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
 			"checksumSHA1": "XTirmk6Pq5TBGIZEaN5VL4k3i1s=",
 			"path": "github.com/prometheus/tsdb/chunkenc",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
 			"checksumSHA1": "+zsn1i8cqwgZXL8Bg6jDy32xjAo=",
 			"path": "github.com/prometheus/tsdb/chunks",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
 			"checksumSHA1": "h49AAcJ5+iRBwCgbfQf+2T1E1ZE=",
 			"path": "github.com/prometheus/tsdb/fileutil",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
-			"checksumSHA1": "y3lUn43gcc3HtYd20UDujyybGq4=",
+			"checksumSHA1": "yuiJaE3cAmZ/ws8NOfd56x36Sg4=",
 			"path": "github.com/prometheus/tsdb/index",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
 			"checksumSHA1": "Va8HWvOFTwFeewZFadMAOzNGDps=",
 			"path": "github.com/prometheus/tsdb/labels",
-			"revision": "07ef80820ef1250db82f9544f3fcf7f0f63ccee0",
-			"revisionTime": "2017-12-22T14:44:41Z"
+			"revision": "467948f3c3f2f6f4ed9881afba27dd3ae24393eb",
+			"revisionTime": "2018-01-18T08:32:54Z"
 		},
 		{
 			"checksumSHA1": "5SYLEhADhdBVZAGPVHWggQl7H8k=",