2017-04-10 18:59:45 +00:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
package tsdb
|
|
|
|
|
2016-12-15 07:31:26 +00:00
|
|
|
import (
|
2017-05-15 17:58:14 +00:00
|
|
|
"encoding/binary"
|
2017-01-19 10:22:47 +00:00
|
|
|
"encoding/json"
|
2017-03-20 09:41:43 +00:00
|
|
|
"fmt"
|
2017-01-19 10:22:47 +00:00
|
|
|
"io/ioutil"
|
2016-12-15 07:31:26 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2017-05-14 09:06:26 +00:00
|
|
|
"sort"
|
2016-12-22 14:54:39 +00:00
|
|
|
|
2017-02-27 09:46:15 +00:00
|
|
|
"github.com/oklog/ulid"
|
2016-12-22 14:54:39 +00:00
|
|
|
"github.com/pkg/errors"
|
2017-05-14 09:06:26 +00:00
|
|
|
"github.com/prometheus/tsdb/labels"
|
2016-12-08 16:43:10 +00:00
|
|
|
)
|
|
|
|
|
2017-03-20 07:41:56 +00:00
|
|
|
// DiskBlock handles reads against a Block of time series data.
type DiskBlock interface {
	// Dir returns the directory where block data is stored.
	Dir() string

	// Meta returns metadata about the block.
	Meta() BlockMeta

	// Index returns an IndexReader over the block's data.
	Index() IndexReader

	// Chunks returns a ChunkReader over the block's data.
	Chunks() ChunkReader

	// Delete deletes data from the block.
	Delete(mint, maxt int64, ms ...labels.Matcher) error

	// Close releases all underlying resources of the block.
	Close() error
}
|
2016-12-08 16:43:10 +00:00
|
|
|
|
2017-03-20 07:41:56 +00:00
|
|
|
// Block is an interface to a DiskBlock that can also be queried.
type Block interface {
	DiskBlock
	Queryable
}
|
|
|
|
|
2017-05-12 14:34:41 +00:00
|
|
|
// headBlock is a regular block that can still be appended to.
type headBlock interface {
	Block
	Appendable
}
|
|
|
|
|
|
|
|
// Appendable defines an entity to which data can be appended.
type Appendable interface {
	// Appender returns a new Appender against an underlying store.
	Appender() Appender

	// Busy returns whether there are any currently active appenders.
	Busy() bool
}
|
|
|
|
|
|
|
|
// Queryable defines an entity which provides a Querier.
type Queryable interface {
	// Querier returns a Querier over the data within the time range
	// [mint, maxt].
	Querier(mint, maxt int64) Querier
}
|
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// BlockMeta provides meta information about a block.
// It is serialized to JSON in the block's meta file.
type BlockMeta struct {
	// Unique identifier for the block and its contents. Changes on compaction.
	ULID ulid.ULID `json:"ulid"`

	// Sequence number of the block.
	Sequence int `json:"sequence"`

	// MinTime and MaxTime specify the time range all samples
	// in the block are in.
	MinTime int64 `json:"minTime"`
	MaxTime int64 `json:"maxTime"`

	// Stats about the contents of the block.
	Stats struct {
		NumSamples uint64 `json:"numSamples,omitempty"`
		NumSeries  uint64 `json:"numSeries,omitempty"`
		NumChunks  uint64 `json:"numChunks,omitempty"`
	} `json:"stats,omitempty"`

	// Information on compactions the block was created from.
	Compaction struct {
		// Generation of the block; presumably incremented per
		// compaction cycle — TODO confirm against the compactor.
		Generation int `json:"generation"`
	} `json:"compaction"`
}
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
// Encoding flags for block data. NOTE(review): no consumer of these is
// visible in this file; their semantics come from the encoders/decoders.
const (
	flagNone = 0
	flagStd  = 1
)
|
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// blockMeta is the on-disk JSON representation of BlockMeta with an
// added format version field.
type blockMeta struct {
	Version int `json:"version"`

	*BlockMeta
}
|
|
|
|
|
|
|
|
// metaFilename is the name of the meta file within a block directory.
const metaFilename = "meta.json"

// tombstoneFilename is the name of the tombstone file within a block directory.
const tombstoneFilename = "tombstones"
|
2017-01-19 10:22:47 +00:00
|
|
|
|
2017-01-19 13:01:38 +00:00
|
|
|
func readMetaFile(dir string) (*BlockMeta, error) {
|
|
|
|
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var m blockMeta
|
|
|
|
|
|
|
|
if err := json.Unmarshal(b, &m); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if m.Version != 1 {
|
|
|
|
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
|
|
|
|
}
|
|
|
|
|
|
|
|
return m.BlockMeta, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeMetaFile(dir string, meta *BlockMeta) error {
|
2017-03-01 16:19:57 +00:00
|
|
|
// Make any changes to the file appear atomic.
|
|
|
|
path := filepath.Join(dir, metaFilename)
|
|
|
|
tmp := path + ".tmp"
|
|
|
|
|
|
|
|
f, err := os.Create(tmp)
|
2017-01-19 13:01:38 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
enc := json.NewEncoder(f)
|
|
|
|
enc.SetIndent("", "\t")
|
|
|
|
|
2017-04-28 13:45:30 +00:00
|
|
|
var merr MultiError
|
|
|
|
if merr.Add(enc.Encode(&blockMeta{Version: 1, BlockMeta: meta})); merr.Err() != nil {
|
|
|
|
merr.Add(f.Close())
|
|
|
|
return merr
|
2017-01-19 13:01:38 +00:00
|
|
|
}
|
|
|
|
if err := f.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-01 16:19:57 +00:00
|
|
|
return renameFile(tmp, path)
|
2017-01-19 13:01:38 +00:00
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// readTombstoneFile opens the tombstone file in dir for iteration.
func readTombstoneFile(dir string) (TombstoneReader, error) {
	return newTombStoneReader(dir)
}
|
|
|
|
|
|
|
|
func writeTombstoneFile(dir string, tr TombstoneReader) error {
|
|
|
|
path := filepath.Join(dir, tombstoneFilename)
|
|
|
|
tmp := path + ".tmp"
|
|
|
|
|
|
|
|
f, err := os.Create(tmp)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
stoneOff := make(map[uint32]int64) // The map that holds the ref to offset vals.
|
|
|
|
refs := []uint32{} // Sorted refs.
|
|
|
|
|
|
|
|
pos := int64(0)
|
|
|
|
buf := encbuf{b: make([]byte, 2*binary.MaxVarintLen64)}
|
|
|
|
for tr.Next() {
|
|
|
|
s := tr.At()
|
|
|
|
|
|
|
|
refs = append(refs, s.ref)
|
|
|
|
stoneOff[s.ref] = pos
|
|
|
|
|
|
|
|
// Write the ranges.
|
|
|
|
buf.reset()
|
|
|
|
buf.putVarint64(int64(len(s.ranges)))
|
|
|
|
n, err := f.Write(buf.get())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
pos += int64(n)
|
|
|
|
|
|
|
|
for _, r := range s.ranges {
|
|
|
|
buf.reset()
|
|
|
|
buf.putVarint64(r.mint)
|
|
|
|
buf.putVarint64(r.maxt)
|
|
|
|
n, err = f.Write(buf.get())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
pos += int64(n)
|
|
|
|
}
|
|
|
|
}
|
2017-05-16 07:13:33 +00:00
|
|
|
if err := tr.Err(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-05-15 17:58:14 +00:00
|
|
|
|
|
|
|
// Write the offset table.
|
|
|
|
buf.reset()
|
|
|
|
buf.putBE32int(len(refs))
|
2017-05-16 07:13:33 +00:00
|
|
|
if _, err := f.Write(buf.get()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
for _, ref := range refs {
|
|
|
|
buf.reset()
|
|
|
|
buf.putBE32(ref)
|
|
|
|
buf.putBE64int64(stoneOff[ref])
|
|
|
|
_, err = f.Write(buf.get())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write the offset to the offset table.
|
|
|
|
buf.reset()
|
|
|
|
buf.putBE64int64(pos)
|
|
|
|
_, err = f.Write(buf.get())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := f.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return renameFile(tmp, path)
|
|
|
|
}
|
|
|
|
|
2017-03-20 09:21:21 +00:00
|
|
|
// persistedBlock is a complete, immutable block stored on disk. It
// provides the DiskBlock and Queryable behavior via its chunk and
// index readers.
type persistedBlock struct {
	dir  string    // Directory the block's data lives in.
	meta BlockMeta // Decoded contents of the block's meta file.

	chunkr *chunkReader
	indexr *indexReader
}
|
|
|
|
|
2017-01-10 14:28:22 +00:00
|
|
|
func newPersistedBlock(dir string) (*persistedBlock, error) {
|
2017-01-19 18:45:52 +00:00
|
|
|
meta, err := readMetaFile(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-15 07:31:26 +00:00
|
|
|
|
2017-02-27 09:46:15 +00:00
|
|
|
cr, err := newChunkReader(chunkDir(dir))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-02-23 09:50:22 +00:00
|
|
|
return nil, err
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
2017-02-25 06:24:20 +00:00
|
|
|
ir, err := newIndexReader(dir)
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-02-25 06:24:20 +00:00
|
|
|
return nil, err
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
pb := &persistedBlock{
|
2017-02-23 09:50:22 +00:00
|
|
|
dir: dir,
|
|
|
|
meta: *meta,
|
|
|
|
chunkr: cr,
|
|
|
|
indexr: ir,
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
return pb, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pb *persistedBlock) Close() error {
|
2017-02-27 09:46:15 +00:00
|
|
|
var merr MultiError
|
2016-12-15 07:31:26 +00:00
|
|
|
|
2017-02-27 09:46:15 +00:00
|
|
|
merr.Add(pb.chunkr.Close())
|
|
|
|
merr.Add(pb.indexr.Close())
|
|
|
|
|
|
|
|
return merr.Err()
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
2017-03-20 09:41:43 +00:00
|
|
|
// String returns a short "(sequence, ULID)" identifier for the block.
func (pb *persistedBlock) String() string {
	return fmt.Sprintf("(%d, %s)", pb.meta.Sequence, pb.meta.ULID)
}
|
|
|
|
|
2017-03-20 09:21:21 +00:00
|
|
|
func (pb *persistedBlock) Querier(mint, maxt int64) Querier {
|
|
|
|
return &blockQuerier{
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
index: pb.Index(),
|
|
|
|
chunks: pb.Chunks(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-23 09:50:22 +00:00
|
|
|
// Dir returns the directory where the block's data is stored.
func (pb *persistedBlock) Dir() string { return pb.dir }

// Index returns an IndexReader over the block's index.
func (pb *persistedBlock) Index() IndexReader { return pb.indexr }

// Chunks returns a ChunkReader over the block's chunk data.
func (pb *persistedBlock) Chunks() ChunkReader { return pb.chunkr }

// Meta returns the block's metadata.
func (pb *persistedBlock) Meta() BlockMeta { return pb.meta }
|
2016-12-15 15:14:33 +00:00
|
|
|
|
2017-05-14 09:06:26 +00:00
|
|
|
// Delete records tombstones for all series matching ms whose chunks
// intersect [mint, maxt]. Data is not removed from disk here; the
// deletion markers are merged with existing tombstones and persisted
// to the block's tombstone file.
func (pb *persistedBlock) Delete(mint, maxt int64, ms ...labels.Matcher) error {
	pr := newPostingsReader(pb.indexr)
	p, absent := pr.Select(ms...)

	ir := pb.indexr

	// Choose only valid postings which have chunks in the time-range.
	vPostings := []uint32{}

Outer:
	for p.Next() {
		lset, chunks, err := ir.Series(p.At())
		if err != nil {
			return err
		}

		// Labels named in absent must not be set on the series at all.
		for _, abs := range absent {
			if lset.Get(abs) != "" {
				continue Outer
			}
		}

		// XXX(gouthamve): Adjust mint and maxt to match the time-range in the chunks?
		for _, chk := range chunks {
			// Keep the series if any of its chunks overlaps [mint, maxt].
			if (mint <= chk.MinTime && maxt >= chk.MinTime) ||
				(mint > chk.MinTime && mint <= chk.MaxTime) {
				vPostings = append(vPostings, p.At())
				continue Outer
			}
		}
	}

	if p.Err() != nil {
		return p.Err()
	}

	// Merge the current and new tombstones.
	tr := newMapTombstoneReader(ir.tombstones)
	str := newSimpleTombstoneReader(vPostings, []trange{{mint, maxt}})
	tombreader := newMergedTombstoneReader(tr, str)

	return writeTombstoneFile(pb.dir, tombreader)
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// stone holds the information on the posting and time-range
// that is deleted.
type stone struct {
	// ref is the series reference the deletion applies to.
	ref uint32
	// ranges are the deleted time ranges for that series.
	ranges []trange
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// TombstoneReader is the iterator over tombstones.
type TombstoneReader interface {
	// Next advances the iterator and reports whether another stone exists.
	Next() bool
	// At returns the stone at the current position.
	At() stone
	// Err returns any error encountered during iteration.
	Err() error
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// emptyTombstoneReader is a TombstoneReader over zero stones.
var emptyTombstoneReader = newMapTombstoneReader(make(map[uint32][]trange))
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// tombstoneReader iterates over the stones encoded in an on-disk
// tombstone file.
type tombstoneReader struct {
	stones []byte // Offset-table region: fixed-size (ref, offset) entries.
	idx    int    // Index of the current stone; -1 before the first Next.
	len    int    // Total number of stones in the file.

	b   []byte // Entire file contents; stored offsets index into this.
	err error  // First decoding error encountered, if any.
}
|
|
|
|
|
|
|
|
func newTombStoneReader(dir string) (*tombstoneReader, error) {
|
|
|
|
// TODO(gouthamve): MMAP?
|
|
|
|
b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename))
|
2017-05-14 09:06:26 +00:00
|
|
|
if err != nil {
|
2017-05-15 17:58:14 +00:00
|
|
|
return nil, err
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
offsetBytes := b[len(b)-8:]
|
|
|
|
d := &decbuf{b: offsetBytes}
|
|
|
|
off := d.be64int64()
|
|
|
|
if err := d.err(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
d = &decbuf{b: b[off:]}
|
2017-05-16 07:13:33 +00:00
|
|
|
numStones := d.be32int()
|
2017-05-15 17:58:14 +00:00
|
|
|
if err := d.err(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
return &tombstoneReader{
|
2017-05-16 07:13:33 +00:00
|
|
|
stones: b[off+4:],
|
2017-05-15 17:58:14 +00:00
|
|
|
idx: -1,
|
|
|
|
len: int(numStones),
|
|
|
|
|
|
|
|
b: b,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *tombstoneReader) Next() bool {
|
|
|
|
if t.err != nil {
|
|
|
|
return false
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
t.idx++
|
|
|
|
|
|
|
|
return t.idx < t.len
|
|
|
|
}
|
|
|
|
|
|
|
|
// At decodes and returns the stone at the current iterator position.
// Decoding failures are recorded in t.err and surfaced via Err; a
// partially decoded stone is returned in that case.
func (t *tombstoneReader) At() stone {
	// Each offset-table entry is a 4-byte ref plus an 8-byte offset.
	bytIdx := t.idx * (4 + 8)
	dat := t.stones[bytIdx : bytIdx+12]

	d := &decbuf{b: dat}
	ref := d.be32()
	off := d.be64int64()

	// Jump to the series' deleted-range data within the full file.
	d = &decbuf{b: t.b[off:]}
	numRanges := d.varint64()
	if err := d.err(); err != nil {
		t.err = err
		return stone{ref: ref}
	}

	dranges := make([]trange, 0, numRanges)
	for i := 0; i < int(numRanges); i++ {
		mint := d.varint64()
		maxt := d.varint64()
		if err := d.err(); err != nil {
			t.err = err
			return stone{ref: ref, ranges: dranges}
		}

		dranges = append(dranges, trange{mint, maxt})
	}

	return stone{ref: ref, ranges: dranges}
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// Err returns the first decoding error encountered, if any.
func (t *tombstoneReader) Err() error {
	return t.err
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// mapTombstoneReader iterates over an in-memory tombstone map in
// increasing ref order.
type mapTombstoneReader struct {
	refs []uint32 // Remaining refs to yield, sorted ascending.
	cur  uint32   // Ref of the current stone; 0 when exhausted.

	stones map[uint32][]trange
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
func newMapTombstoneReader(ts map[uint32][]trange) *mapTombstoneReader {
|
|
|
|
refs := make([]uint32, 0, len(ts))
|
|
|
|
for k := range ts {
|
|
|
|
refs = append(refs, k)
|
|
|
|
}
|
|
|
|
sort.Sort(uint32slice(refs))
|
|
|
|
return &mapTombstoneReader{stones: ts, refs: refs}
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
func (t *mapTombstoneReader) Next() bool {
|
|
|
|
if len(t.refs) > 0 {
|
|
|
|
t.cur = t.refs[0]
|
2017-05-16 07:13:33 +00:00
|
|
|
t.refs = t.refs[1:]
|
2017-05-15 17:58:14 +00:00
|
|
|
return true
|
|
|
|
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
t.cur = 0
|
|
|
|
return false
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// At returns the stone for the current ref.
func (t *mapTombstoneReader) At() stone {
	return stone{ref: t.cur, ranges: t.stones[t.cur]}
}
|
|
|
|
|
|
|
|
// Err always returns nil; in-memory iteration cannot fail.
func (t *mapTombstoneReader) Err() error {
	return nil
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// simpleTombstoneReader yields the same set of deleted time ranges for
// each of a fixed list of refs.
type simpleTombstoneReader struct {
	refs []uint32 // Refs to yield.
	cur  uint32   // Ref of the current stone; 0 when exhausted.

	ranges []trange // Ranges shared by every emitted stone.
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// newSimpleTombstoneReader returns a TombstoneReader that yields drange
// for every ref in refs.
func newSimpleTombstoneReader(refs []uint32, drange []trange) *simpleTombstoneReader {
	return &simpleTombstoneReader{refs: refs, ranges: drange}
}
|
|
|
|
|
|
|
|
func (t *simpleTombstoneReader) Next() bool {
|
|
|
|
if len(t.refs) > 0 {
|
|
|
|
t.cur = t.refs[0]
|
2017-05-14 09:06:26 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
t.cur = 0
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// At returns the current ref paired with the reader's shared ranges.
func (t *simpleTombstoneReader) At() stone {
	return stone{ref: t.cur, ranges: t.ranges}
}
|
|
|
|
|
|
|
|
// Err always returns nil; in-memory iteration cannot fail.
func (t *simpleTombstoneReader) Err() error {
	return nil
}
|
|
|
|
|
|
|
|
// mergedTombstoneReader merges two TombstoneReaders into a single
// ref-ordered stream, combining the ranges of refs present in both.
type mergedTombstoneReader struct {
	a, b TombstoneReader
	cur  stone // Current merged stone.

	initialized bool // Whether both inputs have been primed with Next.
	aok, bok    bool // Whether a/b currently hold a valid stone.
}
|
|
|
|
|
|
|
|
// newMergedTombstoneReader merges the stones of a and b into one
// ref-ordered stream.
func newMergedTombstoneReader(a, b TombstoneReader) *mergedTombstoneReader {
	return &mergedTombstoneReader{a: a, b: b}
}
|
|
|
|
|
|
|
|
func (t *mergedTombstoneReader) Next() bool {
|
|
|
|
if !t.initialized {
|
|
|
|
t.aok = t.a.Next()
|
|
|
|
t.bok = t.b.Next()
|
|
|
|
t.initialized = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if !t.aok && !t.bok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if !t.aok {
|
|
|
|
t.cur = t.b.At()
|
|
|
|
t.bok = t.b.Next()
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !t.bok {
|
|
|
|
t.cur = t.a.At()
|
|
|
|
t.aok = t.a.Next()
|
|
|
|
return true
|
|
|
|
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
acur, bcur := t.a.At(), t.b.At()
|
|
|
|
|
|
|
|
if acur.ref < bcur.ref {
|
|
|
|
t.cur = acur
|
|
|
|
t.aok = t.a.Next()
|
|
|
|
} else if acur.ref > bcur.ref {
|
|
|
|
t.cur = bcur
|
|
|
|
t.bok = t.b.Next()
|
|
|
|
} else {
|
|
|
|
t.cur = acur
|
|
|
|
// Merge time ranges.
|
|
|
|
for _, r := range bcur.ranges {
|
|
|
|
acur.ranges = addNewInterval(acur.ranges, r)
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
2017-05-15 17:58:14 +00:00
|
|
|
t.aok = t.a.Next()
|
|
|
|
t.bok = t.b.Next()
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
// At returns the current merged stone.
func (t *mergedTombstoneReader) At() stone {
	return t.cur
}
|
|
|
|
|
2017-05-15 17:58:14 +00:00
|
|
|
func (t *mergedTombstoneReader) Err() error {
|
|
|
|
if t.a.Err() != nil {
|
|
|
|
return t.a.Err()
|
|
|
|
}
|
|
|
|
return t.b.Err()
|
2017-05-14 09:06:26 +00:00
|
|
|
}
|
|
|
|
|
2017-02-27 09:46:15 +00:00
|
|
|
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
|
|
|
|
func walDir(dir string) string { return filepath.Join(dir, "wal") }
|
2016-12-15 07:31:26 +00:00
|
|
|
|
|
|
|
// mmapFile pairs an open file with its memory-mapped contents.
type mmapFile struct {
	f *os.File
	b []byte // The mapped region backed by f.
}
|
|
|
|
|
|
|
|
func openMmapFile(path string) (*mmapFile, error) {
|
2017-02-19 12:01:19 +00:00
|
|
|
f, err := os.Open(path)
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "try lock file")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
info, err := f.Stat()
|
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "stat")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
2017-02-19 12:01:19 +00:00
|
|
|
b, err := mmap(f, int(info.Size()))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "mmap")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &mmapFile{f: f, b: b}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *mmapFile) Close() error {
|
|
|
|
err0 := munmap(f.b)
|
|
|
|
err1 := f.f.Close()
|
|
|
|
|
|
|
|
if err0 != nil {
|
|
|
|
return err0
|
|
|
|
}
|
|
|
|
return err1
|
|
|
|
}
|