2016-12-08 16:43:10 +00:00
|
|
|
package tsdb
|
|
|
|
|
2016-12-15 07:31:26 +00:00
|
|
|
import (
|
2017-01-19 10:22:47 +00:00
|
|
|
"encoding/json"
|
|
|
|
"io/ioutil"
|
2016-12-15 07:31:26 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"sort"
|
2016-12-22 14:54:39 +00:00
|
|
|
|
2017-01-03 09:09:20 +00:00
|
|
|
"github.com/coreos/etcd/pkg/fileutil"
|
2016-12-22 14:54:39 +00:00
|
|
|
"github.com/pkg/errors"
|
2016-12-08 16:43:10 +00:00
|
|
|
)
|
|
|
|
|
2017-01-10 14:28:22 +00:00
|
|
|
// Block handles reads against a Block of time series data.
type Block interface {
	// Dir returns the directory where the block's data is stored.
	Dir() string

	// Meta returns meta information about the block, such as its time
	// range, stats, and compaction generation.
	Meta() BlockMeta

	// Index returns an IndexReader over the block's data.
	Index() IndexReader

	// Series returns a SeriesReader over the block's data.
	Series() SeriesReader

	// Persisted returns whether the block is already persisted,
	// and no longer being appended to.
	Persisted() bool

	// Close releases all underlying resources of the block.
	Close() error
}
|
2016-12-08 16:43:10 +00:00
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// BlockMeta provides meta information about a block.
// It is serialized to/from the block's meta.json file (see blockMeta).
type BlockMeta struct {
	// Sequence number of the block.
	Sequence int `json:"sequence"`

	// MinTime and MaxTime specify the time range all samples
	// in the block are in.
	MinTime int64 `json:"minTime"`
	MaxTime int64 `json:"maxTime"`

	// Stats about the contents of the block.
	Stats struct {
		NumSamples uint64 `json:"numSamples,omitempty"`
		NumSeries uint64 `json:"numSeries,omitempty"`
		NumChunks uint64 `json:"numChunks,omitempty"`
	} `json:"stats,omitempty"`

	// Information on compactions the block was created from.
	Compaction struct {
		// Generation counts how many rounds of compaction produced
		// this block.
		Generation int `json:"generation"`
	} `json:"compaction"`
}
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
// Format flags written into persisted files.
const (
	flagNone = 0
	flagStd = 1
)
|
|
|
|
|
2016-12-15 07:31:26 +00:00
|
|
|
// persistedBlock is a Block backed by immutable, memory-mapped files
// on disk. It is no longer appended to.
type persistedBlock struct {
	dir string
	meta BlockMeta

	// Memory-mapped chunk and index files backing the readers below.
	chunksf, indexf *mmapFile

	// Readers decoding the mmapped chunk and index data.
	chunkr *seriesReader
	indexr *indexReader
}
|
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// blockMeta wraps BlockMeta with an on-disk format version for
// (de)serialization of the meta file.
type blockMeta struct {
	Version int `json:"version"`

	*BlockMeta
}
|
|
|
|
|
|
|
|
// metaFilename is the name of the JSON meta file inside a block directory.
const metaFilename = "meta.json"
|
|
|
|
|
2017-01-19 13:01:38 +00:00
|
|
|
func readMetaFile(dir string) (*BlockMeta, error) {
|
|
|
|
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var m blockMeta
|
|
|
|
|
|
|
|
if err := json.Unmarshal(b, &m); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if m.Version != 1 {
|
|
|
|
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
|
|
|
|
}
|
|
|
|
|
|
|
|
return m.BlockMeta, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeMetaFile(dir string, meta *BlockMeta) error {
|
|
|
|
f, err := os.Create(filepath.Join(dir, metaFilename))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
enc := json.NewEncoder(f)
|
|
|
|
enc.SetIndent("", "\t")
|
|
|
|
|
|
|
|
if err := enc.Encode(&blockMeta{Version: 1, BlockMeta: meta}); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := f.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-01-10 14:28:22 +00:00
|
|
|
func newPersistedBlock(dir string) (*persistedBlock, error) {
|
2017-01-19 18:45:52 +00:00
|
|
|
meta, err := readMetaFile(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-15 07:31:26 +00:00
|
|
|
|
2017-01-10 14:28:22 +00:00
|
|
|
chunksf, err := openMmapFile(chunksFileName(dir))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-03 14:43:26 +00:00
|
|
|
return nil, errors.Wrap(err, "open chunk file")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
2017-01-10 14:28:22 +00:00
|
|
|
indexf, err := openMmapFile(indexFileName(dir))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-03 14:43:26 +00:00
|
|
|
return nil, errors.Wrap(err, "open index file")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
sr, err := newSeriesReader(chunksf.b)
|
|
|
|
if err != nil {
|
2017-01-03 14:43:26 +00:00
|
|
|
return nil, errors.Wrap(err, "create series reader")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
ir, err := newIndexReader(sr, indexf.b)
|
|
|
|
if err != nil {
|
2017-01-03 14:43:26 +00:00
|
|
|
return nil, errors.Wrap(err, "create index reader")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
pb := &persistedBlock{
|
2017-01-10 14:28:22 +00:00
|
|
|
dir: dir,
|
2017-01-19 13:01:38 +00:00
|
|
|
meta: *meta,
|
2016-12-15 15:14:33 +00:00
|
|
|
chunksf: chunksf,
|
|
|
|
indexf: indexf,
|
2017-01-02 10:12:28 +00:00
|
|
|
chunkr: sr,
|
|
|
|
indexr: ir,
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
return pb, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pb *persistedBlock) Close() error {
|
|
|
|
err0 := pb.chunksf.Close()
|
|
|
|
err1 := pb.indexf.Close()
|
|
|
|
|
|
|
|
if err0 != nil {
|
|
|
|
return err0
|
|
|
|
}
|
|
|
|
return err1
|
|
|
|
}
|
|
|
|
|
2017-01-10 14:28:22 +00:00
|
|
|
// Dir returns the directory the block's data is stored in.
func (pb *persistedBlock) Dir() string { return pb.dir }

// Persisted reports whether the block is persisted; always true here.
func (pb *persistedBlock) Persisted() bool { return true }

// Index returns an IndexReader over the block's index data.
func (pb *persistedBlock) Index() IndexReader { return pb.indexr }

// Series returns a SeriesReader over the block's chunk data.
func (pb *persistedBlock) Series() SeriesReader { return pb.chunkr }

// Meta returns the block's meta information.
func (pb *persistedBlock) Meta() BlockMeta { return pb.meta }
|
2016-12-15 15:14:33 +00:00
|
|
|
|
2016-12-15 07:31:26 +00:00
|
|
|
// chunksFileName returns the location of the chunks file within path.
func chunksFileName(path string) string {
	const chunksName = "chunks-000"
	return filepath.Join(path, chunksName)
}
|
|
|
|
|
|
|
|
// indexFileName returns the location of the index file within path.
func indexFileName(path string) string {
	const indexName = "index-000"
	return filepath.Join(path, indexName)
}
|
|
|
|
|
|
|
|
// mmapFile pairs a locked file handle with its memory-mapped contents.
type mmapFile struct {
	f *fileutil.LockedFile
	b []byte
}
|
|
|
|
|
|
|
|
func openMmapFile(path string) (*mmapFile, error) {
|
2017-01-19 07:40:15 +00:00
|
|
|
// We have to open the file in RDWR for the lock to work with fileutil.
|
|
|
|
// TODO(fabxc): use own flock call that supports multi-reader.
|
|
|
|
f, err := fileutil.TryLockFile(path, os.O_RDWR, 0666)
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "try lock file")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
info, err := f.Stat()
|
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "stat")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 09:09:20 +00:00
|
|
|
b, err := mmap(f.File, int(info.Size()))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-01-19 07:40:15 +00:00
|
|
|
return nil, errors.Wrap(err, "mmap")
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &mmapFile{f: f, b: b}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *mmapFile) Close() error {
|
|
|
|
err0 := munmap(f.b)
|
|
|
|
err1 := f.f.Close()
|
|
|
|
|
|
|
|
if err0 != nil {
|
|
|
|
return err0
|
|
|
|
}
|
|
|
|
return err1
|
|
|
|
}
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
// A skiplist maps offsets to values. The values found in the data at an
// offset are strictly greater than the indexed value. It allows seeking
// to the region of data that may contain a given value.
type skiplist interface {
	// offset returns the offset to data containing values of x and lower.
	// The second return value is false if no such offset exists.
	offset(x int64) (uint32, bool)
}
|
|
|
|
|
|
|
|
// simpleSkiplist is a flat slice of value/offset pairs, sorted by value.
type simpleSkiplist []skiplistPair

// skiplistPair associates a value with the offset of the data region
// holding entries of that value and lower.
type skiplistPair struct {
	value int64
	offset uint32
}

// offset returns the offset to data containing values of x and lower.
// It reports false when no indexed region covers x.
func (sl simpleSkiplist) offset(x int64) (uint32, bool) {
	// Locate the first entry whose indexed value is at least x; the data
	// at its predecessor's offset is what may contain x.
	idx := sort.Search(len(sl), func(j int) bool { return sl[j].value >= x })

	// idx == len(sl): no entry covers x. idx == 0: the first entry has no
	// predecessor, so no offset actually contains values of x or lower.
	if idx > 0 && idx < len(sl) {
		return sl[idx-1].offset, true
	}
	return 0, false
}
|