2017-04-10 18:59:45 +00:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
2017-06-06 12:45:54 +00:00
|
|
|
|
2017-04-10 18:59:45 +00:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
package tsdb
|
|
|
|
|
2016-12-15 07:31:26 +00:00
|
|
|
import (
|
2017-01-19 10:22:47 +00:00
|
|
|
"encoding/json"
|
|
|
|
"io/ioutil"
|
2016-12-15 07:31:26 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2017-10-09 13:21:46 +00:00
|
|
|
"sync"
|
2016-12-22 14:54:39 +00:00
|
|
|
|
2017-02-27 09:46:15 +00:00
|
|
|
"github.com/oklog/ulid"
|
2016-12-22 14:54:39 +00:00
|
|
|
"github.com/pkg/errors"
|
2017-11-30 14:34:49 +00:00
|
|
|
"github.com/prometheus/tsdb/chunkenc"
|
2017-08-08 15:35:34 +00:00
|
|
|
"github.com/prometheus/tsdb/chunks"
|
2017-11-30 14:34:49 +00:00
|
|
|
"github.com/prometheus/tsdb/index"
|
2017-05-14 09:06:26 +00:00
|
|
|
"github.com/prometheus/tsdb/labels"
|
2016-12-08 16:43:10 +00:00
|
|
|
)
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// IndexWriter serializes the index for a block of series data.
// The methods must be called in the order they are specified in.
type IndexWriter interface {
	// AddSymbols registers all string symbols that are encountered in series
	// and other indices.
	AddSymbols(sym map[string]struct{}) error

	// AddSeries populates the index writer with a series and its offsets
	// of chunks that the index can reference.
	// Implementations may require series to be inserted in increasing order by
	// their labels.
	// The reference numbers are used to resolve entries in postings lists that
	// are added later.
	AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error

	// WriteLabelIndex serializes an index from label names to values.
	// The passed in values are chained tuples of strings of the length of names.
	WriteLabelIndex(names []string, values []string) error

	// WritePostings writes a postings list for a single label pair.
	// The Postings here contain refs to the series that were added.
	WritePostings(name, value string, it index.Postings) error

	// Close writes any finalization and closes the resources associated with
	// the underlying writer.
	Close() error
}
|
|
|
|
|
|
|
|
// IndexReader provides reading access of serialized index data.
type IndexReader interface {
	// Symbols returns a set of string symbols that may occur in series' labels
	// and indices.
	Symbols() (map[string]struct{}, error)

	// LabelValues returns the possible label values for the given label names.
	LabelValues(names ...string) (index.StringTuples, error)

	// Postings returns the postings list iterator for the label pair.
	// The Postings here contain the offsets to the series inside the index.
	// Found IDs are not strictly required to point to a valid Series, e.g. during
	// background garbage collections.
	Postings(name, value string) (index.Postings, error)

	// SortedPostings returns a postings list that is reordered to be sorted
	// by the label set of the underlying series.
	SortedPostings(index.Postings) index.Postings

	// Series populates the given labels and chunk metas for the series identified
	// by the reference.
	// Returns ErrNotFound if the ref does not resolve to a known series.
	Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error

	// LabelIndices returns the label pairs for which indices exist.
	LabelIndices() ([][]string, error)

	// Close releases the underlying resources of the reader.
	Close() error
}
|
|
|
|
|
|
|
|
// StringTuples provides access to a sorted list of string tuples.
type StringTuples interface {
	// Len returns the total number of tuples in the list.
	Len() int
	// At returns the tuple at position i.
	At(i int) ([]string, error)
}
|
|
|
|
|
|
|
|
// ChunkWriter serializes a time block of chunked series data.
type ChunkWriter interface {
	// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
	// must be populated.
	// After returning successfully, the Ref fields in the ChunkMetas
	// are set and can be used to retrieve the chunks from the written data.
	WriteChunks(chunks ...chunks.Meta) error

	// Close writes any required finalization and closes the resources
	// associated with the underlying writer.
	Close() error
}
|
|
|
|
|
|
|
|
// ChunkReader provides reading access of serialized time series data.
type ChunkReader interface {
	// Chunk returns the series data chunk with the given reference.
	Chunk(ref uint64) (chunkenc.Chunk, error)

	// Close releases all underlying resources of the reader.
	Close() error
}
|
|
|
|
|
2017-10-02 13:56:57 +00:00
|
|
|
// BlockReader provides reading access to a data block.
type BlockReader interface {
	// Index returns an IndexReader over the block's data.
	Index() (IndexReader, error)

	// Chunks returns a ChunkReader over the block's data.
	Chunks() (ChunkReader, error)

	// Tombstones returns a TombstoneReader over the block's deleted data.
	Tombstones() (TombstoneReader, error)
}
|
2016-12-08 16:43:10 +00:00
|
|
|
|
2017-03-20 07:41:56 +00:00
|
|
|
// Appendable defines an entity to which data can be appended.
type Appendable interface {
	// Appender returns a new Appender against an underlying store.
	Appender() Appender
}
|
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// BlockMeta provides meta information about a block.
type BlockMeta struct {
	// Unique identifier for the block and its contents. Changes on compaction.
	ULID ulid.ULID `json:"ulid"`

	// MinTime and MaxTime specify the time range all samples
	// in the block are in.
	MinTime int64 `json:"minTime"`
	MaxTime int64 `json:"maxTime"`

	// Stats about the contents of the block.
	Stats BlockStats `json:"stats,omitempty"`

	// Information on compactions the block was created from.
	Compaction BlockMetaCompaction `json:"compaction"`
}
|
|
|
|
|
|
|
|
// BlockStats contains stats about contents of a block.
// All counters are totals over the whole block.
type BlockStats struct {
	NumSamples    uint64 `json:"numSamples,omitempty"`
	NumSeries     uint64 `json:"numSeries,omitempty"`
	NumChunks     uint64 `json:"numChunks,omitempty"`
	NumTombstones uint64 `json:"numTombstones,omitempty"`
}
|
|
|
|
|
|
|
|
// BlockMetaCompaction holds information about compactions a block went through.
type BlockMetaCompaction struct {
	// Maximum number of compaction cycles any source block has
	// gone through.
	Level int `json:"level"`
	// ULIDs of all source head blocks that went into the block.
	Sources []ulid.ULID `json:"sources,omitempty"`
	// Failed is set (and persisted) when a compaction attempt involving this
	// block failed; see setCompactionFailed.
	Failed bool `json:"failed,omitempty"`
}
|
|
|
|
|
2016-12-08 16:43:10 +00:00
|
|
|
const (
	// NOTE(review): these flags are not referenced anywhere in this file;
	// confirm where they are used (e.g. on-disk format headers) before
	// relying on their meaning or removing them.
	flagNone = 0
	flagStd  = 1
)
|
|
|
|
|
2017-01-19 10:22:47 +00:00
|
|
|
// blockMeta is the on-disk JSON representation of BlockMeta with an
// additional format version field.
type blockMeta struct {
	Version int `json:"version"`

	*BlockMeta
}
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// indexFilename is the name of the index file inside a block directory.
const indexFilename = "index"

// metaFilename is the name of the block metadata file inside a block directory.
const metaFilename = "meta.json"
|
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
// chunkDir returns the path of the chunks sub-directory within dir.
func chunkDir(dir string) string {
	return filepath.Join(dir, "chunks")
}
|
|
|
|
// walDir returns the path of the write-ahead-log sub-directory within dir.
func walDir(dir string) string {
	return filepath.Join(dir, "wal")
}
|
|
|
|
|
2017-01-19 13:01:38 +00:00
|
|
|
func readMetaFile(dir string) (*BlockMeta, error) {
|
|
|
|
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var m blockMeta
|
|
|
|
|
|
|
|
if err := json.Unmarshal(b, &m); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if m.Version != 1 {
|
|
|
|
return nil, errors.Errorf("unexpected meta file version %d", m.Version)
|
|
|
|
}
|
|
|
|
|
|
|
|
return m.BlockMeta, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeMetaFile(dir string, meta *BlockMeta) error {
|
2017-03-01 16:19:57 +00:00
|
|
|
// Make any changes to the file appear atomic.
|
|
|
|
path := filepath.Join(dir, metaFilename)
|
|
|
|
tmp := path + ".tmp"
|
|
|
|
|
|
|
|
f, err := os.Create(tmp)
|
2017-01-19 13:01:38 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
enc := json.NewEncoder(f)
|
|
|
|
enc.SetIndent("", "\t")
|
|
|
|
|
2017-04-28 13:45:30 +00:00
|
|
|
var merr MultiError
|
|
|
|
if merr.Add(enc.Encode(&blockMeta{Version: 1, BlockMeta: meta})); merr.Err() != nil {
|
|
|
|
merr.Add(f.Close())
|
2017-06-07 07:52:20 +00:00
|
|
|
return merr.Err()
|
2017-01-19 13:01:38 +00:00
|
|
|
}
|
|
|
|
if err := f.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-01 16:19:57 +00:00
|
|
|
return renameFile(tmp, path)
|
2017-01-19 13:01:38 +00:00
|
|
|
}
|
|
|
|
|
2017-10-25 22:12:13 +00:00
|
|
|
// Block represents a directory of time series data covering a continuous time range.
type Block struct {
	// mtx guards closing. Readers take the read lock before registering in
	// pendingReaders; Close takes the write lock to flip closing.
	mtx            sync.RWMutex
	closing        bool
	pendingReaders sync.WaitGroup

	dir  string
	meta BlockMeta

	// Shared readers kept open for the block's lifetime; handed out wrapped
	// so their Close only releases the pending-reader count.
	chunkr     ChunkReader
	indexr     IndexReader
	tombstones TombstoneReader
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
|
|
|
|
// to instantiate chunk structs.
|
2017-11-30 14:34:49 +00:00
|
|
|
func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) {
|
2017-01-19 18:45:52 +00:00
|
|
|
meta, err := readMetaFile(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-15 07:31:26 +00:00
|
|
|
|
2017-11-30 14:34:49 +00:00
|
|
|
cr, err := chunks.NewDirReader(chunkDir(dir), pool)
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-02-23 09:50:22 +00:00
|
|
|
return nil, err
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
2017-11-30 14:34:49 +00:00
|
|
|
ir, err := index.NewFileReader(filepath.Join(dir, "index"))
|
2016-12-15 07:31:26 +00:00
|
|
|
if err != nil {
|
2017-02-25 06:24:20 +00:00
|
|
|
return nil, err
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
|
2017-05-24 05:54:24 +00:00
|
|
|
tr, err := readTombstones(dir)
|
2017-05-16 14:18:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-05-19 06:22:15 +00:00
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
pb := &Block{
|
2017-05-26 15:56:31 +00:00
|
|
|
dir: dir,
|
|
|
|
meta: *meta,
|
|
|
|
chunkr: cr,
|
|
|
|
indexr: ir,
|
2017-05-24 05:54:24 +00:00
|
|
|
tombstones: tr,
|
2016-12-15 07:31:26 +00:00
|
|
|
}
|
|
|
|
return pb, nil
|
|
|
|
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
func (pb *Block) Close() error {
	// Mark the block as closing under the write lock so no new readers
	// can register via startRead.
	pb.mtx.Lock()
	pb.closing = true
	pb.mtx.Unlock()

	// Wait for all readers handed out via Index/Chunks/Tombstones to be closed.
	pb.pendingReaders.Wait()

	// Close every underlying resource, collecting errors instead of
	// stopping at the first failure.
	var merr MultiError

	merr.Add(pb.chunkr.Close())
	merr.Add(pb.indexr.Close())
	merr.Add(pb.tombstones.Close())

	return merr.Err()
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// String returns a human-readable identifier for the block: its ULID.
func (pb *Block) String() string {
	return pb.meta.ULID.String()
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// Dir returns the directory of the block.
func (pb *Block) Dir() string { return pb.dir }
|
|
|
|
|
|
|
|
// Meta returns meta information about the block.
// The returned value is a copy; mutating it does not affect the block.
func (pb *Block) Meta() BlockMeta { return pb.meta }
|
|
|
|
|
|
|
|
// ErrClosing is returned when a block is in the process of being closed.
var ErrClosing = errors.New("block is closing")
|
|
|
|
|
|
|
|
// startRead registers a new pending reader against the block.
// It fails with ErrClosing once Close has been initiated.
// The RLock/pendingReaders.Add ordering pairs with Close, which sets
// closing under the write lock before waiting on pendingReaders.
func (pb *Block) startRead() error {
	pb.mtx.RLock()
	defer pb.mtx.RUnlock()

	if pb.closing {
		return ErrClosing
	}
	pb.pendingReaders.Add(1)

	return nil
}
|
|
|
|
|
|
|
|
// Index returns a new IndexReader against the block data.
|
|
|
|
func (pb *Block) Index() (IndexReader, error) {
|
|
|
|
if err := pb.startRead(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return blockIndexReader{IndexReader: pb.indexr, b: pb}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Chunks returns a new ChunkReader against the block data.
|
|
|
|
func (pb *Block) Chunks() (ChunkReader, error) {
|
|
|
|
if err := pb.startRead(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
|
2017-05-16 14:18:28 +00:00
|
|
|
}
|
2016-12-15 15:14:33 +00:00
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// Tombstones returns a new TombstoneReader against the block data.
|
|
|
|
func (pb *Block) Tombstones() (TombstoneReader, error) {
|
|
|
|
if err := pb.startRead(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return blockTombstoneReader{TombstoneReader: pb.tombstones, b: pb}, nil
|
|
|
|
}
|
|
|
|
|
2017-11-21 11:15:02 +00:00
|
|
|
// setCompactionFailed marks the block's compaction as failed and persists
// the updated metadata to disk.
func (pb *Block) setCompactionFailed() error {
	pb.meta.Compaction.Failed = true
	return writeMetaFile(pb.dir, &pb.meta)
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// blockIndexReader wraps the block's shared IndexReader and keeps a
// reference to the block so Close can release the pending-reader count.
type blockIndexReader struct {
	IndexReader
	b *Block
}
|
|
|
|
|
|
|
|
// Close releases the read handle on the block. It does not close the
// underlying index reader, which is shared for the block's lifetime.
func (r blockIndexReader) Close() error {
	r.b.pendingReaders.Done()
	return nil
}
|
|
|
|
|
|
|
|
// blockTombstoneReader wraps the block's shared TombstoneReader and keeps a
// reference to the block so Close can release the pending-reader count.
type blockTombstoneReader struct {
	TombstoneReader
	b *Block
}
|
|
|
|
|
|
|
|
// Close releases the read handle on the block. It does not close the
// underlying tombstone reader, which is shared for the block's lifetime.
func (r blockTombstoneReader) Close() error {
	r.b.pendingReaders.Done()
	return nil
}
|
|
|
|
|
|
|
|
// blockChunkReader wraps the block's shared ChunkReader and keeps a
// reference to the block so Close can release the pending-reader count.
type blockChunkReader struct {
	ChunkReader
	b *Block
}
|
|
|
|
|
|
|
|
// Close releases the read handle on the block. It does not close the
// underlying chunk reader, which is shared for the block's lifetime.
func (r blockChunkReader) Close() error {
	r.b.pendingReaders.Done()
	return nil
}
|
|
|
|
|
|
|
|
// Delete matching series between mint and maxt in the block.
// Deletions are recorded as tombstones rather than removed from disk:
// the merged tombstone set and the updated meta file are persisted.
func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
	pb.mtx.Lock()
	defer pb.mtx.Unlock()

	if pb.closing {
		return ErrClosing
	}

	p, err := PostingsForMatchers(pb.indexr, ms...)
	if err != nil {
		return errors.Wrap(err, "select series")
	}

	ir := pb.indexr

	// Choose only valid postings which have chunks in the time-range.
	stones := memTombstones{}

	var lset labels.Labels
	var chks []chunks.Meta

Outer:
	for p.Next() {
		err := ir.Series(p.At(), &lset, &chks)
		if err != nil {
			return err
		}

		for _, chk := range chks {
			if intervalOverlap(mint, maxt, chk.MinTime, chk.MaxTime) {
				// Delete only until the current values and not beyond.
				// The interval is clamped to the series' overall chunk range,
				// taken from its first and last chunk.
				tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
				stones[p.At()] = Intervals{{tmin, tmax}}
				continue Outer
			}
		}
	}

	if p.Err() != nil {
		return p.Err()
	}

	// Merge the pre-existing tombstones into the new set so earlier
	// deletions are preserved.
	err = pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
		for _, iv := range ivs {
			stones.add(id, iv)
			pb.meta.Stats.NumTombstones++
		}
		return nil
	})
	if err != nil {
		return err
	}
	pb.tombstones = stones

	if err := writeTombstoneFile(pb.dir, pb.tombstones); err != nil {
		return err
	}
	return writeMetaFile(pb.dir, &pb.meta)
}
|
2017-05-14 09:06:26 +00:00
|
|
|
|
2017-11-22 12:34:50 +00:00
|
|
|
// CleanTombstones will rewrite the block if there any tombstones to remove them
|
|
|
|
// and returns if there was a re-write.
|
|
|
|
func (pb *Block) CleanTombstones(dest string, c Compactor) (bool, error) {
|
2017-11-30 07:45:23 +00:00
|
|
|
numStones := 0
|
|
|
|
|
|
|
|
pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
|
|
|
|
for _ = range ivs {
|
|
|
|
numStones++
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
if numStones == 0 {
|
2017-11-22 12:34:50 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2017-11-30 07:45:23 +00:00
|
|
|
if _, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime); err != nil {
|
2017-11-22 12:34:50 +00:00
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2017-10-09 13:21:46 +00:00
|
|
|
// Snapshot creates snapshot of the block into dir.
|
|
|
|
func (pb *Block) Snapshot(dir string) error {
|
2017-06-06 12:45:54 +00:00
|
|
|
blockDir := filepath.Join(dir, pb.meta.ULID.String())
|
|
|
|
if err := os.MkdirAll(blockDir, 0777); err != nil {
|
|
|
|
return errors.Wrap(err, "create snapshot block dir")
|
|
|
|
}
|
|
|
|
|
|
|
|
chunksDir := chunkDir(blockDir)
|
|
|
|
if err := os.MkdirAll(chunksDir, 0777); err != nil {
|
|
|
|
return errors.Wrap(err, "create snapshot chunk dir")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Hardlink meta, index and tombstones
|
2017-06-06 14:53:20 +00:00
|
|
|
for _, fname := range []string{
|
|
|
|
metaFilename,
|
|
|
|
indexFilename,
|
|
|
|
tombstoneFilename,
|
|
|
|
} {
|
2017-06-06 12:45:54 +00:00
|
|
|
if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
|
|
|
|
return errors.Wrapf(err, "create snapshot %s", fname)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Hardlink the chunks
|
|
|
|
curChunkDir := chunkDir(pb.dir)
|
|
|
|
files, err := ioutil.ReadDir(curChunkDir)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "ReadDir the current chunk dir")
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, f := range files {
|
|
|
|
err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "hardlink a chunk")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-26 15:56:31 +00:00
|
|
|
// clampInterval limits the interval [a, b] to lie within [mint, maxt].
func clampInterval(a, b, mint, maxt int64) (int64, int64) {
	lo, hi := a, b
	if lo < mint {
		lo = mint
	}
	if hi > maxt {
		hi = maxt
	}
	return lo, hi
}
|