Compact head block into persisted block

parent 3f72d5d027
commit 5aa7f7cce8

compact.go (150 lines changed)
@@ -2,7 +2,6 @@ package tsdb
 
 import (
     "fmt"
-    "math"
     "os"
     "path/filepath"
     "sync"
@@ -92,21 +91,31 @@ func (c *compactor) run() {
     for range c.triggerc {
         c.metrics.triggered.Inc()
 
-        bs := c.pick()
-        if len(bs) == 0 {
-            continue
-        }
+        // Compact as long as there are candidate blocks.
+        for {
+            rev := c.pick()
+            var bs []block
+            for _, b := range rev {
+                bs = append([]block{b}, bs...)
+            }
 
-        start := time.Now()
-        err := c.compact(bs...)
+            c.logger.Log("msg", "picked for compaction", "candidates", fmt.Sprintf("%v", bs))
 
-        c.metrics.ran.Inc()
-        c.metrics.duration.Observe(time.Since(start).Seconds())
+            if len(bs) == 0 {
+                break
+            }
 
-        if err != nil {
-            c.logger.Log("msg", "compaction failed", "err", err)
-            c.metrics.failed.Inc()
-            continue
+            start := time.Now()
+            err := c.compact(bs...)
+
+            c.metrics.ran.Inc()
+            c.metrics.duration.Observe(time.Since(start).Seconds())
+
+            if err != nil {
+                c.logger.Log("msg", "compaction failed", "err", err)
+                c.metrics.failed.Inc()
+                break
+            }
         }
 
         // Drain channel of signals triggered during compaction.
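A note on the rewritten loop: run() now keeps compacting until pick() yields no candidates, and it rebuilds the slice by prepending each element of rev, which reverses the order pick() hands back (compactable() in db.go below prepends as well, so candidates appear to arrive newest-first). A standalone sketch of that prepend-reversal idiom, with illustrative names that are not from the repo:

package main

import "fmt"

func main() {
    rev := []string{"block-3", "block-2", "block-1"} // e.g. newest-first
    var bs []string
    for _, b := range rev {
        bs = append([]string{b}, bs...) // prepending reverses the order
    }
    fmt.Println(bs) // [block-1 block-2 block-3], oldest-first again
}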
@@ -118,31 +127,35 @@ func (c *compactor) run() {
     close(c.donec)
 }
 
+const (
+    compactionMaxSize = 1 << 30 // 1GB
+    compactionBlocks  = 2
+)
+
 func (c *compactor) pick() []block {
     bs := c.blocks.compactable()
     if len(bs) == 0 {
         return nil
     }
-    if !bs[len(bs)-1].persisted() {
-        // TODO(fabxc): double check scoring function here or only do it here
-        // and trigger every X scrapes?
-        return bs[len(bs)-1:]
+    if len(bs) == 1 && !bs[0].persisted() {
+        return bs
     }
 
-    candidate := []block{}
-    trange := int64(math.MaxInt64)
-
-    for i, b := range bs[:len(bs)-1] {
-        r := bs[i+1].stats().MaxTime - b.stats().MinTime
-
-        if r < trange {
-            trange = r
-            candidate = bs[i : i+1]
+    for i := 0; i+1 < len(bs); i += 2 {
+        tpl := bs[i : i+2]
+        if compactionMatch(tpl) {
+            return tpl
         }
     }
+    return nil
+}
 
-    return candidate
+func compactionMatch(blocks []block) bool {
+    // TODO(fabxc): check whether combined size is below maxCompactionSize.
+    // Apply maximum time range? or number of series? – might already be covered by size implicitly.
+
+    // Blocks should be roughly equal in size.
+    return true
 }
 
 func (c *compactor) Close() error {
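The new pick() walks the candidates in disjoint adjacent pairs (compactionBlocks = 2) and returns the first pair that compactionMatch accepts, which is currently every pair. A self-contained sketch of that selection with ints standing in for blocks (hypothetical stand-ins, for illustration only):

package main

import "fmt"

// match stands in for compactionMatch, which accepts any pair for now.
func match(pair []int) bool { return true }

func pick(bs []int) []int {
    for i := 0; i+1 < len(bs); i += 2 {
        if tpl := bs[i : i+2]; match(tpl) {
            return tpl
        }
    }
    return nil // also reached when a trailing element has no partner
}

func main() {
    fmt.Println(pick([]int{10, 20, 30, 40, 50})) // [10 20]; 50 stays unpaired
}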
@@ -215,6 +228,11 @@ func (c *compactor) compact(blocks ...block) error {
     if err := renameDir(tmpdir, blocks[0].dir()); err != nil {
         return errors.Wrap(err, "rename dir")
     }
+    for _, b := range blocks[1:] {
+        if err := os.RemoveAll(b.dir()); err != nil {
+            return errors.Wrap(err, "delete dir")
+        }
+    }
 
     var merr MultiError
 
@@ -256,6 +274,7 @@ func (c *compactor) write(blocks []block, indexw IndexWriter, chunkw SeriesWrite
         if err := chunkw.WriteSeries(i, lset, chunks); err != nil {
             return err
         }
+        fmt.Println("next", lset, chunks)
 
         stats.ChunkCount += uint32(len(chunks))
         stats.SeriesCount++
@@ -367,9 +386,9 @@ func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta) {
 type compactionMerger struct {
     a, b compactionSet
 
-    adone, bdone bool
-    l            labels.Labels
-    c            []ChunkMeta
+    aok, bok bool
+    l        labels.Labels
+    c        []ChunkMeta
 }
 
 type compactionSeries struct {
@@ -384,17 +403,17 @@ func newCompactionMerger(a, b compactionSet) (*compactionMerger, error) {
     }
     // Initialize first elements of both sets as Next() needs
     // one element look-ahead.
-    c.adone = !c.a.Next()
-    c.bdone = !c.b.Next()
+    c.aok = c.a.Next()
+    c.bok = c.b.Next()
 
     return c, c.Err()
 }
 
 func (c *compactionMerger) compare() int {
-    if c.adone {
+    if !c.aok {
         return 1
     }
-    if c.bdone {
+    if !c.bok {
         return -1
     }
     a, _ := c.a.At()
@@ -403,7 +422,7 @@ func (c *compactionMerger) compare() int {
 }
 
 func (c *compactionMerger) Next() bool {
-    if c.adone && c.bdone || c.Err() != nil {
+    if !c.aok && !c.bok || c.Err() != nil {
         return false
     }
 
@@ -411,10 +430,10 @@ func (c *compactionMerger) Next() bool {
     // Both sets contain the current series. Chain them into a single one.
     if d > 0 {
         c.l, c.c = c.b.At()
-        c.bdone = !c.b.Next()
+        c.bok = c.b.Next()
     } else if d < 0 {
         c.l, c.c = c.a.At()
-        c.adone = !c.a.Next()
+        c.aok = c.a.Next()
     } else {
         l, ca := c.a.At()
         _, cb := c.b.At()
@@ -422,8 +441,8 @@ func (c *compactionMerger) Next() bool {
         c.l = l
         c.c = append(ca, cb...)
 
-        c.adone = !c.a.Next()
-        c.bdone = !c.b.Next()
+        c.aok = c.a.Next()
+        c.bok = c.b.Next()
     }
     return true
 }
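The adone/bdone to aok/bok rename flips the flag polarity: the flags now record whether each set still has a current element after the one-element look-ahead, rather than whether it is exhausted. A self-contained sketch of the same merge shape over sorted ints (an assumed simplification of compactionMerger, not repo code):

package main

import "fmt"

// merge advances whichever side currently holds the smaller element and
// takes both sides together on a tie, mirroring compactionMerger.Next.
func merge(a, b []int) []int {
    var out []int
    i, j := 0, 0
    aok, bok := i < len(a), j < len(b) // "still has an element" flags
    for aok || bok {
        switch {
        case !aok || (bok && b[j] < a[i]): // only b left, or b is smaller
            out = append(out, b[j])
            j++
        case !bok || a[i] < b[j]: // only a left, or a is smaller
            out = append(out, a[i])
            i++
        default: // equal keys: consume one element from each side
            out = append(out, a[i])
            i++
            j++
        }
        aok, bok = i < len(a), j < len(b)
    }
    return out
}

func main() {
    fmt.Println(merge([]int{1, 3, 5}, []int{2, 3, 6})) // [1 2 3 5 6]
}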
@@ -439,57 +458,6 @@ func (c *compactionMerger) At() (labels.Labels, []ChunkMeta) {
     return c.l, c.c
 }
 
-func persist(dir string, write func(IndexWriter, SeriesWriter) error) error {
-    tmpdir := dir + ".tmp"
-
-    // Write to temporary directory to make persistence appear atomic.
-    if fileutil.Exist(tmpdir) {
-        if err := os.RemoveAll(tmpdir); err != nil {
-            return err
-        }
-    }
-    if err := fileutil.CreateDirAll(tmpdir); err != nil {
-        return err
-    }
-
-    chunkf, err := fileutil.LockFile(chunksFileName(tmpdir), os.O_WRONLY|os.O_CREATE, 0666)
-    if err != nil {
-        return err
-    }
-    indexf, err := fileutil.LockFile(indexFileName(tmpdir), os.O_WRONLY|os.O_CREATE, 0666)
-    if err != nil {
-        return err
-    }
-
-    indexw := newIndexWriter(indexf)
-    chunkw := newSeriesWriter(chunkf, indexw)
-
-    if err := write(indexw, chunkw); err != nil {
-        return err
-    }
-
-    if err := chunkw.Close(); err != nil {
-        return err
-    }
-    if err := indexw.Close(); err != nil {
-        return err
-    }
-    if err := fileutil.Fsync(chunkf.File); err != nil {
-        return err
-    }
-    if err := fileutil.Fsync(indexf.File); err != nil {
-        return err
-    }
-    if err := chunkf.Close(); err != nil {
-        return err
-    }
-    if err := indexf.Close(); err != nil {
-        return err
-    }
-
-    return renameDir(tmpdir, dir)
-}
-
 func renameDir(from, to string) error {
     if err := os.RemoveAll(to); err != nil {
         return err
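The standalone persist() helper is deleted above; the write-to-a-temporary-directory-then-rename trick it used survives in compact() through renameDir. A minimal standard-library sketch of that atomicity pattern (directory and file names are illustrative, and the deleted code additionally locked and fsynced the files):

package main

import (
    "os"
    "path/filepath"
)

// persistAtomic never lets readers observe a half-written block directory:
// all writes go to dir+".tmp" and a single rename publishes the result.
func persistAtomic(dir string, data []byte) error {
    tmpdir := dir + ".tmp"
    if err := os.RemoveAll(tmpdir); err != nil { // clear leftovers of a crash
        return err
    }
    if err := os.MkdirAll(tmpdir, 0o777); err != nil {
        return err
    }
    if err := os.WriteFile(filepath.Join(tmpdir, "chunks"), data, 0o666); err != nil {
        return err
    }
    if err := os.RemoveAll(dir); err != nil { // as renameDir does
        return err
    }
    return os.Rename(tmpdir, dir)
}

func main() {
    if err := persistAtomic("block-0001", []byte("...")); err != nil {
        panic(err)
    }
}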
db.go (14 lines changed)
@@ -335,7 +335,7 @@ func (s *Shard) appendBatch(samples []hashedSample) error {
     }
 
     // TODO(fabxc): randomize over time and use better scoring function.
-    if head.bstats.SampleCount/(uint64(head.bstats.ChunkCount)+1) > 500 {
+    if head.bstats.SampleCount/(uint64(head.bstats.ChunkCount)+1) > 250 {
         if err := s.cut(); err != nil {
             s.logger.Log("msg", "cut failed", "err", err)
         }
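The cut threshold halves from 500 to 250 average samples per chunk; the +1 in the divisor keeps a fresh head block with zero chunks from dividing by zero. A quick check of the arithmetic with invented numbers:

package main

import "fmt"

func main() {
    var sampleCount uint64 = 60000
    var chunkCount uint64 = 200
    avg := sampleCount / (chunkCount + 1) // integer division, as in the diff
    fmt.Println(avg, avg > 250)           // 298 true: this head would be cut
}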
@@ -383,12 +383,6 @@ func (s *Shard) reinit(dir string) error {
         return nil
     }
 
-    // If a block dir has to be reinitialized and it wasn't a deletion,
-    // it has to be a newly persisted or compacted one.
-    if !fileutil.Exist(chunksFileName(dir)) {
-        return errors.New("no chunk file for new block dir")
-    }
-
     // Remove a previous head block.
     if i, ok := s.headForDir(dir); ok {
         if err := s.heads[i].Close(); err != nil {
@@ -419,7 +413,7 @@ func (s *Shard) reinit(dir string) error {
 func (s *Shard) compactable() []block {
     var blocks []block
     for _, pb := range s.persisted {
-        blocks = append(blocks, pb)
+        blocks = append([]block{pb}, blocks...)
     }
 
     // threshold := s.heads[len(s.heads)-1].bstats.MaxTime - headGracePeriod
@@ -430,7 +424,7 @@ func (s *Shard) compactable() []block {
     // }
     // }
     for _, hb := range s.heads[:len(s.heads)-1] {
-        blocks = append(blocks, hb)
+        blocks = append([]block{hb}, blocks...)
     }
 
     return blocks
@@ -565,7 +559,7 @@ type MultiError []error
 func (es MultiError) Error() string {
     var buf bytes.Buffer
 
-    if len(es) > 0 {
+    if len(es) > 1 {
         fmt.Fprintf(&buf, "%d errors: ", len(es))
     }
 
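With len(es) > 0, even a single wrapped error was prefixed "1 errors: "; the change to len(es) > 1 reserves the count prefix for genuine multi-error cases. A sketch of both behaviors with a minimal reimplementation (the repo's joining of the individual messages is not shown in this diff, so the "; " separator here is an assumption):

package main

import (
    "bytes"
    "errors"
    "fmt"
)

type MultiError []error

func (es MultiError) Error() string {
    var buf bytes.Buffer
    if len(es) > 1 { // was > 0: a lone error also got a "1 errors: " prefix
        fmt.Fprintf(&buf, "%d errors: ", len(es))
    }
    for i, err := range es {
        if i != 0 {
            buf.WriteString("; ") // assumed separator, not from this diff
        }
        buf.WriteString(err.Error())
    }
    return buf.String()
}

func main() {
    fmt.Println(MultiError{errors.New("a")})                  // a
    fmt.Println(MultiError{errors.New("a"), errors.New("b")}) // 2 errors: a; b
}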
head.go (47 lines changed)
@@ -270,50 +270,3 @@ func (h *HeadBlock) appendBatch(samples []hashedSample) error {
 
     return nil
 }
-
-func (h *HeadBlock) persist(indexw IndexWriter, chunkw SeriesWriter) error {
-    if err := h.wal.Close(); err != nil {
-        return err
-    }
-
-    for ref, cd := range h.descs {
-        if err := chunkw.WriteSeries(uint32(ref), cd.lset, []ChunkMeta{
-            {
-                MinTime: cd.firstTimestamp,
-                MaxTime: cd.lastTimestamp,
-                Chunk:   cd.chunk,
-            },
-        }); err != nil {
-            return err
-        }
-    }
-
-    if err := indexw.WriteStats(h.bstats); err != nil {
-        return err
-    }
-    for n, v := range h.values {
-        s := make([]string, 0, len(v))
-        for x := range v {
-            s = append(s, x)
-        }
-
-        if err := indexw.WriteLabelIndex([]string{n}, s); err != nil {
-            return err
-        }
-    }
-
-    for t := range h.postings.m {
-        if err := indexw.WritePostings(t.name, t.value, h.postings.get(t)); err != nil {
-            return err
-        }
-    }
-    // Write a postings list containing all series.
-    all := make([]uint32, len(h.descs))
-    for i := range all {
-        all[i] = uint32(i)
-    }
-    if err := indexw.WritePostings("", "", newListPostings(all)); err != nil {
-        return err
-    }
-    return nil
-}
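With HeadBlock.persist removed, writing out a head block goes through the compactor's generic write path, which has to produce the same per-series ChunkMeta entries the deleted loop built from each chunk descriptor. A reduced sketch of that mapping, with the types cut down to the fields visible in the deleted code (everything beyond those fields is assumption):

package main

import "fmt"

// Trimmed to the fields the deleted HeadBlock.persist actually touched.
type ChunkMeta struct {
    MinTime, MaxTime int64
}

type chunkDesc struct {
    firstTimestamp, lastTimestamp int64
}

func main() {
    descs := []chunkDesc{{1000, 2000}, {1500, 2500}}
    for ref, cd := range descs {
        // The shape persist() handed to chunkw.WriteSeries per series ref.
        meta := ChunkMeta{MinTime: cd.firstTimestamp, MaxTime: cd.lastTimestamp}
        fmt.Println(ref, meta)
    }
}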