// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wlog

import (
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/pkg/errors"

	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/tombstones"
)

// CheckpointStats returns stats about a created checkpoint.
type CheckpointStats struct {
	DroppedSeries     int
	DroppedSamples    int // Includes histograms.
	DroppedTombstones int
	DroppedExemplars  int
	DroppedMetadata   int
	TotalSeries       int // Processed series including dropped ones.
	TotalSamples      int // Processed float and histogram samples including dropped ones.
	TotalTombstones   int // Processed tombstones including dropped ones.
	TotalExemplars    int // Processed exemplars including dropped ones.
	TotalMetadata     int // Processed metadata including dropped ones.
}

// LastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, ErrNotFound is returned.
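// For example (illustrative): if dir contains checkpoint.00000001 and
// checkpoint.00000005, it returns ("<dir>/checkpoint.00000005", 5, nil).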
func LastCheckpoint(dir string) (string, int, error) {
	checkpoints, err := listCheckpoints(dir)
	if err != nil {
		return "", 0, err
	}

	if len(checkpoints) == 0 {
		return "", 0, record.ErrNotFound
	}

	checkpoint := checkpoints[len(checkpoints)-1]
	return filepath.Join(dir, checkpoint.name), checkpoint.index, nil
}

// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
func DeleteCheckpoints(dir string, maxIndex int) error {
	checkpoints, err := listCheckpoints(dir)
	if err != nil {
		return err
	}

	errs := tsdb_errors.NewMulti()
	for _, checkpoint := range checkpoints {
		if checkpoint.index >= maxIndex {
			break
		}
		errs.Add(os.RemoveAll(filepath.Join(dir, checkpoint.name)))
	}
	return errs.Err()
}

// checkpointPrefix is the prefix of checkpoint directory names.
const checkpointPrefix = "checkpoint."

// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
// All series not satisfying keep, all samples/tombstones/exemplars below mint,
// and all metadata records except the latest one per series are dropped.
//
// The checkpoint is stored in a directory named checkpoint.N in the same
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
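//
// An illustrative call (a sketch; first, last, keepSeries and mint are
// caller-supplied, e.g. from head truncation):
//
//	stats, err := Checkpoint(logger, w, first, last, keepSeries, mint)
//	if err == nil {
//		// Segments up to `last` are now covered by the checkpoint and
//		// can be truncated from the live WAL.
//		err = w.Truncate(last + 1)
//	}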
func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
	stats := &CheckpointStats{}
	var sgmReader io.ReadCloser

	level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint)

	{
		var sgmRange []SegmentRange
		dir, idx, err := LastCheckpoint(w.Dir())
		if err != nil && err != record.ErrNotFound {
			return nil, errors.Wrap(err, "find last checkpoint")
		}
		last := idx + 1
		if err == nil {
			if from > last {
				return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from)
			}
			// Ignore WAL files below the checkpoint. They shouldn't exist to begin with.
			from = last

			sgmRange = append(sgmRange, SegmentRange{Dir: dir, Last: math.MaxInt32})
		}

		sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to})
		sgmReader, err = NewSegmentsRangeReader(sgmRange...)
		if err != nil {
			return nil, errors.Wrap(err, "create segment reader")
		}
		defer sgmReader.Close()
	}

	cpdir := checkpointDir(w.Dir(), to)
	cpdirtmp := cpdir + ".tmp"

	if err := os.RemoveAll(cpdirtmp); err != nil {
		return nil, errors.Wrap(err, "remove previous temporary checkpoint dir")
	}

	if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
		return nil, errors.Wrap(err, "create checkpoint dir")
	}
	cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled())
	if err != nil {
		return nil, errors.Wrap(err, "open checkpoint")
	}

	// Ensures that an early return caused by an error doesn't leave any tmp files.
	defer func() {
		cp.Close()
		os.RemoveAll(cpdirtmp)
	}()

	r := NewReader(sgmReader)

	var (
		series           []record.RefSeries
		samples          []record.RefSample
		histogramSamples []record.RefHistogramSample
		tstones          []tombstones.Stone
		exemplars        []record.RefExemplar
		metadata         []record.RefMetadata
		dec              record.Decoder
		enc              record.Encoder
		buf              []byte
		recs             [][]byte

		latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
	)

	for r.Next() {
		series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]

		// We don't reset the buffer since we batch up multiple records
		// before writing them to the checkpoint.
		// Remember where the record for this iteration starts.
		start := len(buf)
		rec := r.Record()

		switch dec.Type(rec) {
		case record.Series:
			series, err = dec.Series(rec, series)
			if err != nil {
				return nil, errors.Wrap(err, "decode series")
			}
			// Drop irrelevant series in place.
			repl := series[:0]
			for _, s := range series {
				if keep(s.Ref) {
					repl = append(repl, s)
				}
			}
			if len(repl) > 0 {
				buf = enc.Series(repl, buf)
			}
			stats.TotalSeries += len(series)
			stats.DroppedSeries += len(series) - len(repl)

		case record.Samples:
			samples, err = dec.Samples(rec, samples)
			if err != nil {
				return nil, errors.Wrap(err, "decode samples")
			}
			// Drop irrelevant samples in place.
			repl := samples[:0]
			for _, s := range samples {
				if s.T >= mint {
					repl = append(repl, s)
				}
			}
			if len(repl) > 0 {
				buf = enc.Samples(repl, buf)
			}
			stats.TotalSamples += len(samples)
			stats.DroppedSamples += len(samples) - len(repl)

		case record.HistogramSamples:
			histogramSamples, err = dec.HistogramSamples(rec, histogramSamples)
			if err != nil {
				return nil, errors.Wrap(err, "decode histogram samples")
			}
			// Drop irrelevant histogramSamples in place.
			repl := histogramSamples[:0]
			for _, h := range histogramSamples {
				if h.T >= mint {
					repl = append(repl, h)
				}
			}
			if len(repl) > 0 {
				buf = enc.HistogramSamples(repl, buf)
			}
			stats.TotalSamples += len(histogramSamples)
			stats.DroppedSamples += len(histogramSamples) - len(repl)

		case record.Tombstones:
			tstones, err = dec.Tombstones(rec, tstones)
			if err != nil {
				return nil, errors.Wrap(err, "decode deletes")
			}
			// Drop irrelevant tombstones in place. A stone is kept if any of
			// its intervals still overlaps the retained time range.
			repl := tstones[:0]
			for _, s := range tstones {
				for _, iv := range s.Intervals {
					if iv.Maxt >= mint {
						repl = append(repl, s)
						break
					}
				}
			}
			if len(repl) > 0 {
				buf = enc.Tombstones(repl, buf)
			}
			stats.TotalTombstones += len(tstones)
			stats.DroppedTombstones += len(tstones) - len(repl)

		case record.Exemplars:
			exemplars, err = dec.Exemplars(rec, exemplars)
			if err != nil {
				return nil, errors.Wrap(err, "decode exemplars")
			}
			// Drop irrelevant exemplars in place.
			repl := exemplars[:0]
			for _, e := range exemplars {
				if e.T >= mint {
					repl = append(repl, e)
				}
			}
			if len(repl) > 0 {
				buf = enc.Exemplars(repl, buf)
			}
			stats.TotalExemplars += len(exemplars)
			stats.DroppedExemplars += len(exemplars) - len(repl)

		case record.Metadata:
			metadata, err := dec.Metadata(rec, metadata)
			if err != nil {
				return nil, errors.Wrap(err, "decode metadata")
			}
			// Only keep reference to the latest found metadata for each refID.
			repl := 0
			for _, m := range metadata {
				if keep(m.Ref) {
					if _, ok := latestMetadataMap[m.Ref]; !ok {
						repl++
					}
					latestMetadataMap[m.Ref] = m
				}
			}
			stats.TotalMetadata += len(metadata)
			stats.DroppedMetadata += len(metadata) - repl

		default:
			// Unknown record type, probably from a future Prometheus version.
			continue
		}
		if len(buf[start:]) == 0 {
			continue // All contents discarded.
		}
		recs = append(recs, buf[start:])

		// Flush records in 1 MB increments.
		if len(buf) > 1*1024*1024 {
			if err := cp.Log(recs...); err != nil {
				return nil, errors.Wrap(err, "flush records")
			}
			buf, recs = buf[:0], recs[:0]
		}
	}

	// If we hit any corruption during checkpointing, repairing is not an option.
	// The head won't know which series records are lost.
	if r.Err() != nil {
		return nil, errors.Wrap(r.Err(), "read segments")
	}

	// Flush remaining records.
	if err := cp.Log(recs...); err != nil {
		return nil, errors.Wrap(err, "flush records")
	}

	// Flush latest metadata records for each series.
	if len(latestMetadataMap) > 0 {
		latestMetadata := make([]record.RefMetadata, 0, len(latestMetadataMap))
		for _, m := range latestMetadataMap {
			latestMetadata = append(latestMetadata, m)
		}
		if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil {
			return nil, errors.Wrap(err, "flush metadata records")
		}
	}

	if err := cp.Close(); err != nil {
		return nil, errors.Wrap(err, "close checkpoint")
	}

	// Sync temporary directory before rename.
	df, err := fileutil.OpenDir(cpdirtmp)
	if err != nil {
		return nil, errors.Wrap(err, "open temporary checkpoint directory")
	}
	if err := df.Sync(); err != nil {
		df.Close()
		return nil, errors.Wrap(err, "sync temporary checkpoint directory")
	}
	if err = df.Close(); err != nil {
		return nil, errors.Wrap(err, "close temporary checkpoint directory")
	}

	if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
		return nil, errors.Wrap(err, "rename checkpoint directory")
	}

	return stats, nil
}

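// checkpointDir returns the checkpoint directory path for the given index,
// zero-padded to eight digits, e.g. checkpointDir("wal", 123) ==
// "wal/checkpoint.00000123" (illustrative).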
func checkpointDir(dir string, i int) string {
	return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
}

// checkpointRef is a checkpoint directory name paired with its parsed index.
type checkpointRef struct {
	name  string
	index int
}

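// listCheckpoints returns all checkpoints in dir, sorted by index in
// ascending order. Entries whose name has the checkpoint prefix but no
// parsable index are skipped.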
func listCheckpoints(dir string) (refs []checkpointRef, err error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	for i := 0; i < len(files); i++ {
		fi := files[i]
		if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
			continue
		}
		if !fi.IsDir() {
			return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name())
		}
		idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
		if err != nil {
			continue
		}

		refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
	}

	sort.Slice(refs, func(i, j int) bool {
		return refs[i].index < refs[j].index
	})

	return refs, nil
}