// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"fmt"
	"io"
	"math"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/tsdb"
	"github.com/prometheus/tsdb/fileutil"
	"github.com/prometheus/tsdb/wal"

	"github.com/prometheus/prometheus/pkg/timestamp"
)

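// readPeriod is how often the watcher polls the current segment for new
// records, segmentCheckPeriod is how often it checks whether a newer segment
// has been created, and checkpointPeriod is how often it looks for a new
// checkpoint so that cached series can be garbage collected.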
const (
	readPeriod         = 10 * time.Millisecond
	checkpointPeriod   = 5 * time.Second
	segmentCheckPeriod = 100 * time.Millisecond
)

var (
	watcherRecordsRead = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "wal_watcher",
			Name:      "records_read_total",
			Help:      "Number of records read by the WAL watcher from the WAL.",
		},
		[]string{queue, "type"},
	)
	watcherRecordDecodeFails = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "wal_watcher",
			Name:      "record_decode_failures_total",
			Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
		},
		[]string{queue},
	)
	watcherSamplesSentPreTailing = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "wal_watcher",
			Name:      "samples_sent_pre_tailing_total",
			Help:      "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
		},
		[]string{queue},
	)
	watcherCurrentSegment = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "prometheus",
			Subsystem: "wal_watcher",
			Name:      "current_segment",
			Help:      "Current segment the WAL watcher is reading records from.",
		},
		[]string{queue},
	)
	liveReaderMetrics = wal.NewLiveReaderMetrics(prometheus.DefaultRegisterer)
)

func init() {
	prometheus.MustRegister(watcherRecordsRead)
	prometheus.MustRegister(watcherRecordDecodeFails)
	prometheus.MustRegister(watcherSamplesSentPreTailing)
	prometheus.MustRegister(watcherCurrentSegment)
}

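// writeTo is the sink the watcher feeds. StoreSeries caches series records,
// Append forwards samples, and SeriesReset drops series cached for segments
// older than the given index. In practice the remote write QueueManager
// implements this interface.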
type writeTo interface {
	Append([]tsdb.RefSample) bool
	StoreSeries([]tsdb.RefSeries, int)
	SeriesReset(int)
}

// WALWatcher watches the TSDB WAL for a given WriteTo.
type WALWatcher struct {
	name           string
	writer         writeTo
	logger         log.Logger
	walDir         string
	lastCheckpoint string

	startTime int64

	recordsReadMetric       *prometheus.CounterVec
	recordDecodeFailsMetric prometheus.Counter
	samplesSentPreTailing   prometheus.Counter
	currentSegmentMetric    prometheus.Gauge

	quit chan struct{}
	done chan struct{}

	// For testing, stop when we hit this segment.
	maxSegment int
}

// NewWALWatcher creates a new WAL watcher for a given WriteTo.
func NewWALWatcher(logger log.Logger, name string, writer writeTo, walDir string) *WALWatcher {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	return &WALWatcher{
		logger: logger,
		writer: writer,
		walDir: path.Join(walDir, "wal"),
		name:   name,
		quit:   make(chan struct{}),
		done:   make(chan struct{}),

		maxSegment: -1,
	}
}

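// A minimal usage sketch (identifiers here are illustrative). The writer must
// be a writeTo implementation, and the last argument is the storage directory
// containing the "wal" subdirectory:
//
//	watcher := NewWALWatcher(logger, "my-remote", writer, "/prometheus/data")
//	watcher.Start()
//	defer watcher.Stop()
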
func (w *WALWatcher) setMetrics() {
	// Set up the WAL watcher's metrics. We do this here rather than in the
	// constructor because of the ordering of creating queue managers,
	// stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig.
	w.recordsReadMetric = watcherRecordsRead.MustCurryWith(prometheus.Labels{queue: w.name})
	w.recordDecodeFailsMetric = watcherRecordDecodeFails.WithLabelValues(w.name)
	w.samplesSentPreTailing = watcherSamplesSentPreTailing.WithLabelValues(w.name)
	w.currentSegmentMetric = watcherCurrentSegment.WithLabelValues(w.name)
}

// Start the WALWatcher.
func (w *WALWatcher) Start() {
	w.setMetrics()
	level.Info(w.logger).Log("msg", "starting WAL watcher", "queue", w.name)

	go w.loop()
}

// Stop the WALWatcher.
func (w *WALWatcher) Stop() {
	close(w.quit)
	<-w.done

	// Records read metric has series and samples.
	watcherRecordsRead.DeleteLabelValues(w.name, "series")
	watcherRecordsRead.DeleteLabelValues(w.name, "samples")
	watcherRecordDecodeFails.DeleteLabelValues(w.name)
	watcherSamplesSentPreTailing.DeleteLabelValues(w.name)
	watcherCurrentSegment.DeleteLabelValues(w.name)

	level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name)
}

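// loop retries run until the watcher is stopped, waiting five seconds between
// attempts so a transient WAL failure does not spin.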
func (w *WALWatcher) loop() {
	defer close(w.done)

	// We may encounter failures processing the WAL; we should wait and retry.
	for !isClosed(w.quit) {
		w.startTime = timestamp.FromTime(time.Now())
		if err := w.run(); err != nil {
			level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
		}

		select {
		case <-w.quit:
			return
		case <-time.After(5 * time.Second):
		}
	}
}

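// run performs one pass over the WAL: it replays series records from the most
// recent checkpoint (if any), then tails segments from the one holding the
// checkpoint index onward, forwarding records to the writer.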
func (w *WALWatcher) run() error {
	_, lastSegment, err := w.firstAndLast()
	if err != nil {
		return errors.Wrap(err, "wal.Segments")
	}

	// Backfill from the checkpoint first if it exists.
	lastCheckpoint, checkpointIndex, err := tsdb.LastCheckpoint(w.walDir)
	if err != nil && err != tsdb.ErrNotFound {
		return errors.Wrap(err, "tsdb.LastCheckpoint")
	}

	if err == nil {
		if err = w.readCheckpoint(lastCheckpoint); err != nil {
			return errors.Wrap(err, "readCheckpoint")
		}
	}
	w.lastCheckpoint = lastCheckpoint

	currentSegment, err := w.findSegmentForIndex(checkpointIndex)
	if err != nil {
		return err
	}

	level.Debug(w.logger).Log("msg", "tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
	for !isClosed(w.quit) {
		w.currentSegmentMetric.Set(float64(currentSegment))
		level.Debug(w.logger).Log("msg", "processing segment", "currentSegment", currentSegment)

		// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
		if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
			return err
		}

		// For testing: stop when you hit a specific segment.
		if currentSegment == w.maxSegment {
			return nil
		}

		currentSegment++
	}

	return nil
}

// findSegmentForIndex finds the first segment greater than or equal to index.
func (w *WALWatcher) findSegmentForIndex(index int) (int, error) {
	refs, err := w.segments(w.walDir)
	if err != nil {
		return -1, err
	}

	for _, r := range refs {
		if r >= index {
			return r, nil
		}
	}

	return -1, errors.New("failed to find segment for index")
}

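// firstAndLast returns the indexes of the first and last segments in the WAL
// directory, or -1 for both if the directory contains no segments.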
func (w *WALWatcher) firstAndLast() (int, int, error) {
	refs, err := w.segments(w.walDir)
	if err != nil {
		return -1, -1, err
	}

	if len(refs) == 0 {
		return -1, -1, nil
	}
	return refs[0], refs[len(refs)-1], nil
}

// Copied from tsdb/wal/wal.go so we do not have to open a WAL.
// Plan is to move the WAL watcher to TSDB and dedupe these implementations.
func (w *WALWatcher) segments(dir string) ([]int, error) {
	files, err := fileutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	var refs []int
	var last int
	for _, fn := range files {
		k, err := strconv.Atoi(fn)
		if err != nil {
			continue
		}
		if len(refs) > 0 && k > last+1 {
			return nil, errors.New("segments are not sequential")
		}
		refs = append(refs, k)
		last = k
	}
	sort.Ints(refs)

	return refs, nil
}

// Use tail true to indicate that the reader is currently on a segment that is
// actively being written to. If false, assume it's a full segment and we're
// replaying it on start to cache the series records.
func (w *WALWatcher) watch(segmentNum int, tail bool) error {
	segment, err := wal.OpenReadSegment(wal.SegmentName(w.walDir, segmentNum))
	if err != nil {
		return err
	}
	defer segment.Close()

	reader := wal.NewLiveReader(w.logger, liveReaderMetrics, segment)

	readTicker := time.NewTicker(readPeriod)
	defer readTicker.Stop()

	checkpointTicker := time.NewTicker(checkpointPeriod)
	defer checkpointTicker.Stop()

	segmentTicker := time.NewTicker(segmentCheckPeriod)
	defer segmentTicker.Stop()

	// If we're replaying the segment we need to know the size of the file to know
	// when to return from watch and move on to the next segment.
	size := int64(math.MaxInt64)
	if !tail {
		segmentTicker.Stop()
		checkpointTicker.Stop()
		var err error
		size, err = getSegmentSize(w.walDir, segmentNum)
		if err != nil {
			return errors.Wrap(err, "getSegmentSize")
		}
	}

	for {
		select {
		case <-w.quit:
			return nil

		case <-checkpointTicker.C:
			// Periodically check if there is a new checkpoint so we can garbage
			// collect labels. As this is considered an optimisation, we ignore
			// errors during checkpoint processing.
			if err := w.garbageCollectSeries(segmentNum); err != nil {
				level.Warn(w.logger).Log("msg", "error processing checkpoint", "err", err)
			}

		case <-segmentTicker.C:
			_, last, err := w.firstAndLast()
			if err != nil {
				return errors.Wrap(err, "segments")
			}

			// Check if new segments exist.
			if last <= segmentNum {
				continue
			}

			err = w.readSegment(reader, segmentNum, tail)

			// Ignore errors reading to end of segment whilst replaying the WAL.
			if !tail {
				if err != nil && err != io.EOF {
					level.Warn(w.logger).Log("msg", "ignoring error reading to end of segment, may have dropped data", "err", err)
				} else if reader.Offset() != size {
					level.Warn(w.logger).Log("msg", "expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
				}
				return nil
			}

			// Otherwise, when we are tailing, non-EOFs are fatal.
			if err != io.EOF {
				return err
			}

			return nil

		case <-readTicker.C:
			err = w.readSegment(reader, segmentNum, tail)

			// Ignore all errors reading to end of segment whilst replaying the WAL.
			if !tail {
				if err != nil && err != io.EOF {
					level.Warn(w.logger).Log("msg", "ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
				} else if reader.Offset() != size {
					level.Warn(w.logger).Log("msg", "expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
				}
				return nil
			}

			// Otherwise, when we are tailing, non-EOFs are fatal.
			if err != io.EOF {
				return err
			}
		}
	}
}

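// garbageCollectSeries checks for a new checkpoint and, if one has appeared
// behind the segment we are currently reading, replays it and asks the writer
// to drop series cached for older segments.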
func (w *WALWatcher) garbageCollectSeries(segmentNum int) error {
	dir, _, err := tsdb.LastCheckpoint(w.walDir)
	if err != nil && err != tsdb.ErrNotFound {
		return errors.Wrap(err, "tsdb.LastCheckpoint")
	}

	if dir == "" || dir == w.lastCheckpoint {
		return nil
	}
	w.lastCheckpoint = dir

	index, err := checkpointNum(dir)
	if err != nil {
		return errors.Wrap(err, "error parsing checkpoint filename")
	}

	if index >= segmentNum {
		level.Debug(w.logger).Log("msg", "current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
		return nil
	}

	level.Debug(w.logger).Log("msg", "new checkpoint detected", "new", dir, "currentSegment", segmentNum)

	if err = w.readCheckpoint(dir); err != nil {
		return errors.Wrap(err, "readCheckpoint")
	}

	// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
	w.writer.SeriesReset(index)
	return nil
}

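// readSegment drains all records currently available from the live reader.
// Series records are always passed to the writer; sample records are only
// forwarded when tailing, and only samples newer than the watcher start time
// are sent.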
func (w *WALWatcher) readSegment(r *wal.LiveReader, segmentNum int, tail bool) error {
	var (
		dec     tsdb.RecordDecoder
		series  []tsdb.RefSeries
		samples []tsdb.RefSample
		send    []tsdb.RefSample
	)

	for r.Next() && !isClosed(w.quit) {
		rec := r.Record()
		w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc()

		switch dec.Type(rec) {
		case tsdb.RecordSeries:
			series, err := dec.Series(rec, series[:0])
			if err != nil {
				w.recordDecodeFailsMetric.Inc()
				return err
			}
			w.writer.StoreSeries(series, segmentNum)

		case tsdb.RecordSamples:
			// If we're not tailing a segment we can ignore any samples records we see.
			// This speeds up replay of the WAL by > 10x.
			if !tail {
				break
			}
			samples, err := dec.Samples(rec, samples[:0])
			if err != nil {
				w.recordDecodeFailsMetric.Inc()
				return err
			}
			for _, s := range samples {
				if s.T > w.startTime {
					send = append(send, s)
				}
			}
			if len(send) > 0 {
				// Blocks until the sample is sent to all remote write endpoints or closed (because enqueue blocks).
				w.writer.Append(send)
				send = send[:0]
			}

		case tsdb.RecordTombstones:
			// noop
		case tsdb.RecordInvalid:
			return errors.New("invalid record")

		default:
			w.recordDecodeFailsMetric.Inc()
			return errors.New("unknown TSDB record type")
		}
	}
	return r.Err()
}

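// recordType returns the label value used for the records-read metric for a
// given tsdb.RecordType.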
func recordType(rt tsdb.RecordType) string {
	switch rt {
	case tsdb.RecordInvalid:
		return "invalid"
	case tsdb.RecordSeries:
		return "series"
	case tsdb.RecordSamples:
		return "samples"
	case tsdb.RecordTombstones:
		return "tombstones"
	default:
		return "unknown"
	}
}

// Read all the series records from a checkpoint directory.
func (w *WALWatcher) readCheckpoint(checkpointDir string) error {
	level.Debug(w.logger).Log("msg", "reading checkpoint", "dir", checkpointDir)
	index, err := checkpointNum(checkpointDir)
	if err != nil {
		return errors.Wrap(err, "checkpointNum")
	}

	// Ensure we read the whole contents of every segment in the checkpoint dir.
	segs, err := w.segments(checkpointDir)
	if err != nil {
		return errors.Wrap(err, "unable to get segments in checkpoint dir")
	}
	for _, seg := range segs {
		size, err := getSegmentSize(checkpointDir, seg)
		if err != nil {
			return errors.Wrap(err, "getSegmentSize")
		}

		sr, err := wal.OpenReadSegment(wal.SegmentName(checkpointDir, seg))
		if err != nil {
			return errors.Wrap(err, "unable to open segment")
		}
		defer sr.Close()

		r := wal.NewLiveReader(w.logger, liveReaderMetrics, sr)
		if err := w.readSegment(r, index, false); err != io.EOF && err != nil {
			return errors.Wrap(err, "readSegment")
		}

		if r.Offset() != size {
			return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
		}
	}

	level.Debug(w.logger).Log("msg", "read series references from checkpoint", "checkpoint", checkpointDir)
	return nil
}

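// checkpointNum extracts the index from a checkpoint directory name, e.g.
// "checkpoint.000123" yields 123.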
func checkpointNum(dir string) (int, error) {
	// Checkpoint dir names are in the format checkpoint.000001
	// dir may contain a hidden directory, so only check the base directory.
	chunks := strings.Split(path.Base(dir), ".")
	if len(chunks) != 2 {
		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
	}

	result, err := strconv.Atoi(chunks[1])
	if err != nil {
		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
	}

	return result, nil
}

// Get size of segment.
func getSegmentSize(dir string, index int) (int64, error) {
	i := int64(-1)
	fi, err := os.Stat(wal.SegmentName(dir, index))
	if err == nil {
		i = fi.Size()
	}
	return i, err
}

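// isClosed reports, without blocking, whether the channel has been closed.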
func isClosed(c chan struct{}) bool {
	select {
	case <-c:
		return true
	default:
		return false
	}
}