2017-05-26 15:56:31 +00:00
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2017-05-16 07:13:33 +00:00
package tsdb
2017-11-21 11:15:02 +00:00
import (
2018-12-05 16:34:42 +00:00
"context"
2019-03-24 20:33:08 +00:00
"encoding/binary"
2019-07-03 10:47:31 +00:00
2019-03-24 20:33:08 +00:00
"errors"
2019-12-04 07:37:49 +00:00
"hash/crc32"
2017-11-21 11:15:02 +00:00
"io/ioutil"
2018-10-23 21:35:52 +00:00
"math/rand"
2017-11-21 11:15:02 +00:00
"os"
2017-11-30 14:34:49 +00:00
"path/filepath"
2019-05-27 11:24:46 +00:00
"strconv"
2017-11-21 11:15:02 +00:00
"testing"
2017-12-07 01:06:14 +00:00
2018-10-23 21:35:52 +00:00
"github.com/go-kit/kit/log"
2019-11-18 19:53:33 +00:00
"github.com/prometheus/prometheus/pkg/labels"
2019-08-13 08:34:14 +00:00
"github.com/prometheus/prometheus/tsdb/chunks"
2019-11-18 19:53:33 +00:00
"github.com/prometheus/prometheus/tsdb/fileutil"
2019-08-13 08:34:14 +00:00
"github.com/prometheus/prometheus/tsdb/tsdbutil"
2019-08-14 09:07:02 +00:00
"github.com/prometheus/prometheus/util/testutil"
2017-11-21 11:15:02 +00:00
)
2018-02-12 10:40:12 +00:00
// In Prometheus 2.1.0 we had a bug where the meta.json version was falsely bumped
// to 2. We had a migration in place resetting it to 1 but we should move immediately to
// version 3 next time to avoid confusion and issues.
func TestBlockMetaMustNeverBeVersion2 ( t * testing . T ) {
dir , err := ioutil . TempDir ( "" , "metaversion" )
testutil . Ok ( t , err )
2019-03-19 13:31:57 +00:00
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( dir ) )
} ( )
2018-02-12 10:40:12 +00:00
2019-06-24 15:42:29 +00:00
_ , err = writeMetaFile ( log . NewNopLogger ( ) , dir , & BlockMeta { } )
testutil . Ok ( t , err )
2018-02-12 10:40:12 +00:00
2019-06-24 15:42:29 +00:00
meta , _ , err := readMetaFile ( dir )
2018-02-12 10:40:12 +00:00
testutil . Ok ( t , err )
testutil . Assert ( t , meta . Version != 2 , "meta.json version must never be 2" )
}
2017-11-21 11:15:02 +00:00
func TestSetCompactionFailed ( t * testing . T ) {
2018-02-23 15:04:50 +00:00
tmpdir , err := ioutil . TempDir ( "" , "test" )
2017-12-07 01:06:14 +00:00
testutil . Ok ( t , err )
2019-03-19 13:31:57 +00:00
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( tmpdir ) )
} ( )
2017-11-21 11:15:02 +00:00
2019-07-03 10:47:31 +00:00
blockDir := createBlock ( t , tmpdir , genSeries ( 1 , 1 , 0 , 1 ) )
2019-01-16 10:03:52 +00:00
b , err := OpenBlock ( nil , blockDir , nil )
2018-12-29 11:20:51 +00:00
testutil . Ok ( t , err )
2017-12-07 01:06:14 +00:00
testutil . Equals ( t , false , b . meta . Compaction . Failed )
testutil . Ok ( t , b . setCompactionFailed ( ) )
testutil . Equals ( t , true , b . meta . Compaction . Failed )
testutil . Ok ( t , b . Close ( ) )
2017-11-21 11:15:02 +00:00
2019-01-16 10:03:52 +00:00
b , err = OpenBlock ( nil , blockDir , nil )
2017-12-07 01:06:14 +00:00
testutil . Ok ( t , err )
testutil . Equals ( t , true , b . meta . Compaction . Failed )
2018-12-29 11:20:51 +00:00
testutil . Ok ( t , b . Close ( ) )
2017-11-21 11:15:02 +00:00
}
2019-03-19 13:31:57 +00:00
// TestCreateBlock checks that a block written by createBlock can be opened.
func TestCreateBlock(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "test")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(tmpdir))
	}()

	b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
	// Close only when the open succeeded, then assert the open error itself.
	if err == nil {
		testutil.Ok(t, b.Close())
	}
	testutil.Ok(t, err)
}
2019-03-24 20:33:08 +00:00
func TestCorruptedChunk ( t * testing . T ) {
for name , test := range map [ string ] struct {
corrFunc func ( f * os . File ) // Func that applies the corruption.
2019-12-04 07:37:49 +00:00
openErr error
queryErr error
2019-03-24 20:33:08 +00:00
} {
"invalid header size" : {
func ( f * os . File ) {
err := f . Truncate ( 1 )
testutil . Ok ( t , err )
} ,
2019-12-04 07:37:49 +00:00
errors . New ( "invalid segment header in segment 0: invalid size" ) ,
nil ,
2019-03-24 20:33:08 +00:00
} ,
"invalid magic number" : {
func ( f * os . File ) {
magicChunksOffset := int64 ( 0 )
_ , err := f . Seek ( magicChunksOffset , 0 )
testutil . Ok ( t , err )
// Set invalid magic number.
b := make ( [ ] byte , chunks . MagicChunksSize )
binary . BigEndian . PutUint32 ( b [ : chunks . MagicChunksSize ] , 0x00000000 )
n , err := f . Write ( b )
testutil . Ok ( t , err )
testutil . Equals ( t , chunks . MagicChunksSize , n )
} ,
errors . New ( "invalid magic number 0" ) ,
2019-12-04 07:37:49 +00:00
nil ,
2019-03-24 20:33:08 +00:00
} ,
"invalid chunk format version" : {
func ( f * os . File ) {
chunksFormatVersionOffset := int64 ( 4 )
_ , err := f . Seek ( chunksFormatVersionOffset , 0 )
testutil . Ok ( t , err )
// Set invalid chunk format version.
b := make ( [ ] byte , chunks . ChunksFormatVersionSize )
b [ 0 ] = 0
n , err := f . Write ( b )
testutil . Ok ( t , err )
testutil . Equals ( t , chunks . ChunksFormatVersionSize , n )
} ,
errors . New ( "invalid chunk format version 0" ) ,
2019-12-04 07:37:49 +00:00
nil ,
} ,
"chunk not enough bytes to read the chunk length" : {
func ( f * os . File ) {
// Truncate one byte after the segment header.
err := f . Truncate ( chunks . SegmentHeaderSize + 1 )
testutil . Ok ( t , err )
} ,
nil ,
errors . New ( "segment doesn't include enough bytes to read the chunk size data field - required:13, available:9" ) ,
} ,
"chunk not enough bytes to read the data" : {
func ( f * os . File ) {
fi , err := f . Stat ( )
testutil . Ok ( t , err )
err = f . Truncate ( fi . Size ( ) - 1 )
testutil . Ok ( t , err )
} ,
nil ,
errors . New ( "segment doesn't include enough bytes to read the chunk - required:26, available:25" ) ,
} ,
"checksum mismatch" : {
func ( f * os . File ) {
fi , err := f . Stat ( )
testutil . Ok ( t , err )
// Get the chunk data end offset.
chkEndOffset := int ( fi . Size ( ) ) - crc32 . Size
// Seek to the last byte of chunk data and modify it.
_ , err = f . Seek ( int64 ( chkEndOffset - 1 ) , 0 )
testutil . Ok ( t , err )
n , err := f . Write ( [ ] byte ( "x" ) )
testutil . Ok ( t , err )
testutil . Equals ( t , n , 1 )
} ,
nil ,
errors . New ( "checksum mismatch expected:cfc0526c, actual:34815eae" ) ,
2019-03-24 20:33:08 +00:00
} ,
} {
t . Run ( name , func ( t * testing . T ) {
tmpdir , err := ioutil . TempDir ( "" , "test_open_block_chunk_corrupted" )
testutil . Ok ( t , err )
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( tmpdir ) )
} ( )
2019-12-04 07:37:49 +00:00
series := newSeries ( map [ string ] string { "a" : "b" } , [ ] tsdbutil . Sample { sample { 1 , 1 } } )
blockDir := createBlock ( t , tmpdir , [ ] Series { series } )
2019-03-24 20:33:08 +00:00
files , err := sequenceFiles ( chunkDir ( blockDir ) )
testutil . Ok ( t , err )
testutil . Assert ( t , len ( files ) > 0 , "No chunk created." )
f , err := os . OpenFile ( files [ 0 ] , os . O_RDWR , 0666 )
testutil . Ok ( t , err )
// Apply corruption function.
test . corrFunc ( f )
testutil . Ok ( t , f . Close ( ) )
2019-12-04 07:37:49 +00:00
// Check open err.
b , err := OpenBlock ( nil , blockDir , nil )
if test . openErr != nil {
testutil . Equals ( t , test . openErr . Error ( ) , err . Error ( ) )
return
}
querier , err := NewBlockQuerier ( b , 0 , 1 )
testutil . Ok ( t , err )
set , err := querier . Select ( labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
testutil . Ok ( t , err )
// Check query err.
testutil . Equals ( t , false , set . Next ( ) )
testutil . Equals ( t , test . queryErr . Error ( ) , set . Err ( ) . Error ( ) )
2019-03-24 20:33:08 +00:00
} )
}
}
2019-06-24 15:42:29 +00:00
// TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize ( t * testing . T ) {
tmpdir , err := ioutil . TempDir ( "" , "test_blockSize" )
testutil . Ok ( t , err )
defer func ( ) {
testutil . Ok ( t , os . RemoveAll ( tmpdir ) )
} ( )
var (
blockInit * Block
expSizeInit int64
blockDirInit string
)
// Create a block and compare the reported size vs actual disk size.
{
blockDirInit = createBlock ( t , tmpdir , genSeries ( 10 , 1 , 1 , 100 ) )
blockInit , err = OpenBlock ( nil , blockDirInit , nil )
testutil . Ok ( t , err )
defer func ( ) {
testutil . Ok ( t , blockInit . Close ( ) )
} ( )
expSizeInit = blockInit . Size ( )
2019-11-12 02:40:16 +00:00
actSizeInit , err := fileutil . DirSize ( blockInit . Dir ( ) )
testutil . Ok ( t , err )
2019-06-24 15:42:29 +00:00
testutil . Equals ( t , expSizeInit , actSizeInit )
}
// Delete some series and check the sizes again.
{
2019-11-18 19:53:33 +00:00
testutil . Ok ( t , blockInit . Delete ( 1 , 10 , labels . MustNewMatcher ( labels . MatchRegexp , "" , ".*" ) ) )
2019-06-24 15:42:29 +00:00
expAfterDelete := blockInit . Size ( )
testutil . Assert ( t , expAfterDelete > expSizeInit , "after a delete the block size should be bigger as the tombstone file should grow %v > %v" , expAfterDelete , expSizeInit )
2019-11-12 02:40:16 +00:00
actAfterDelete , err := fileutil . DirSize ( blockDirInit )
2019-06-24 15:42:29 +00:00
testutil . Ok ( t , err )
testutil . Equals ( t , expAfterDelete , actAfterDelete , "after a delete reported block size doesn't match actual disk size" )
c , err := NewLeveledCompactor ( context . Background ( ) , nil , log . NewNopLogger ( ) , [ ] int64 { 0 } , nil )
testutil . Ok ( t , err )
blockDirAfterCompact , err := c . Compact ( tmpdir , [ ] string { blockInit . Dir ( ) } , nil )
testutil . Ok ( t , err )
blockAfterCompact , err := OpenBlock ( nil , filepath . Join ( tmpdir , blockDirAfterCompact . String ( ) ) , nil )
testutil . Ok ( t , err )
defer func ( ) {
testutil . Ok ( t , blockAfterCompact . Close ( ) )
} ( )
expAfterCompact := blockAfterCompact . Size ( )
2019-11-12 02:40:16 +00:00
actAfterCompact , err := fileutil . DirSize ( blockAfterCompact . Dir ( ) )
testutil . Ok ( t , err )
2019-06-24 15:42:29 +00:00
testutil . Assert ( t , actAfterDelete > actAfterCompact , "after a delete and compaction the block size should be smaller %v,%v" , actAfterDelete , actAfterCompact )
testutil . Equals ( t , expAfterCompact , actAfterCompact , "after a delete and compaction reported block size doesn't match actual disk size" )
}
}
2019-01-28 11:24:49 +00:00
// createBlock creates a block with given set of series and returns its dir.
func createBlock ( tb testing . TB , dir string , series [ ] Series ) string {
2019-06-07 13:41:44 +00:00
head := createHead ( tb , series )
compactor , err := NewLeveledCompactor ( context . Background ( ) , nil , log . NewNopLogger ( ) , [ ] int64 { 1000000 } , nil )
testutil . Ok ( tb , err )
testutil . Ok ( tb , os . MkdirAll ( dir , 0777 ) )
2019-07-03 10:47:31 +00:00
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this block intervals are always +1 than the total samples it includes.
ulid , err := compactor . Write ( dir , head , head . MinTime ( ) , head . MaxTime ( ) + 1 , nil )
2019-06-07 13:41:44 +00:00
testutil . Ok ( tb , err )
return filepath . Join ( dir , ulid . String ( ) )
}
func createHead ( tb testing . TB , series [ ] Series ) * Head {
2018-10-23 21:35:52 +00:00
head , err := NewHead ( nil , nil , nil , 2 * 60 * 60 * 1000 )
testutil . Ok ( tb , err )
defer head . Close ( )
2019-01-28 11:24:49 +00:00
app := head . Appender ( )
for _ , s := range series {
ref := uint64 ( 0 )
it := s . Iterator ( )
for it . Next ( ) {
t , v := it . At ( )
if ref != 0 {
err := app . AddFast ( ref , t , v )
2019-01-18 16:58:17 +00:00
if err == nil {
continue
}
2018-10-23 21:35:52 +00:00
}
2019-01-28 11:24:49 +00:00
ref , err = app . Add ( s . Labels ( ) , t , v )
2018-10-23 21:35:52 +00:00
testutil . Ok ( tb , err )
}
2019-01-28 11:24:49 +00:00
testutil . Ok ( tb , it . Err ( ) )
2018-10-23 21:35:52 +00:00
}
2019-01-28 11:24:49 +00:00
err = app . Commit ( )
testutil . Ok ( tb , err )
2019-06-07 13:41:44 +00:00
return head
2018-10-23 21:35:52 +00:00
}
2019-01-28 11:24:49 +00:00
2019-05-27 11:24:46 +00:00
// Label name/value stems used when generating synthetic series in genSeries.
const (
	defaultLabelName  = "labelName"
	defaultLabelValue = "labelValue"
)
2019-01-28 11:24:49 +00:00
// genSeries generates series with a given number of labels and values.
func genSeries ( totalSeries , labelCount int , mint , maxt int64 ) [ ] Series {
if totalSeries == 0 || labelCount == 0 {
return nil
}
2019-02-14 13:29:41 +00:00
series := make ( [ ] Series , totalSeries )
2019-06-07 13:41:44 +00:00
2019-01-28 11:24:49 +00:00
for i := 0 ; i < totalSeries ; i ++ {
lbls := make ( map [ string ] string , labelCount )
2019-05-27 11:24:46 +00:00
lbls [ defaultLabelName ] = strconv . Itoa ( i )
for j := 1 ; len ( lbls ) < labelCount ; j ++ {
lbls [ defaultLabelName + strconv . Itoa ( j ) ] = defaultLabelValue + strconv . Itoa ( j )
2019-01-28 11:24:49 +00:00
}
samples := make ( [ ] tsdbutil . Sample , 0 , maxt - mint + 1 )
2019-07-03 10:47:31 +00:00
for t := mint ; t < maxt ; t ++ {
2019-01-28 11:24:49 +00:00
samples = append ( samples , sample { t : t , v : rand . Float64 ( ) } )
}
series [ i ] = newSeries ( lbls , samples )
}
2019-02-14 13:29:41 +00:00
return series
}
// populateSeries generates series from given labels, mint and maxt.
func populateSeries ( lbls [ ] map [ string ] string , mint , maxt int64 ) [ ] Series {
if len ( lbls ) == 0 {
return nil
}
2019-01-28 11:24:49 +00:00
2019-02-14 13:29:41 +00:00
series := make ( [ ] Series , 0 , len ( lbls ) )
for _ , lbl := range lbls {
if len ( lbl ) == 0 {
continue
}
samples := make ( [ ] tsdbutil . Sample , 0 , maxt - mint + 1 )
for t := mint ; t <= maxt ; t ++ {
samples = append ( samples , sample { t : t , v : rand . Float64 ( ) } )
}
series = append ( series , newSeries ( lbl , samples ) )
}
2019-01-28 11:24:49 +00:00
return series
}