// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"os"
	"path/filepath"
	"strconv"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
)

// Segment header fields constants.
const (
	// MagicChunks is 4 bytes at the head of a series file.
	MagicChunks = 0x85BD40DD
	// MagicChunksSize is the size in bytes of MagicChunks.
	MagicChunksSize          = 4
	chunksFormatV1           = 1
	ChunksFormatVersionSize  = 1
	segmentHeaderPaddingSize = 3
	// SegmentHeaderSize defines the total size of the header part.
	SegmentHeaderSize = MagicChunksSize + ChunksFormatVersionSize + segmentHeaderPaddingSize
)
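
// For reference, the 8 byte segment header written by cutSegmentFile below is
// laid out as follows (a sketch derived from the constants above):
//
//	bytes 0-3: MagicChunks (big endian)
//	byte  4:   chunk format version (chunksFormatV1)
//	bytes 5-7: zero padding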

// Chunk fields constants.
const (
	// MaxChunkLengthFieldSize defines the maximum size of the data length part.
	MaxChunkLengthFieldSize = binary.MaxVarintLen32
	// ChunkEncodingSize defines the size of the chunk encoding part.
	ChunkEncodingSize = 1
)

// ChunkRef is a generic reference for reading chunk data. In Prometheus it
// is either a HeadChunkRef or BlockChunkRef, though other implementations
// may have their own reference types.
type ChunkRef uint64

// HeadSeriesRef refers to in-memory series.
type HeadSeriesRef uint64

// HeadChunkRef packs a HeadSeriesRef and a ChunkID into a global 8 byte ID.
// The HeadSeriesRef and ChunkID may not exceed 5 and 3 bytes respectively.
type HeadChunkRef uint64

func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef {
	if hsr > (1<<40)-1 {
		panic("series ID exceeds 5 bytes")
	}
	if chunkID > (1<<24)-1 {
		panic("chunk ID exceeds 3 bytes")
	}
	return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID))
}

func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
	return HeadSeriesRef(p >> 24), HeadChunkID((p << 40) >> 40)
}
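
// For illustration, packing series 1 with chunk ID 2 and unpacking it again:
//
//	ref := NewHeadChunkRef(1, 2) // == 1<<24 | 2
//	hsr, cid := ref.Unpack()     // hsr == 1, cid == 2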

// HeadChunkID refers to a specific chunk in a series (memSeries) in the Head.
// Each memSeries has its own monotonically increasing number to refer to its chunks.
// If the HeadChunkID value is...
//   - memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk.
//   - less than the above, but >= memSeries.firstChunkID, then it's
//     memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstChunkID.
//
// If memSeries.headChunks is non-nil it points to a *memChunk that holds the current
// "open" (accepting appends) instance. *memChunk is a linked list and the memChunk.next pointer
// might link to an older *memChunk instance.
// If there are multiple *memChunk instances linked to each other from memSeries.headChunks
// they will be m-mapped as soon as possible, leaving only the "open" *memChunk instance.
//
// Example: assume memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
//
//	| HeadChunkID value | refers to ...                                                                           |
//	|-------------------|-----------------------------------------------------------------------------------------|
//	| 0-6               | chunks that have been compacted to blocks, these won't return data for queries in Head   |
//	| 7-11              | memSeries.mmappedChunks[i] where i is 0 to 4.                                             |
//	| 12                | *memChunk{next: nil}                                                                      |
//	| 13                | *memChunk{next: ^}                                                                        |
//	| 14                | memSeries.headChunks -> *memChunk{next: ^}                                                |
type HeadChunkID uint64

// BlockChunkRef refers to a chunk within a persisted block.
// The upper 4 bytes are for the segment index and
// the lower 4 bytes are for the segment offset where the data starts for this chunk.
type BlockChunkRef uint64

// NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef.
func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef {
	return BlockChunkRef(fileIndex<<32 | fileOffset)
}

func (b BlockChunkRef) Unpack() (int, int) {
	sgmIndex := int(b >> 32)
	chkStart := int((b << 32) >> 32)
	return sgmIndex, chkStart
}
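
// For illustration, a chunk starting at byte offset 1024 of segment file 3
// would be referenced as:
//
//	ref := NewBlockChunkRef(3, 1024) // == 3<<32 | 1024
//	seq, off := ref.Unpack()         // seq == 3, off == 1024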

// Meta holds information about one or more chunks.
// For examples of when chunks.Meta could refer to multiple chunks, see
// ChunkReader.ChunkOrIterable().
type Meta struct {
	// Ref and Chunk hold either a reference that can be used to retrieve
	// chunk data or the data itself.
	// If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the
	// chunk and assign it to the Chunk field. If an iterable is returned from
	// that method, then it may not be possible to set Chunk as the iterable
	// might form several chunks.
	Ref   ChunkRef
	Chunk chunkenc.Chunk

	// Time range the data covers.
	// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
	MinTime, MaxTime int64

	// OOOLastRef, OOOLastMinTime and OOOLastMaxTime are kept as markers for
	// overlapping chunks.
	// These fields point to the last created out of order Chunk (the head) that existed
	// when Series() was called and was overlapping.
	// Series() and Chunk() method responses should be consistent for the same
	// query even if new data is added in between the calls.
	OOOLastRef                     ChunkRef
	OOOLastMinTime, OOOLastMaxTime int64
}

// ChunkFromSamples requires all samples to have the same type.
func ChunkFromSamples(s []Sample) (Meta, error) {
	return ChunkFromSamplesGeneric(SampleSlice(s))
}

// ChunkFromSamplesGeneric requires all samples to have the same type.
func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
	emptyChunk := Meta{Chunk: chunkenc.NewXORChunk()}
	mint, maxt := int64(0), int64(0)
	if s.Len() > 0 {
		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
	}

	if s.Len() == 0 {
		return emptyChunk, nil
	}

	sampleType := s.Get(0).Type()
	c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
	if err != nil {
		return Meta{}, err
	}

	ca, _ := c.Appender()
	var newChunk chunkenc.Chunk

	for i := 0; i < s.Len(); i++ {
		switch sampleType {
		case chunkenc.ValFloat:
			ca.Append(s.Get(i).T(), s.Get(i).F())
		case chunkenc.ValHistogram:
			newChunk, _, ca, err = ca.AppendHistogram(nil, s.Get(i).T(), s.Get(i).H(), false)
			if err != nil {
				return emptyChunk, err
			}
			if newChunk != nil {
				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
			}
		case chunkenc.ValFloatHistogram:
			newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
			if err != nil {
				return emptyChunk, err
			}
			if newChunk != nil {
				return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
			}
		default:
			panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
		}
	}
	return Meta{
		MinTime: mint,
		MaxTime: maxt,
		Chunk:   c,
	}, nil
}

// PopulatedChunk creates a chunk populated with samples every second starting at minTime.
func PopulatedChunk(numSamples int, minTime int64) (Meta, error) {
	samples := make([]Sample, numSamples)
	for i := 0; i < numSamples; i++ {
		samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}
	}
	return ChunkFromSamples(samples)
}
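
// For illustration, PopulatedChunk(3, 1000) returns a Meta with MinTime=1000,
// MaxTime=3000 and a float chunk holding the samples (1000, 1.0), (2000, 1.0)
// and (3000, 1.0).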

// ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples.
// Used in tests to compare the content of chunks.
func ChunkMetasToSamples(chunks []Meta) (result []Sample) {
	if len(chunks) == 0 {
		return
	}

	for _, chunk := range chunks {
		it := chunk.Chunk.Iterator(nil)
		for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
			switch vt {
			case chunkenc.ValFloat:
				t, v := it.At()
				result = append(result, sample{t: t, f: v})
			case chunkenc.ValHistogram:
				t, h := it.AtHistogram(nil)
				result = append(result, sample{t: t, h: h})
			case chunkenc.ValFloatHistogram:
				t, fh := it.AtFloatHistogram(nil)
				result = append(result, sample{t: t, fh: fh})
			default:
				panic("unexpected value type")
			}
		}
	}
	return
}

// Iterator iterates over the chunks of a single time series.
type Iterator interface {
	// At returns the current meta.
	// It depends on implementation if the chunk is populated or not.
	At() Meta
	// Next advances the iterator by one.
	Next() bool
	// Err returns optional error if Next is false.
	Err() error
}

// writeHash writes the chunk encoding and raw data into the provided hash.
func (cm *Meta) writeHash(h hash.Hash, buf []byte) error {
	buf = append(buf[:0], byte(cm.Chunk.Encoding()))
	if _, err := h.Write(buf[:1]); err != nil {
		return err
	}
	if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
		return err
	}
	return nil
}

// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
	// The chunk itself is a closed interval [cm.MinTime, cm.MaxTime].
	return cm.MinTime <= maxt && mint <= cm.MaxTime
}
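
// For example, a chunk covering [5, 10] overlaps the range [10, 20] because
// both intervals include their boundaries.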

var errInvalidSize = fmt.Errorf("invalid size")

var castagnoliTable *crc32.Table

func init() {
	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}

// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
	return crc32.New(castagnoliTable)
}

// checkCRC32 checks whether the CRC32 of data matches the checksum stored in sum,
// which was computed when the chunk was written.
func checkCRC32(data, sum []byte) error {
	got := crc32.Checksum(data, castagnoliTable)
	// This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32.
	want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3])
	if got != want {
		return fmt.Errorf("checksum mismatch expected:%x, actual:%x", want, got)
	}
	return nil
}
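
// For illustration, a stored sum of {0x12, 0x34, 0x56, 0x78} decodes to the
// expected checksum 0x12345678.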

// Writer implements the ChunkWriter interface for the standard
// serialization format.
type Writer struct {
	dirFile *os.File
	files   []*os.File
	wbuf    *bufio.Writer
	n       int64
	crc32   hash.Hash
	buf     [binary.MaxVarintLen32]byte

	segmentSize int64
}

const (
	// DefaultChunkSegmentSize is the default chunks segment size.
	DefaultChunkSegmentSize = 512 * 1024 * 1024
)

// NewWriterWithSegSize returns a new writer against the given directory
// and allows setting a custom size for the segments.
func NewWriterWithSegSize(dir string, segmentSize int64) (*Writer, error) {
	return newWriter(dir, segmentSize)
}

// NewWriter returns a new writer against the given directory
// using the default segment size.
func NewWriter(dir string) (*Writer, error) {
	return newWriter(dir, DefaultChunkSegmentSize)
}

func newWriter(dir string, segmentSize int64) (*Writer, error) {
	if segmentSize <= 0 {
		segmentSize = DefaultChunkSegmentSize
	}

	if err := os.MkdirAll(dir, 0o777); err != nil {
		return nil, err
	}
	dirFile, err := fileutil.OpenDir(dir)
	if err != nil {
		return nil, err
	}
	return &Writer{
		dirFile:     dirFile,
		n:           0,
		crc32:       newCRC32(),
		segmentSize: segmentSize,
	}, nil
}

func (w *Writer) tail() *os.File {
	if len(w.files) == 0 {
		return nil
	}
	return w.files[len(w.files)-1]
}

// finalizeTail writes all pending data to the current tail file,
// truncates its size, and closes it.
func (w *Writer) finalizeTail() error {
	tf := w.tail()
	if tf == nil {
		return nil
	}
	if err := w.wbuf.Flush(); err != nil {
		return err
	}
	if err := tf.Sync(); err != nil {
		return err
	}
	// As the file was pre-allocated, we truncate any superfluous zero bytes.
	off, err := tf.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if err := tf.Truncate(off); err != nil {
		return err
	}

	return tf.Close()
}

func (w *Writer) cut() error {
	// Sync current tail to disk and close.
	if err := w.finalizeTail(); err != nil {
		return err
	}

	n, f, _, err := cutSegmentFile(w.dirFile, MagicChunks, chunksFormatV1, w.segmentSize)
	if err != nil {
		return err
	}
	w.n = int64(n)

	w.files = append(w.files, f)
	if w.wbuf != nil {
		w.wbuf.Reset(f)
	} else {
		w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
	}

	return nil
}

func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) {
	p, seq, err := nextSequenceFile(dirFile.Name())
	if err != nil {
		return 0, nil, 0, fmt.Errorf("next sequence file: %w", err)
	}
	ptmp := p + ".tmp"
	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return 0, nil, 0, fmt.Errorf("open temp file: %w", err)
	}
	defer func() {
		if returnErr != nil {
			errs := tsdb_errors.NewMulti(returnErr)
			if f != nil {
				errs.Add(f.Close())
			}
			// Calling RemoveAll on a non-existent file does not return error.
			errs.Add(os.RemoveAll(ptmp))
			returnErr = errs.Err()
		}
	}()
	if allocSize > 0 {
		if err = fileutil.Preallocate(f, allocSize, true); err != nil {
			return 0, nil, 0, fmt.Errorf("preallocate: %w", err)
		}
	}
	if err = dirFile.Sync(); err != nil {
		return 0, nil, 0, fmt.Errorf("sync directory: %w", err)
	}

	// Write header metadata for new file.
	metab := make([]byte, SegmentHeaderSize)
	binary.BigEndian.PutUint32(metab[:MagicChunksSize], magicNumber)
	metab[4] = chunksFormat

	n, err := f.Write(metab)
	if err != nil {
		return 0, nil, 0, fmt.Errorf("write header: %w", err)
	}
	if err := f.Close(); err != nil {
		return 0, nil, 0, fmt.Errorf("close temp file: %w", err)
	}
	f = nil

	if err := fileutil.Rename(ptmp, p); err != nil {
		return 0, nil, 0, fmt.Errorf("replace file: %w", err)
	}

	f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
	if err != nil {
		return 0, nil, 0, fmt.Errorf("open final file: %w", err)
	}
	// Skip header for further writes.
	if _, err := f.Seek(int64(n), 0); err != nil {
		return 0, nil, 0, fmt.Errorf("seek in final file: %w", err)
	}
	return n, f, seq, nil
}

func (w *Writer) write(b []byte) error {
	n, err := w.wbuf.Write(b)
	w.n += int64(n)
	return err
}

// WriteChunks writes as many chunks as possible to the current segment,
// cuts a new segment when the current segment is full and
// writes the rest of the chunks in the new segment.
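// Each chunk written to a segment is laid out as
//
//	| len <uvarint> | encoding <1 byte> | data <bytes> | CRC32 <4 bytes> |
//
// which is what the per-chunk size estimate below accounts for.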
func (w *Writer) WriteChunks(chks ...Meta) error {
	var (
		batchSize  = int64(0)
		batchStart = 0
		batches    = make([][]Meta, 1)
		batchID    = 0
		firstBatch = true
	)

	for i, chk := range chks {
		// Each chunk contains: data length + encoding + the data itself + crc32.
		chkSize := int64(MaxChunkLengthFieldSize) // The data length is a variable length field so use the maximum possible value.
		chkSize += ChunkEncodingSize              // The chunk encoding.
		chkSize += int64(len(chk.Chunk.Bytes()))  // The data itself.
		chkSize += crc32.Size                     // The 4 bytes of crc32.
		batchSize += chkSize

		// Cut a new batch when it is not the first chunk (to avoid empty segments) and
		// the batch is too large to fit in the current segment.
		cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize)

		// If the segment already has some data,
		// the first batch size calculation should account for that.
		if firstBatch && w.n > SegmentHeaderSize {
			cutNewBatch = batchSize+w.n > w.segmentSize
			if cutNewBatch {
				firstBatch = false
			}
		}

		if cutNewBatch {
			batchStart = i
			batches = append(batches, []Meta{})
			batchID++
			batchSize = chkSize
		}
		batches[batchID] = chks[batchStart : i+1]
	}

	// Create a new segment when one doesn't already exist.
	if w.n == 0 {
		if err := w.cut(); err != nil {
			return err
		}
	}

	for i, chks := range batches {
		if err := w.writeChunks(chks); err != nil {
			return err
		}
		// Cut a new segment only when there are more chunks to write.
		// Avoid creating a new empty segment at the end of the write.
		if i < len(batches)-1 {
			if err := w.cut(); err != nil {
				return err
			}
		}
	}
	return nil
}

// writeChunks writes the chunks into the current segment irrespective
// of the configured segment size limit. A segment should have been already
// started before calling this.
func (w *Writer) writeChunks(chks []Meta) error {
	if len(chks) == 0 {
		return nil
	}

	seq := uint64(w.seq())
	for i := range chks {
		chk := &chks[i]

		chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n)))

		n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))

		if err := w.write(w.buf[:n]); err != nil {
			return err
		}
		w.buf[0] = byte(chk.Chunk.Encoding())
		if err := w.write(w.buf[:1]); err != nil {
			return err
		}
		if err := w.write(chk.Chunk.Bytes()); err != nil {
			return err
		}

		w.crc32.Reset()
		if err := chk.writeHash(w.crc32, w.buf[:]); err != nil {
			return err
		}
		if err := w.write(w.crc32.Sum(w.buf[:0])); err != nil {
			return err
		}
	}
	return nil
}

func (w *Writer) seq() int {
	return len(w.files) - 1
}

func (w *Writer) Close() error {
	if err := w.finalizeTail(); err != nil {
		return err
	}

	// Close the dir file; if we don't, renames will fail on Windows.
	return w.dirFile.Close()
}

// ByteSlice abstracts a byte slice.
type ByteSlice interface {
	Len() int
	Range(start, end int) []byte
}

type realByteSlice []byte

func (b realByteSlice) Len() int {
	return len(b)
}

func (b realByteSlice) Range(start, end int) []byte {
	return b[start:end]
}

// Reader implements a ChunkReader for a serialized byte stream
// of series data.
type Reader struct {
	// The underlying bytes holding the encoded series data.
	// Each slice holds the data for a different segment.
	bs   []ByteSlice
	cs   []io.Closer // Closers for resources behind the byte slices.
	size int64       // The total size of bytes in the reader.
	pool chunkenc.Pool
}

func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
	cr := Reader{pool: pool, bs: bs, cs: cs}
	for i, b := range cr.bs {
		if b.Len() < SegmentHeaderSize {
			return nil, fmt.Errorf("invalid segment header in segment %d: %w", i, errInvalidSize)
		}
		// Verify magic number.
		if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
			return nil, fmt.Errorf("invalid magic number %x", m)
		}

		// Verify chunk format version.
		if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
			return nil, fmt.Errorf("invalid chunk format version %d", v)
		}
		cr.size += int64(b.Len())
	}
	return &cr, nil
}

// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
	files, err := sequenceFiles(dir)
	if err != nil {
		return nil, err
	}
	if pool == nil {
		pool = chunkenc.NewPool()
	}

	var (
		bs []ByteSlice
		cs []io.Closer
	)
	for _, fn := range files {
		f, err := fileutil.OpenMmapFile(fn)
		if err != nil {
			return nil, tsdb_errors.NewMulti(
				fmt.Errorf("mmap files: %w", err),
				tsdb_errors.CloseAll(cs),
			).Err()
		}
		cs = append(cs, f)
		bs = append(bs, realByteSlice(f.Bytes()))
	}

	reader, err := newReader(bs, cs, pool)
	if err != nil {
		return nil, tsdb_errors.NewMulti(
			err,
			tsdb_errors.CloseAll(cs),
		).Err()
	}
	return reader, nil
}

func (s *Reader) Close() error {
	return tsdb_errors.CloseAll(s.cs)
}

// Size returns the size of the chunks.
func (s *Reader) Size() int64 {
	return s.size
}

// ChunkOrIterable returns a chunk from a given reference.
func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack()

	if sgmIndex >= len(s.bs) {
		return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex)
	}

	sgmBytes := s.bs[sgmIndex]

	if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
	}
	// With the minimum chunk length this should never cause us reading
	// over the end of the slice.
	c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
	chkDataLen, n := binary.Uvarint(c)
	if n <= 0 {
		return nil, nil, fmt.Errorf("reading chunk length failed with %d", n)
	}

	chkEncStart := chkStart + n
	chkEnd := chkEncStart + ChunkEncodingSize + int(chkDataLen) + crc32.Size
	chkDataStart := chkEncStart + ChunkEncodingSize
	chkDataEnd := chkEnd - crc32.Size

	if chkEnd > sgmBytes.Len() {
		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
	}

	sum := sgmBytes.Range(chkDataEnd, chkEnd)
	if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
		return nil, nil, err
	}

	chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
	chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
	chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
	return chk, nil, err
}

func nextSequenceFile(dir string) (string, int, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return "", 0, err
	}

	i := uint64(0)
	for _, f := range files {
		j, err := strconv.ParseUint(f.Name(), 10, 64)
		if err != nil {
			continue
		}
		// It is not necessary that we find the files in number order,
		// for example with '1000000' and '200000', '1000000' would come first.
		// Though this is a very rare case, we check anyway for the max id.
		if j > i {
			i = j
		}
	}
	return segmentFile(dir, int(i+1)), int(i + 1), nil
}
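
// segmentFile returns the name of the segment file with the given index inside
// baseDir; for example, index 42 maps to "<baseDir>/000042".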
func segmentFile(baseDir string, index int) string {
	return filepath.Join(baseDir, fmt.Sprintf("%0.6d", index))
}

func sequenceFiles(dir string) ([]string, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var res []string
	for _, fi := range files {
		if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
			continue
		}
		res = append(res, filepath.Join(dir, fi.Name()))
	}
	return res, nil
}