From 79db04eb122f9ff9d56574f8139bb797d56a8d45 Mon Sep 17 00:00:00 2001
From: Justin Lei
Date: Fri, 3 Mar 2023 13:05:13 -0800
Subject: [PATCH 1/5] Adjust samplesPerChunk from 120 to 220

Signed-off-by: Justin Lei
---
 tsdb/head_append.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 14e343f74..967c74359 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1324,10 +1324,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 func (s *memSeries) appendPreprocessor(
 	t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
 ) (c *memChunk, sampleInOrder, chunkCreated bool) {
-	// Based on Gorilla white papers this offers near-optimal compression ratio
-	// so anything bigger that this has diminishing returns and increases
-	// the time range within which we have to decompress all samples.
-	const samplesPerChunk = 120
+	const samplesPerChunk = 220
 
 	c = s.head()
 

From c770ba804762d30c4cf9b63b092a3dfd9fed77c4 Mon Sep 17 00:00:00 2001
From: Justin Lei
Date: Fri, 3 Mar 2023 13:10:24 -0800
Subject: [PATCH 2/5] Add comment linking to PR

Signed-off-by: Justin Lei
---
 tsdb/head_append.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 967c74359..46180051e 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1324,6 +1324,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 func (s *memSeries) appendPreprocessor(
 	t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
 ) (c *memChunk, sampleInOrder, chunkCreated bool) {
+	// The basis for this number can be found here: https://github.com/prometheus/prometheus/pull/12055
 	const samplesPerChunk = 220
 
 	c = s.head()

From 73ff91d182c991daff13ee945bf861c3d48a58c0 Mon Sep 17 00:00:00 2001
From: Justin Lei
Date: Fri, 3 Mar 2023 14:27:13 -0800
Subject: [PATCH 3/5] Test fixes

Signed-off-by: Justin Lei
---
 storage/remote/read_handler_test.go | 133 ++++++++++++++--------------
 tsdb/db_test.go                     |  12 +--
 tsdb/head_test.go                   |  24 ++---
 3 files changed, 86 insertions(+), 83 deletions(-)

diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index 261c28e21..0c186097a 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -202,15 +202,18 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
 }
 
 func TestStreamReadEndpoint(t *testing.T) {
-	// First with 120 samples. We expect 1 frame with 1 chunk.
-	// Second with 121 samples, We expect 1 frame with 2 chunks.
-	// Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit.
+	// Note: samplesPerChunk is set to 220, but that doesn't evenly divide the 1440 samples that fit in the
+	// 24 hr chunkRange at the 1 min resolution used in this test, so tsdb.computeChunkEndTime will put 240 samples in each chunk.
+	//
+	// First with 240 samples; we expect 1 frame with 1 full chunk.
+	// Second with 241 samples; we expect 1 frame with 2 chunks.
+	// Third with 481 samples; we expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit.
 	// Fourth with 120 histogram samples. We expect 1 frame with 1 chunk.
suite, err := promql.NewTest(t, ` load 1m - test_metric1{foo="bar1",baz="qux"} 0+100x119 - test_metric1{foo="bar2",baz="qux"} 0+100x120 - test_metric1{foo="bar3",baz="qux"} 0+100x240 + test_metric1{foo="bar1",baz="qux"} 0+100x239 + test_metric1{foo="bar2",baz="qux"} 0+100x240 + test_metric1{foo="bar3",baz="qux"} 0+100x480 `) require.NoError(t, err) defer suite.Close() @@ -228,8 +231,8 @@ func TestStreamReadEndpoint(t *testing.T) { } }, 1e6, 1, - // Labelset has 57 bytes. Full chunk in test data has roughly 240 bytes. This allows us to have at max 2 chunks in this test. - 57+480, + // Labelset has 57 bytes. Full chunk in test data has roughly 440 bytes. This allows us to have at max 2 chunks in this test. + 57+880, ) // Encode the request. @@ -245,19 +248,19 @@ func TestStreamReadEndpoint(t *testing.T) { matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) - query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ + query1, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 14400001, + End: 32460001, }) require.NoError(t, err) - query2, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ + query2, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 14400001, + End: 32460001, }) require.NoError(t, err) @@ -316,8 +319,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + MaxTimeMs: 14340000, + Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, }, }, @@ -336,61 +339,61 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: 
prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 7200000, - MaxTimeMs: 7200000, - Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 7200000, MaxTimeMs: 14340000, - Data: []byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "), + Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, - Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"), + Data: []byte("\x00\x01\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\x00"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MaxTimeMs: 14340000, + Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 14400000, + MaxTimeMs: 28740000, + Data: 
[]byte("\x00\xf0\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\xe0\xd4\x03\xe0G\xca+C)\xbd\x1c\xb6\x19\xfdh\x06P\x13\xa0i@v\x83\xa5\x00\xfa\x02\x94\x0fh\nP\xf3\xa0\x19@V\x81\xe5\x01z\x01\x94\x1dh\x0eP3\xa0)@6\x8f\xa5\x01\xfa\x06\x94\x03h\nPs\xa09@րe\x01z\x1f\x94\x05h\x06P3\xa0)A\xf6\x80\xa5\x00\xfa\x06\x94\ai\xfaP\x13\xa0\x19@ր\xe5\az\x01\x94\x05h\x1eP\x13\xa1\xe9@6\x80\xa5\x03\xfa\x02\x94\x03h:P\x13\xa0y@V\x80e\x1fz\x03\x94\rh\x06P\x13\xa0\xe9@v\x81\xa5\x00\xfa\x02\x94?h\nP3\xa0\x19@V\x83\xe5\x01z\x01\x94\rh\x0eZ\x8e\xff\xad\xccjSnC\xe9O\xdcH\xe9Ch\xa53\xa3\x97\x02}h2\x85\xe8\xf2\x85h2\x9c\xe8R\x8fhR\x83\xed\xe5}(;CJ\t\xd02\x8e\xb4\x1c\xa1\xbd\x03(+O\xca\t\xd0ҁ\xb4\x14\xa3\xfd\x05(\x1bCJ\tۋ\xff(\x15\xa02\x83z\a(u\xa02\x81:\r(\x1d\xa3Ҁ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03)\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x8f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05-\xa6\x7f\xda\x02\x94\x03\xe8\x1aP\x1d\xa0\xe9@N\x80e\x03Z\x03\x94=\xe8\x06P\x15\xa0y@N\x83\xa5\x00\xda\x02\x94\x0f\xe8\nP\r\xa3\xe9@N\x81\xe5\x01Z\x01\x94\x1d\xe8\x0eP5\xa0\x19@N\x87\xa5\x01\xda\x06\x94\x03\xe8\nP}\xa0)@\u0380e\x01Z\x7f\x94\x05\xe8\x06P5\xa09A\u0380\xa5\x00\xda\x06\x94\a\xe8zP\r\xa0)@\u0380\xe5\aZ\x01\x94\x05\xe8\x1eP\x15\xa0\x19G\u0380\xa5\x03\xda\x02\x94\x03\xe8:P\x1d\xa0i"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MinTimeMs: 28800000, + MaxTimeMs: 28800000, + Data: []byte("\x00\x01\x80л\x1b@\xe7p\x00\x00\x00\x00\x00\x00"), }, }, }, @@ -409,8 +412,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 7140000, - Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + MaxTimeMs: 14340000, + Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), }, }, }, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 70639085e..14297c3dc 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -247,8 +247,8 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { var maxt int64 ctx := context.Background() { - // Appending 121 samples because on the 121st a new chunk will be created. - for i := 0; i < 121; i++ { + // Appending 221 samples because on the 221st a new chunk will be created. + for i := 0; i < 221; i++ { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) expSamples = append(expSamples, sample{t: maxt, v: 0}) @@ -1089,9 +1089,9 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) { numSamplesBeforeSeriesCreation = 1000 ) - // We test both with few and many samples appended after series creation. If samples are < 120 then there's no + // We test both with few and many samples appended after series creation. If samples are < 220 then there's no // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL. - for _, numSamplesAfterSeriesCreation := range []int{1, 1000} { + for _, numSamplesAfterSeriesCreation := range []int{1, 2000} { for run := 1; run <= numRuns; run++ { t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) { testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation) @@ -1160,8 +1160,8 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore } require.NoError(t, chunksIt.Err()) - // We expect 1 chunk every 120 samples after series creation. - require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String()) + // We expect 1 chunk every 220 samples after series creation. 
+ require.Equalf(t, (numSamplesAfterSeriesCreation/220)+1, actualChunks, "series: %s", set.At().Labels().String()) } require.NoError(t, set.Err()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 0a2a2ee6f..9a7220957 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -808,7 +808,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) - for i := 0; i < 4000; i += 5 { + for i := 0; i < 8000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "sample append failed") } @@ -825,9 +825,9 @@ func TestMemSeries_truncateChunks(t *testing.T) { require.NotNil(t, chk) require.NoError(t, err) - s.truncateChunksBefore(2000, 0) + s.truncateChunksBefore(4000, 0) - require.Equal(t, int64(2000), s.mmappedChunks[0].minTime) + require.Equal(t, int64(4000), s.mmappedChunks[0].minTime) _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) require.Equal(t, storage.ErrNotFound, err, "first chunks not gone") require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. @@ -1364,9 +1364,9 @@ func TestMemSeries_append(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut - // at approximately 120 samples per chunk. - for i := 1; i < 1000; i++ { + // Fill the range [1000,3000) with many samples. Intermediate chunks should be cut + // at approximately 220 samples per chunk. + for i := 1; i < 2000; i++ { ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "append failed") } @@ -1437,7 +1437,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { } func TestMemSeries_append_atVariableRate(t *testing.T) { - const samplesPerChunk = 120 + const samplesPerChunk = 220 dir := t.TempDir() // This is usually taken from the Head, but passing manually here. chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) @@ -2983,7 +2983,7 @@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 3000, false, false) + head, _ := newTestHead(t, 6000, false, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -2992,7 +2992,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Series with only histograms. s1 := labels.FromStrings("a", "b1") k1 := s1.String() - numHistograms := 300 + numHistograms := 600 exp := map[string][]tsdbutil.Sample{} ts := int64(0) var app storage.Appender @@ -3728,7 +3728,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. - for i := 0; i < 250; i++ { + for i := 0; i < 500; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -3756,7 +3756,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. Just to have some non-counter reset chunks in between. 
- for i := 0; i < 250; i++ { + for i := 0; i < 500; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -4223,7 +4223,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) }) app = h.Appender(ctx) - for i := 700; i < 1200; i++ { + for i := 700; i < 1700; i++ { _, err := app.Append(0, seriesLabels, int64(i), float64(i)) require.NoError(t, err) } From 052993414a60254d71bd0e19cf3ee648ca697728 Mon Sep 17 00:00:00 2001 From: Justin Lei Date: Wed, 12 Apr 2023 09:48:35 -0700 Subject: [PATCH 4/5] Add storage.tsdb.samples-per-chunk flag Signed-off-by: Justin Lei --- cmd/prometheus/main.go | 5 +++++ tsdb/db.go | 5 +++++ tsdb/head.go | 19 +++++++++++++------ tsdb/head_append.go | 7 ++----- tsdb/head_test.go | 12 ++++++------ 5 files changed, 31 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f4f6af20d..cafe2f819 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -336,6 +336,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) + serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). + Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -1542,6 +1545,7 @@ type tsdbOptions struct { NoLockfile bool WALCompression bool HeadChunksWriteQueueSize int + SamplesPerChunk int StripeSize int MinBlockDuration model.Duration MaxBlockDuration model.Duration @@ -1562,6 +1566,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { AllowOverlappingCompaction: true, WALCompression: opts.WALCompression, HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, + SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond), MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond), diff --git a/tsdb/db.go b/tsdb/db.go index 659251c3c..e0e9c69f0 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -78,6 +78,7 @@ func DefaultOptions() *Options { NoLockfile: false, AllowOverlappingCompaction: true, WALCompression: false, + SamplesPerChunk: DefaultSamplesPerChunk, StripeSize: DefaultStripeSize, HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, IsolationDisabled: defaultIsolationDisabled, @@ -149,6 +150,9 @@ type Options struct { // HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper. HeadChunksWriteQueueSize int + // SamplesPerChunk configures the target number of samples per chunk. + SamplesPerChunk int + // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. // It is always a no-op in Prometheus and mainly meant for external users who import TSDB. 
SeriesLifecycleCallback SeriesLifecycleCallback @@ -778,6 +782,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.ChunkPool = db.chunkPool headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize + headOpts.SamplesPerChunk = opts.SamplesPerChunk headOpts.StripeSize = opts.StripeSize headOpts.SeriesCallback = opts.SeriesLifecycleCallback headOpts.EnableExemplarStorage = opts.EnableExemplarStorage diff --git a/tsdb/head.go b/tsdb/head.go index af8175cd0..b4df1b2d0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -150,6 +150,8 @@ type HeadOptions struct { ChunkWriteBufferSize int ChunkWriteQueueSize int + SamplesPerChunk int + // StripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. // A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series. @@ -169,6 +171,8 @@ type HeadOptions struct { const ( // DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk. DefaultOutOfOrderCapMax int64 = 32 + // DefaultSamplesPerChunk provides a default target number of samples per chunk. + DefaultSamplesPerChunk = 120 ) func DefaultHeadOptions() *HeadOptions { @@ -178,6 +182,7 @@ func DefaultHeadOptions() *HeadOptions { ChunkPool: chunkenc.NewPool(), ChunkWriteBufferSize: chunks.DefaultWriteBufferSize, ChunkWriteQueueSize: chunks.DefaultWriteQueueSize, + SamplesPerChunk: DefaultSamplesPerChunk, StripeSize: DefaultStripeSize, SeriesCallback: &noopSeriesLifecycleCallback{}, IsolationDisabled: defaultIsolationDisabled, @@ -1607,7 +1612,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.opts.IsolationDisabled) + return newMemSeries(lset, id, h.opts.IsolationDisabled, h.opts.SamplesPerChunk) }) if err != nil { return nil, false, err @@ -1915,7 +1920,8 @@ type memSeries struct { mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - nextAt int64 // Timestamp at which to cut the next chunk. + samplesPerChunk int // Target number of samples per chunk. + nextAt int64 // Timestamp at which to cut the next chunk. // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 @@ -1943,11 +1949,12 @@ type memSeriesOOOFields struct { firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0]. 
} -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool, samplesPerChunk int) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, + lset: lset, + ref: id, + nextAt: math.MinInt64, + samplesPerChunk: samplesPerChunk, } if !isolationDisabled { s.txs = newTxRing(4) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 46180051e..eb5b219ea 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1324,9 +1324,6 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, func (s *memSeries) appendPreprocessor( t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, ) (c *memChunk, sampleInOrder, chunkCreated bool) { - // The basis for this number can be found here: https://github.com/prometheus/prometheus/pull/12055 - const samplesPerChunk = 220 - c = s.head() if c == nil { @@ -1363,7 +1360,7 @@ func (s *memSeries) appendPreprocessor( // for this chunk that will try to make samples equally distributed within // the remaining chunks in the current chunk range. // At latest it must happen at the timestamp set when the chunk was cut. - if numSamples == samplesPerChunk/4 { + if numSamples == s.samplesPerChunk/4 { s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) } // If numSamples > samplesPerChunk*2 then our previous prediction was invalid, @@ -1371,7 +1368,7 @@ func (s *memSeries) appendPreprocessor( // Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. - if t >= s.nextAt || numSamples >= samplesPerChunk*2 { + if t >= s.nextAt || numSamples >= s.samplesPerChunk*2 { c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange) chunkCreated = true } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 9a7220957..df48e592d 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -284,7 +284,7 @@ func BenchmarkLoadWAL(b *testing.B) { require.NoError(b, err) for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. - s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, defaultIsolationDisabled, DefaultSamplesPerChunk) s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper, c.mmappedChunkT) s.mmapCurrentHeadChunk(chunkDiskMapper) } @@ -806,7 +806,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { }, } - s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled) + s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) for i := 0; i < 8000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) @@ -1337,7 +1337,7 @@ func TestMemSeries_append(t *testing.T) { }() const chunkRange = 500 - s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled) + s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk) // Add first two samples at the very end of a chunk range and the next two // on and after it. 
@@ -1391,7 +1391,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
 	}()
 	chunkRange := int64(1000)
 
-	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk)
 
 	histograms := tsdbutil.GenerateTestHistograms(4)
 	histogramWithOneMoreBucket := histograms[3].Copy()
@@ -1447,7 +1447,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) {
 	})
 	chunkRange := DefaultBlockDuration
 
-	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk)
 
 	// At this slow rate, we will fill the chunk in two block durations.
 	slowRate := (DefaultBlockDuration * 2) / samplesPerChunk
@@ -2609,7 +2609,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
 	}()
 	const chunkRange = 500
 
-	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled)
+	s := newMemSeries(labels.Labels{}, 1, defaultIsolationDisabled, DefaultSamplesPerChunk)
 
 	for i := 0; i < 7; i++ {
 		ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange)

From c3e6b8563109ab09ccea33283ae27bfe3121b77a Mon Sep 17 00:00:00 2001
From: Justin Lei
Date: Thu, 13 Apr 2023 14:35:29 -0700
Subject: [PATCH 5/5] Revert test changes

Signed-off-by: Justin Lei
---
 storage/remote/read_handler_test.go | 135 ++++++++++++++--------------
 tsdb/db_test.go                     |  12 +--
 tsdb/head_test.go                   |  24 ++---
 3 files changed, 84 insertions(+), 87 deletions(-)

diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index 0c186097a..261c28e21 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -202,18 +202,15 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
 }
 
 func TestStreamReadEndpoint(t *testing.T) {
-	// Note: samplesPerChunk is set to 220, but that doesn't evenly divide the 1440 samples that fit in the
-	// 24 hr chunkRange at the 1 min resolution used in this test, so tsdb.computeChunkEndTime will put 240 samples in each chunk.
-	//
-	// First with 240 samples; we expect 1 frame with 1 full chunk.
-	// Second with 241 samples; we expect 1 frame with 2 chunks.
-	// Third with 481 samples; we expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit.
+	// First with 120 samples. We expect 1 frame with 1 chunk.
+	// Second with 121 samples, We expect 1 frame with 2 chunks.
+	// Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit.
 	// Fourth with 120 histogram samples. We expect 1 frame with 1 chunk.
 	suite, err := promql.NewTest(t, `
 		load 1m
-			test_metric1{foo="bar1",baz="qux"} 0+100x239
-			test_metric1{foo="bar2",baz="qux"} 0+100x240
-			test_metric1{foo="bar3",baz="qux"} 0+100x480
+			test_metric1{foo="bar1",baz="qux"} 0+100x119
+			test_metric1{foo="bar2",baz="qux"} 0+100x120
+			test_metric1{foo="bar3",baz="qux"} 0+100x240
 	`)
 	require.NoError(t, err)
 	defer suite.Close()
@@ -231,8 +228,8 @@ func TestStreamReadEndpoint(t *testing.T) {
 			}
 		},
 		1e6, 1,
-		// Labelset has 57 bytes. Full chunk in test data has roughly 440 bytes. This allows us to have at max 2 chunks in this test.
-		57+880,
+		// Labelset has 57 bytes. Full chunk in test data has roughly 240 bytes. This allows us to have at max 2 chunks in this test.
+		57+480,
 	)
 
 	// Encode the request.
@@ -248,19 +245,19 @@ func TestStreamReadEndpoint(t *testing.T) { matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1") require.NoError(t, err) - query1, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ + query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 32460001, + End: 14400001, }) require.NoError(t, err) - query2, err := ToQuery(0, 32460001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ + query2, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{ Step: 1, Func: "avg", Start: 0, - End: 32460001, + End: 14400001, }) require.NoError(t, err) @@ -319,8 +316,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, @@ -339,61 +336,61 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: 
[]byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 7200000, + MaxTimeMs: 7200000, + Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ + { + Type: prompb.Chunk_XOR, + MaxTimeMs: 7140000, + Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), + }, + { + Type: prompb.Chunk_XOR, + MinTimeMs: 7200000, + MaxTimeMs: 14340000, + Data: 
[]byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "), + }, + }, + }, + }, + }, + { + ChunkedSeries: []*prompb.ChunkedSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar3"}, + }, + Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, MinTimeMs: 14400000, MaxTimeMs: 14400000, - Data: []byte("\x00\x01\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\x00"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), - }, - { - Type: prompb.Chunk_XOR, - MinTimeMs: 14400000, - MaxTimeMs: 28740000, - Data: 
[]byte("\x00\xf0\x80\xe8\xdd\r@\xd7p\x00\x00\x00\x00\x00\xe0\xd4\x03\xe0G\xca+C)\xbd\x1c\xb6\x19\xfdh\x06P\x13\xa0i@v\x83\xa5\x00\xfa\x02\x94\x0fh\nP\xf3\xa0\x19@V\x81\xe5\x01z\x01\x94\x1dh\x0eP3\xa0)@6\x8f\xa5\x01\xfa\x06\x94\x03h\nPs\xa09@րe\x01z\x1f\x94\x05h\x06P3\xa0)A\xf6\x80\xa5\x00\xfa\x06\x94\ai\xfaP\x13\xa0\x19@ր\xe5\az\x01\x94\x05h\x1eP\x13\xa1\xe9@6\x80\xa5\x03\xfa\x02\x94\x03h:P\x13\xa0y@V\x80e\x1fz\x03\x94\rh\x06P\x13\xa0\xe9@v\x81\xa5\x00\xfa\x02\x94?h\nP3\xa0\x19@V\x83\xe5\x01z\x01\x94\rh\x0eZ\x8e\xff\xad\xccjSnC\xe9O\xdcH\xe9Ch\xa53\xa3\x97\x02}h2\x85\xe8\xf2\x85h2\x9c\xe8R\x8fhR\x83\xed\xe5}(;CJ\t\xd02\x8e\xb4\x1c\xa1\xbd\x03(+O\xca\t\xd0ҁ\xb4\x14\xa3\xfd\x05(\x1bCJ\tۋ\xff(\x15\xa02\x83z\a(u\xa02\x81:\r(\x1d\xa3Ҁ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03)\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x8f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05-\xa6\x7f\xda\x02\x94\x03\xe8\x1aP\x1d\xa0\xe9@N\x80e\x03Z\x03\x94=\xe8\x06P\x15\xa0y@N\x83\xa5\x00\xda\x02\x94\x0f\xe8\nP\r\xa3\xe9@N\x81\xe5\x01Z\x01\x94\x1d\xe8\x0eP5\xa0\x19@N\x87\xa5\x01\xda\x06\x94\x03\xe8\nP}\xa0)@\u0380e\x01Z\x7f\x94\x05\xe8\x06P5\xa09A\u0380\xa5\x00\xda\x06\x94\a\xe8zP\r\xa0)@\u0380\xe5\aZ\x01\x94\x05\xe8\x1eP\x15\xa0\x19G\u0380\xa5\x03\xda\x02\x94\x03\xe8:P\x1d\xa0i"), - }, - }, - }, - }, - }, - { - ChunkedSeries: []*prompb.ChunkedSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar3"}, - }, - Chunks: []prompb.Chunk{ - { - Type: prompb.Chunk_XOR, - MinTimeMs: 28800000, - MaxTimeMs: 28800000, - Data: []byte("\x00\x01\x80л\x1b@\xe7p\x00\x00\x00\x00\x00\x00"), + Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"), }, }, }, @@ -412,8 +409,8 @@ func TestStreamReadEndpoint(t *testing.T) { Chunks: []prompb.Chunk{ { Type: prompb.Chunk_XOR, - MaxTimeMs: 14340000, - Data: []byte("\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xd4\x03\xc2|\x05\x94\x00\xc1\xac}\xe9z2\xd0O\xed\xb4n[\aΔ\xa3md\xf9\xd0\xfd\x1aPm\nS\x9d\x0eQ\xad\x06P\xbd\xa8\xbfʁZ\x03(3\xa0R\x87\xda\x05(\x0f\xa0ҁ\xda=(\x13\xa02\x83Z\a(w\xa02\x81Z\x0f(\x13\xb5\x97\xf4P\x1b@\xa5\a\xf4\nP\x1bC\xa5\x02t\x1eP+@e\x1e\xf4\x0ePk@e\x02t:P;A\xa5\x01\xf4\nS\xfb@\xa5\x06t\x06P+C\xe5\x02\xf4\x06Pk@\xe5\x1et\nP\x1bA\xa5\x03\xf4:P\x1b@\xa5\x06t\x0eZJ\xff\\\x85ˈ\u05f8\x0f\xe5+F[\xc8\xe7E)\xed\x14\xa1\xf6\xe2}(v\x8d(N\x83)և(ރ(V\xdaW\xf2\x82t4\xa0m\x05(\xffAJ\x06\xd0҂t\xfc\xa0\xad\x03(oA\xca:\xd02\x82t4\xa0\xed\xb0\xbfҀ\xfa\x05(=\xa0R\x87:\x03(\x15\xa0\xf2\x81z\x03(\xf5\xa0r\x83:\x05(\r\xa1ҁ\xfa\r(\r\xa0R\x9f:\a(5\xa02\x81z\x1f(\x15\xa02\x83:\x05(\xfd\xa0R\x80\xfa\r(\x1d\xa1ҁ:\x03(5\xa0r\xd6g\xf4\xb9\x8b\x97!\xabq\x1f\xcaN\xe0GJ\x1bE)\xfd\x14\xa1\xb6\xf2}(N\x8f(V\x83)އ(փ(N\xdcW҃\xb44\xa0}\x05(\xfbAJ\x19\xd02\x82\xb4\xfc\xa0\xbd\x03(kA\xca9\xd0R\x81\xb44\xa0\xfd\xb4\xbfҀ\xda\x05(3\xa0r\x87Z\x03(\x17\xa0\xf2\x81Z\x03(\xf3\xa0R\x83\xda\x05(\x0f\xa1ҁ\xda\r(\x13\xa02\x9fZ\a(7\xa02\x81Z\x1f(\x13\xa0Ҁ\xda\x05(\xff\xa0R\x80\xda\r "), + MaxTimeMs: 7140000, + Data: 
[]byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"), }, }, }, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 14297c3dc..70639085e 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -247,8 +247,8 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { var maxt int64 ctx := context.Background() { - // Appending 221 samples because on the 221st a new chunk will be created. - for i := 0; i < 221; i++ { + // Appending 121 samples because on the 121st a new chunk will be created. + for i := 0; i < 121; i++ { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) expSamples = append(expSamples, sample{t: maxt, v: 0}) @@ -1089,9 +1089,9 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) { numSamplesBeforeSeriesCreation = 1000 ) - // We test both with few and many samples appended after series creation. If samples are < 220 then there's no + // We test both with few and many samples appended after series creation. If samples are < 120 then there's no // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL. - for _, numSamplesAfterSeriesCreation := range []int{1, 2000} { + for _, numSamplesAfterSeriesCreation := range []int{1, 1000} { for run := 1; run <= numRuns; run++ { t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) { testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation) @@ -1160,8 +1160,8 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore } require.NoError(t, chunksIt.Err()) - // We expect 1 chunk every 220 samples after series creation. - require.Equalf(t, (numSamplesAfterSeriesCreation/220)+1, actualChunks, "series: %s", set.At().Labels().String()) + // We expect 1 chunk every 120 samples after series creation. 
+ require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String()) } require.NoError(t, set.Err()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index df48e592d..80b71e927 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -808,7 +808,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { s := newMemSeries(labels.FromStrings("a", "b"), 1, defaultIsolationDisabled, DefaultSamplesPerChunk) - for i := 0; i < 8000; i += 5 { + for i := 0; i < 4000; i += 5 { ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "sample append failed") } @@ -825,9 +825,9 @@ func TestMemSeries_truncateChunks(t *testing.T) { require.NotNil(t, chk) require.NoError(t, err) - s.truncateChunksBefore(4000, 0) + s.truncateChunksBefore(2000, 0) - require.Equal(t, int64(4000), s.mmappedChunks[0].minTime) + require.Equal(t, int64(2000), s.mmappedChunks[0].minTime) _, _, err = s.chunk(0, chunkDiskMapper, &memChunkPool) require.Equal(t, storage.ErrNotFound, err, "first chunks not gone") require.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. @@ -1364,9 +1364,9 @@ func TestMemSeries_append(t *testing.T) { require.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range") require.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range") - // Fill the range [1000,3000) with many samples. Intermediate chunks should be cut - // at approximately 220 samples per chunk. - for i := 1; i < 2000; i++ { + // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut + // at approximately 120 samples per chunk. + for i := 1; i < 1000; i++ { ok, _ := s.append(1001+int64(i), float64(i), 0, chunkDiskMapper, chunkRange) require.True(t, ok, "append failed") } @@ -1437,7 +1437,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { } func TestMemSeries_append_atVariableRate(t *testing.T) { - const samplesPerChunk = 220 + const samplesPerChunk = 120 dir := t.TempDir() // This is usually taken from the Head, but passing manually here. chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) @@ -2983,7 +2983,7 @@ func TestAppendHistogram(t *testing.T) { } func TestHistogramInWALAndMmapChunk(t *testing.T) { - head, _ := newTestHead(t, 6000, false, false) + head, _ := newTestHead(t, 3000, false, false) t.Cleanup(func() { require.NoError(t, head.Close()) }) @@ -2992,7 +2992,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { // Series with only histograms. s1 := labels.FromStrings("a", "b1") k1 := s1.String() - numHistograms := 600 + numHistograms := 300 exp := map[string][]tsdbutil.Sample{} ts := int64(0) var app storage.Appender @@ -3728,7 +3728,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. - for i := 0; i < 500; i++ { + for i := 0; i < 250; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -3756,7 +3756,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histograms. Just to have some non-counter reset chunks in between. 
- for i := 0; i < 500; i++ { + for i := 0; i < 250; i++ { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -4223,7 +4223,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) }) app = h.Appender(ctx) - for i := 700; i < 1700; i++ { + for i := 700; i < 1200; i++ { _, err := app.Append(0, seriesLabels, int64(i), float64(i)) require.NoError(t, err) }
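For reference, the 240-samples-per-chunk figure cited in the PATCH 3/5 test comment falls out of the chunk-cutting arithmetic in tsdb/head_append.go. Below is a minimal, standalone sketch of that calculation: the helper is copied here rather than imported, and the constants assume the test's 1 min resolution and 24 hr chunk range.

package main

import "fmt"

// computeChunkEndTime mirrors the tsdb helper: once a chunk holds
// samplesPerChunk/4 samples, appendPreprocessor asks it for a cut timestamp
// that splits the remaining chunk range into equally sized chunks.
func computeChunkEndTime(start, cur, max int64) int64 {
	n := (max - start) / ((cur - start + 1) * 4)
	if n <= 1 {
		return max
	}
	return start + (max-start)/n
}

func main() {
	const (
		step            = int64(60_000)     // 1 min scrape interval, in ms
		chunkRange      = int64(86_400_000) // 24 hr chunk range, in ms
		samplesPerChunk = int64(220)
	)
	// After samplesPerChunk/4 = 55 samples, the head chunk spans [0, 54 min].
	cur := (samplesPerChunk/4 - 1) * step
	// 1440 one-minute samples fit in the chunk range, so the helper settles
	// on 6 equal chunks of 240 samples each rather than 220.
	nextAt := computeChunkEndTime(0, cur, chunkRange)
	fmt.Println(nextAt, nextAt/step) // 14400000 240
}

This agrees with the expected chunks in the test data: a full chunk ends at MaxTimeMs 14340000 (its 240th sample) and the next chunk starts at MinTimeMs 14400000.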
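PATCH 4/5 turns the constant into configuration, so besides the hidden --storage.tsdb.samples-per-chunk flag, external users who import TSDB can set it on Options directly. A minimal sketch of that usage (the data directory path is illustrative; the nil logger, registerer, and stats arguments fall back to defaults inside Open):

package main

import (
	"log"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// DefaultOptions fills in DefaultSamplesPerChunk (120); overriding it has
	// the same effect as passing --storage.tsdb.samples-per-chunk=220.
	opts := tsdb.DefaultOptions()
	opts.SamplesPerChunk = 220

	db, err := tsdb.Open("data/", nil, nil, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}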