Merge pull request #9338 from prometheus/release-2.30

Merge back release 2.30.
Julien Pivotto, 2021-09-14 23:46:11 +02:00 (committed by GitHub)
commit 2bde71ec5f
6 changed files with 41 additions and 18 deletions

CHANGELOG.md

@@ -1,3 +1,20 @@
+## 2.30.0 / 2021-09-14
+
+* [FEATURE] **experimental** TSDB: Snapshot in-memory chunks on shutdown for faster restarts. Behind `--enable-feature=memory-snapshot-on-shutdown` flag. #7229
+* [FEATURE] **experimental** Scrape: Configure scrape interval and scrape timeout via relabeling, using the `__scrape_interval__` and `__scrape_timeout__` labels respectively. #8911
+* [FEATURE] Scrape: Add `scrape_timeout_seconds` and `scrape_sample_limit` metrics. Behind `--enable-feature=extra-scrape-metrics` flag to avoid additional cardinality by default. #9247 #9295
+* [ENHANCEMENT] Scrape: Add `--scrape.timestamp-tolerance` flag to adjust the scrape timestamp tolerance when enabled via `--scrape.adjust-timestamps`. #9283
+* [ENHANCEMENT] Remote Write: Improve throughput when sending exemplars. #8921
+* [ENHANCEMENT] TSDB: Optimise WAL loading by removing an extra map and caching min-time. #9160
+* [ENHANCEMENT] promtool: Speed up checking for duplicate rules. #9262/#9306
+* [ENHANCEMENT] Scrape: Reduce allocations when parsing metrics. #9299
+* [ENHANCEMENT] docker_sd: Support host network mode. #9125
+* [BUGFIX] Exemplars: Fix panic when resizing exemplar storage from 0 to a non-zero size. #9286
+* [BUGFIX] TSDB: Correctly decrement `prometheus_tsdb_head_active_appenders` when the append has no samples. #9230
+* [BUGFIX] promtool rules backfill: Return 1 if backfill was unsuccessful. #9303
+* [BUGFIX] promtool rules backfill: Avoid creation of overlapping blocks. #9324
+* [BUGFIX] config: Fix a panic when reloading configuration with a `null` relabel action. #9224
+
 ## 2.29.2 / 2021-08-27

 * [BUGFIX] Fix Kubernetes SD failing to discover Ingress in Kubernetes v1.22. #9205

VERSION

@@ -1 +1 @@
-2.29.2
+2.30.0

cmd/promtool/rules.go

@@ -103,6 +103,9 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 		currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
 		startWithAlignment := grp.EvalTimestamp(time.Unix(currStart, 0).UTC().UnixNano())
+		for startWithAlignment.Unix() < currStart {
+			startWithAlignment = startWithAlignment.Add(grp.Interval())
+		}
 		val, warnings, err := importer.apiClient.QueryRange(ctx,
 			ruleExpr,
 			v1.Range{
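The three `+` lines are the substance of this hunk: `grp.EvalTimestamp` aligns a timestamp onto the rule group's evaluation schedule, and the aligned point can land *before* `currStart`, letting the importer query a range that starts before the current block; this appears to be the overlapping-blocks fix listed in the changelog (#9324). A minimal, self-contained sketch of the behaviour, using a simplified `evalTimestamp` stand-in (the real `rules.Group.EvalTimestamp` also applies a per-group offset):

```go
package main

import (
	"fmt"
	"time"
)

// evalTimestamp mimics aligning a timestamp down onto an interval grid.
func evalTimestamp(ts time.Time, interval time.Duration) time.Time {
	return ts.Truncate(interval)
}

func main() {
	interval := 5 * time.Minute
	currStart := time.Date(2021, 9, 14, 10, 2, 0, 0, time.UTC)

	aligned := evalTimestamp(currStart, interval) // 10:00, before currStart
	for aligned.Unix() < currStart.Unix() {
		aligned = aligned.Add(interval) // step forward into the block
	}
	fmt.Println(aligned) // 2021-09-14 10:05:00 +0000 UTC
}
```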
@@ -141,22 +144,16 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 		matrix = val.(model.Matrix)
 		for _, sample := range matrix {
-			currentLabels := make(labels.Labels, 0, len(sample.Metric)+len(ruleLabels)+1)
-			currentLabels = append(currentLabels, labels.Label{
-				Name:  labels.MetricName,
-				Value: ruleName,
-			})
-			currentLabels = append(currentLabels, ruleLabels...)
+			lb := labels.NewBuilder(ruleLabels)
 			for name, value := range sample.Metric {
-				currentLabels = append(currentLabels, labels.Label{
-					Name:  string(name),
-					Value: string(value),
-				})
+				lb.Set(string(name), string(value))
 			}
+			lb.Set(labels.MetricName, ruleName)
 			for _, value := range sample.Values {
-				if err := app.add(ctx, currentLabels, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
+				if err := app.add(ctx, lb.Labels(), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
 					return errors.Wrap(err, "add")
 				}
 			}
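The switch from hand-appended `labels.Labels` to `labels.NewBuilder` is the duplicate-name fix: the query result may already carry a `__name__` label, and the old code appended the rule name as a second `__name__` entry, producing an invalid label set. `Builder.Set` overwrites instead of appending, and moving `lb.Set(labels.MetricName, ruleName)` after the copy loop lets the rule name win. A small runnable sketch, assuming the `pkg/labels` import path used by this era of the tree:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels" // pkg/labels as of 2.30; moved later
)

func main() {
	// Labels attached to the recording rule itself.
	ruleLabels := labels.FromStrings("testlabel11", "testlabelvalue12")

	lb := labels.NewBuilder(ruleLabels)

	// Query output that already carries a metric name, as in the
	// "dup name label" test case added below.
	lb.Set("__name__", "val1")
	lb.Set("name1", "val1")

	// The rule name is set last, so it overrides the stale __name__
	// rather than duplicating it; Labels() returns a sorted set.
	lb.Set(labels.MetricName, "grp1_rule1")

	fmt.Println(lb.Labels())
	// {__name__="grp1_rule1", name1="val1", testlabel11="testlabelvalue12"}
}
```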

cmd/promtool/rules_test.go

@@ -61,6 +61,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 	}{
 		{"no samples", 1, 0, 0, 0, []*model.SampleStream{}},
 		{"run importer once", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
+		{"run importer with dup name label", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
 		{"one importer twice", 2, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}},
 	}
 	for _, tt := range testCases {
@@ -194,7 +195,7 @@ func createMultiRuleTestFiles(path string) error {
   - record: grp1_rule1
     expr: grp1_rule1_expr
     labels:
-      testlabel11: testlabelvalue11
+      testlabel11: testlabelvalue12
 - name: group2
   rules:
   - record: grp2_rule1
@@ -202,7 +203,7 @@ func createMultiRuleTestFiles(path string) error {
   - record: grp2_rule2
     expr: grp2_rule2_expr
     labels:
-      testlabel11: testlabelvalue11
+      testlabel11: testlabelvalue13
 `
 	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
 }

tsdb/head.go

@@ -1168,12 +1168,12 @@ func (h *Head) Close() error {
 	defer h.closedMtx.Unlock()
 	h.closed = true
 	errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
-	if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
-		errs.Add(h.performChunkSnapshot())
-	}
 	if h.wal != nil {
 		errs.Add(h.wal.Close())
 	}
+	if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
+		errs.Add(h.performChunkSnapshot())
+	}
 	return errs.Err()
 }
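Only six lines move here, but the order is the point: the snapshot records a reference to the last WAL segment and offset (the test below checks exactly that), so it must be taken after `h.wal.Close()`, once the WAL can no longer advance. A toy illustration with hypothetical `closeWAL`/`takeSnapshot` stubs standing in for the real Head methods:

```go
package main

import "fmt"

var walOffset int // offset of the last WAL record, as a stand-in

// closeWAL flushes and pads the active page, moving the offset one
// final time (hypothetical stub for h.wal.Close).
func closeWAL() error {
	walOffset = 32 * 1024
	return nil
}

// takeSnapshot captures the WAL offset the way performChunkSnapshot
// records the last segment and offset (hypothetical stub).
func takeSnapshot() error {
	fmt.Printf("snapshot references WAL offset %d\n", walOffset)
	return nil
}

func main() {
	// New ordering: WAL first, snapshot second, so the stored offset is
	// final and a restart replays the snapshot plus nothing stale.
	if err := closeWAL(); err == nil {
		_ = takeSnapshot() // prints 32768
	}
}
```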

tsdb/head_test.go

@@ -2659,6 +2659,10 @@ func TestChunkSnapshot(t *testing.T) {
 	// These references should be the ones used for the snapshot.
 	wlast, woffset, err = head.wal.LastSegmentAndOffset()
 	require.NoError(t, err)
+	if woffset != 0 && woffset < 32*1024 {
+		// The page is always filled before taking the snapshot.
+		woffset = 32 * 1024
+	}

 	{
 		// Creating snapshot and verifying it.
@@ -2725,6 +2729,10 @@ func TestChunkSnapshot(t *testing.T) {
 	// Creating another snapshot should delete the older snapshot and replay still works fine.
 	wlast, woffset, err = head.wal.LastSegmentAndOffset()
 	require.NoError(t, err)
+	if woffset != 0 && woffset < 32*1024 {
+		// The page is always filled before taking the snapshot.
+		woffset = 32 * 1024
+	}

 	{
 		// Close Head and verify that new snapshot was created.
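Both test adjustments encode the same fact: this WAL writes in fixed 32 KiB pages (`pageSize` in `tsdb/wal`), and the partially filled page is completed before the snapshot is taken, so an expected offset that falls inside the first page must be rounded up to the page boundary. The expectation as a self-contained sketch:

```go
package main

import "fmt"

// pageSize matches the 32KB WAL page size in tsdb/wal.
const pageSize = 32 * 1024

// expectedOffset mirrors the adjustment made in TestChunkSnapshot.
func expectedOffset(woffset int) int {
	if woffset != 0 && woffset < pageSize {
		return pageSize // the page is filled before the snapshot
	}
	return woffset
}

func main() {
	fmt.Println(expectedOffset(0))     // 0: nothing was written
	fmt.Println(expectedOffset(100))   // 32768: mid-page rounds up
	fmt.Println(expectedOffset(40000)) // 40000: beyond the first page
}
```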