// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"fmt"
	"time"

	"code.google.com/p/goprotobuf/proto"

	dto "github.com/prometheus/prometheus/model/generated"

	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/storage/raw"
	"github.com/prometheus/prometheus/storage/raw/leveldb"
)

// Processor models a post-processing agent that performs work given a sample
// corpus.
type Processor interface {
	// Name emits the name of this processor's signature encoder. It must be
	// fully-qualified in the sense that it could be used via a Protocol Buffer
	// registry to extract the descriptor to reassemble this message.
	Name() string
	// Signature emits a byte signature for this process for the purpose of
	// remarking how far along it has been applied to the database.
	Signature() (signature []byte, err error)
	// Apply runs this processor against the sample set. sampleIterator expects
	// to be pre-seeked to the initial starting position. The processor will
	// run up until stopAt has been reached. It is imperative that the
	// provided stopAt is within the interval of the series frontier.
	//
	// Upon completion or error, the last time at which the processor finished
	// shall be emitted in addition to any errors.
	Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt time.Time, fingerprint *model.Fingerprint) (lastCurated time.Time, err error)
}
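
// noopProcessor is an illustrative sketch and not part of the original file:
// it shows the minimal shape of a Processor implementation. The name it
// reports is a hypothetical placeholder; a real processor would marshal a
// protocol buffer definition of its settings in Signature and perform actual
// curation work in Apply.
type noopProcessor struct{}

func (p noopProcessor) Name() string {
	return "io.prometheus.NoopProcessorDefinition"
}

func (p noopProcessor) Signature() (signature []byte, err error) {
	return []byte{}, nil
}

func (p noopProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt time.Time, fingerprint *model.Fingerprint) (lastCurated time.Time, err error) {
	// No curation is performed; simply report stopAt as the point reached.
	return stopAt, nil
}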

// CompactionProcessor combines sparse values in the database together such
// that at least MinimumGroupSize-sized chunks are grouped together.
type CompactionProcessor struct {
	// MaximumMutationPoolBatch represents approximately the largest pending
	// batch of mutation operations for the database before pausing to
	// commit before resumption.
	//
	// A reasonable value would be (MinimumGroupSize * 2) + 1.
	MaximumMutationPoolBatch int
	// MinimumGroupSize represents the smallest allowed sample chunk size in the
	// database.
	MinimumGroupSize int

	// signature is the byte representation of the CompactionProcessor's settings,
	// used purely for memoization purposes across an instance.
	signature []byte
}
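
// newExampleCompactionProcessor is an illustrative helper and not part of the
// original file: the group size of 50 is an arbitrary example value, and the
// batch ceiling follows the (MinimumGroupSize * 2) + 1 guidance documented
// above.
func newExampleCompactionProcessor() *CompactionProcessor {
	const groupSize = 50
	return &CompactionProcessor{
		MinimumGroupSize:         groupSize,
		MaximumMutationPoolBatch: groupSize*2 + 1,
	}
}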

func (p CompactionProcessor) Name() string {
	return "io.prometheus.CompactionProcessorDefinition"
}

func (p *CompactionProcessor) Signature() (out []byte, err error) {
	if len(p.signature) == 0 {
		out, err = proto.Marshal(&dto.CompactionProcessorDefinition{
			MinimumGroupSize: proto.Uint32(uint32(p.MinimumGroupSize)),
		})

		p.signature = out
	}

	out = p.signature

	return
}

func (p CompactionProcessor) String() string {
	return fmt.Sprintf("compactionProcessor for minimum group size %d", p.MinimumGroupSize)
}

func (p CompactionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt time.Time, fingerprint *model.Fingerprint) (lastCurated time.Time, err error) {
	var pendingBatch raw.Batch = nil

	defer func() {
		if pendingBatch != nil {
			pendingBatch.Close()
		}
	}()

	var pendingMutations = 0
	var pendingSamples model.Values
	var sampleKey model.SampleKey
	var unactedSamples model.Values
	var lastTouchedTime time.Time
	var keyDropped bool

	sampleKey, err = extractSampleKey(sampleIterator)
	if err != nil {
		return
	}
	unactedSamples, err = extractSampleValues(sampleIterator)
	if err != nil {
		return
	}

	for lastCurated.Before(stopAt) && lastTouchedTime.Before(stopAt) {
		switch {
		// Furnish a new pending batch operation if none is available.
		case pendingBatch == nil:
			pendingBatch = leveldb.NewBatch()

		// If there are no sample values to extract from the datastore, let's
		// continue extracting more values to use. We know that the time.Before()
		// block would prevent us from going into unsafe territory.
		case len(unactedSamples) == 0:
			if !sampleIterator.Next() {
				return lastCurated, fmt.Errorf("Illegal Condition: Invalid Iterator on Continuation")
			}

			keyDropped = false

			sampleKey, err = extractSampleKey(sampleIterator)
			if err != nil {
				return
			}
			unactedSamples, err = extractSampleValues(sampleIterator)
			if err != nil {
				return
			}

		// If the number of pending mutations exceeds the allowed batch amount,
		// commit to disk and delete the batch. A new one will be recreated if
		// necessary.
		case pendingMutations >= p.MaximumMutationPoolBatch:
			err = samplesPersistence.Commit(pendingBatch)
			if err != nil {
				return
			}

			pendingMutations = 0

			pendingBatch.Close()
			pendingBatch = nil

		// The current chunk is already large enough on its own and nothing is
		// pending, so it is left in place untouched.
		case len(pendingSamples) == 0 && len(unactedSamples) >= p.MinimumGroupSize:
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = model.Values{}

		// Too few samples are available to form a full group yet: drop the
		// undersized chunk and absorb its samples into the pending group.
		case len(pendingSamples)+len(unactedSamples) < p.MinimumGroupSize:
			if !keyDropped {
				pendingBatch.Drop(sampleKey.ToDTO())
				keyDropped = true
			}
			pendingSamples = append(pendingSamples, unactedSamples...)
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = model.Values{}
			pendingMutations++

		// If the number of pending writes equals the target group size, write
		// the group out and seed the next group from any remaining samples.
		case len(pendingSamples) == p.MinimumGroupSize:
			newSampleKey := pendingSamples.ToSampleKey(fingerprint)
			pendingBatch.Put(newSampleKey.ToDTO(), pendingSamples.ToDTO())
			pendingMutations++
			lastCurated = newSampleKey.FirstTimestamp.In(time.UTC)
			if len(unactedSamples) > 0 {
				if !keyDropped {
					pendingBatch.Drop(sampleKey.ToDTO())
					keyDropped = true
				}

				if len(unactedSamples) > p.MinimumGroupSize {
					pendingSamples = unactedSamples[:p.MinimumGroupSize]
					unactedSamples = unactedSamples[p.MinimumGroupSize:]
					lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
				} else {
					pendingSamples = unactedSamples
					lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
					unactedSamples = model.Values{}
				}
			}

		// Enough samples are available to complete a full group: take just
		// enough from the current chunk to reach MinimumGroupSize.
		case len(pendingSamples)+len(unactedSamples) >= p.MinimumGroupSize:
			if !keyDropped {
				pendingBatch.Drop(sampleKey.ToDTO())
				keyDropped = true
			}
			remainder := p.MinimumGroupSize - len(pendingSamples)
			pendingSamples = append(pendingSamples, unactedSamples[:remainder]...)
			unactedSamples = unactedSamples[remainder:]
			if len(unactedSamples) == 0 {
				lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
			} else {
				lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			}
			pendingMutations++

		default:
			err = fmt.Errorf("Unhandled processing case.")
		}
	}

	// Flush any leftover samples as a final chunk.
	if len(unactedSamples) > 0 || len(pendingSamples) > 0 {
		pendingSamples = append(pendingSamples, unactedSamples...)
		newSampleKey := pendingSamples.ToSampleKey(fingerprint)
		pendingBatch.Put(newSampleKey.ToDTO(), pendingSamples.ToDTO())
		pendingSamples = model.Values{}
		pendingMutations++
		lastCurated = newSampleKey.FirstTimestamp.In(time.UTC)
	}

	// This is not deferred due to the off-chance that a pre-existing commit
	// failed.
	if pendingBatch != nil && pendingMutations > 0 {
		err = samplesPersistence.Commit(pendingBatch)
		if err != nil {
			return
		}
	}

	return
}
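
// The note below is an illustrative summary and not part of the original
// file: undersized chunks are dropped from the batch and their samples
// accumulated as pending until at least MinimumGroupSize samples are
// available, at which point a chunk of exactly MinimumGroupSize samples is
// rewritten; whatever remains when the loop terminates is flushed as one
// final chunk. For instance, assuming MinimumGroupSize is 10 and the curated
// window ends at the last of twelve samples stored as chunks of 3, 4, and 5,
// all three chunks are dropped and rewritten as a single 12-sample chunk by
// the trailing flush.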

// DeletionProcessor deletes sample blocks older than a defined value.
type DeletionProcessor struct {
	// MaximumMutationPoolBatch represents approximately the largest pending
	// batch of mutation operations for the database before pausing to
	// commit before resumption.
	MaximumMutationPoolBatch int

	// signature is the byte representation of the DeletionProcessor's settings,
	// used purely for memoization purposes across an instance.
	signature []byte
}
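
// newExampleDeletionProcessor is an illustrative helper and not part of the
// original file; the batch ceiling of 100 is an arbitrary example value.
func newExampleDeletionProcessor() *DeletionProcessor {
	return &DeletionProcessor{
		MaximumMutationPoolBatch: 100,
	}
}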

func (p DeletionProcessor) Name() string {
	return "io.prometheus.DeletionProcessorDefinition"
}

func (p *DeletionProcessor) Signature() (out []byte, err error) {
	if len(p.signature) == 0 {
		out, err = proto.Marshal(&dto.DeletionProcessorDefinition{})

		p.signature = out
	}

	out = p.signature

	return
}

func (p DeletionProcessor) String() string {
	return "deletionProcessor"
}

func (p DeletionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt time.Time, fingerprint *model.Fingerprint) (lastCurated time.Time, err error) {
	var pendingBatch raw.Batch = nil

	defer func() {
		if pendingBatch != nil {
			pendingBatch.Close()
		}
	}()

	sampleKey, err := extractSampleKey(sampleIterator)
	if err != nil {
		return
	}
	sampleValues, err := extractSampleValues(sampleIterator)
	if err != nil {
		return
	}

	pendingMutations := 0

	for lastCurated.Before(stopAt) {
		switch {
		// Furnish a new pending batch operation if none is available.
		case pendingBatch == nil:
			pendingBatch = leveldb.NewBatch()

		// If there are no sample values to extract from the datastore, let's
		// continue extracting more values to use. We know that the time.Before()
		// block would prevent us from going into unsafe territory.
		case len(sampleValues) == 0:
			if !sampleIterator.Next() {
				return lastCurated, fmt.Errorf("Illegal Condition: Invalid Iterator on Continuation")
			}

			sampleKey, err = extractSampleKey(sampleIterator)
			if err != nil {
				return
			}
			sampleValues, err = extractSampleValues(sampleIterator)
			if err != nil {
				return
			}

		// If the number of pending mutations exceeds the allowed batch amount,
		// commit to disk and delete the batch. A new one will be recreated if
		// necessary.
		case pendingMutations >= p.MaximumMutationPoolBatch:
			err = samplesPersistence.Commit(pendingBatch)
			if err != nil {
				return
			}

			pendingMutations = 0

			pendingBatch.Close()
			pendingBatch = nil

		// The chunk does not contain stopAt, so it can be dropped in its
		// entirety.
		case !sampleKey.MayContain(stopAt):
			pendingBatch.Drop(sampleKey.ToDTO())
			lastCurated = sampleKey.LastTimestamp
			sampleValues = model.Values{}
			pendingMutations++

		// The chunk's time range contains stopAt: drop it, truncate the value
		// set at stopAt, and re-put whatever remains.
		case sampleKey.MayContain(stopAt):
			pendingBatch.Drop(sampleKey.ToDTO())
			pendingMutations++

			sampleValues = sampleValues.TruncateBefore(stopAt)
			if len(sampleValues) > 0 {
				sampleKey = sampleValues.ToSampleKey(fingerprint)
				lastCurated = sampleKey.FirstTimestamp
				pendingBatch.Put(sampleKey.ToDTO(), sampleValues.ToDTO())
				pendingMutations++
			} else {
				lastCurated = sampleKey.LastTimestamp
			}

		default:
			err = fmt.Errorf("Unhandled processing case.")
		}
	}

	// This is not deferred due to the off-chance that a pre-existing commit
	// failed.
	if pendingBatch != nil && pendingMutations > 0 {
		err = samplesPersistence.Commit(pendingBatch)
		if err != nil {
			return
		}
	}

	return
}