2017-05-10 09:44:13 +00:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package remote
|
|
|
|
|
|
|
|
import (
|
2020-07-30 11:11:13 +00:00
|
|
|
"context"
|
2019-12-12 20:47:23 +00:00
|
|
|
"fmt"
|
2021-10-29 15:25:05 +00:00
|
|
|
"math"
|
2019-06-01 01:39:40 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2021-06-11 16:17:59 +00:00
|
|
|
"github.com/go-kit/log"
|
2019-03-01 19:04:26 +00:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
|
|
|
"github.com/prometheus/client_golang/prometheus/promauto"
|
2020-10-22 09:00:08 +00:00
|
|
|
|
2019-06-01 01:39:40 +00:00
|
|
|
"github.com/prometheus/prometheus/config"
|
2021-11-08 14:23:17 +00:00
|
|
|
"github.com/prometheus/prometheus/model/exemplar"
|
Style cleanup of all the changes in sparsehistogram so far
A lot of this code was hacked together, literally during a
hackathon. This commit intends not to change the code substantially,
but just make the code obey the usual style practices.
A (possibly incomplete) list of areas:
* Generally address linter warnings.
* The `pkg` directory is deprecated as per dev-summit. No new packages should
be added to it. I moved the new `pkg/histogram` package to `model`
anticipating what's proposed in #9478.
* Make the naming of the Sparse Histogram more consistent. Including
abbreviations, there were just too many names for it: SparseHistogram,
Histogram, Histo, hist, his, shs, h. The idea is to call it "Histogram" in
general. Only add "Sparse" if it is needed to avoid confusion with
conventional Histograms (which is rare because the TSDB really has no notion
of conventional Histograms). Use abbreviations only in local scope, and then
really abbreviate (not just removing three out of seven letters like in
"Histo"). This is in the spirit of
https://github.com/golang/go/wiki/CodeReviewComments#variable-names
* Several other minor name changes.
* A lot of formatting of doc comments. For one, following
https://github.com/golang/go/wiki/CodeReviewComments#comment-sentences
, but also layout question, anticipating how things will look like
when rendered by `godoc` (even where `godoc` doesn't render them
right now because they are for unexported types or not a doc comment
at all but just a normal code comment - consistency is queen!).
* Re-enabled `TestQueryLog` and `TestEndpoints` (they pass now,
leaving them disabled was presumably an oversight).
* Bucket iterator for histogram.Histogram is now created with a
method.
* HistogramChunk.iterator now allows iterator recycling. (I think
@dieterbe only commented it out because he was confused by the
question in the comment.)
* HistogramAppender.Append panics now because we decided to treat
staleness marker differently.
Signed-off-by: beorn7 <beorn@grafana.com>
2021-10-09 13:57:07 +00:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 14:23:17 +00:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2022-07-19 08:58:52 +00:00
|
|
|
"github.com/prometheus/prometheus/model/metadata"
|
2017-05-10 09:44:13 +00:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2022-10-10 15:08:46 +00:00
|
|
|
"github.com/prometheus/prometheus/tsdb/wlog"
|
2017-05-10 09:44:13 +00:00
|
|
|
)
|
|
|
|
|
2019-03-01 19:04:26 +00:00
|
|
|
// Package-level counters for everything that comes in through the Appender
// interface. The queue managers expose matching "out" metrics, so these allow
// comparing what was received with what was actually sent.
var (
	// samplesIn counts float samples received via the Appender interface.
	samplesIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "samples_in_total",
		Help:      "Samples in to remote storage, compare to samples out for queue managers.",
	})
	// exemplarsIn counts exemplars received via the Appender interface.
	exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "exemplars_in_total",
		Help:      "Exemplars in to remote storage, compare to exemplars out for queue managers.",
	})
	// histogramsIn counts native histogram samples received via the Appender
	// interface.
	histogramsIn = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "histograms_in_total",
		Help:      "HistogramSamples in to remote storage, compare to histograms out for queue managers.",
	})
)
|
|
|
|
|
2019-06-01 01:39:40 +00:00
|
|
|
// WriteStorage represents all the remote write storage.
type WriteStorage struct {
	logger log.Logger
	reg    prometheus.Registerer
	// mtx guards queues and externalLabels (see ApplyConfig, Close,
	// LowestSentTimestamp).
	mtx sync.Mutex

	// Metrics shared by the WAL watchers / live readers of all queue managers.
	watcherMetrics    *wlog.WatcherMetrics
	liveReaderMetrics *wlog.LiveReaderMetrics
	// externalLabels holds the last applied global external labels, used by
	// ApplyConfig to detect changes that require queues to be recreated.
	externalLabels labels.Labels
	dir            string
	// queues maps a remote-write config hash (see toHash) to its running
	// QueueManager.
	queues map[string]*QueueManager
	// samplesIn is an EWMA of the inbound sample rate, ticked by run().
	samplesIn     *ewmaRate
	flushDeadline time.Duration
	interner      *pool
	scraper       ReadyScrapeManager
	// quit terminates the background run() loop when closed.
	quit chan struct{}

	// For timestampTracker.
	highestTimestamp *maxTimestamp
}
|
|
|
|
|
|
|
|
// NewWriteStorage creates and runs a WriteStorage.
|
2022-05-31 04:45:30 +00:00
|
|
|
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
|
2019-06-01 01:39:40 +00:00
|
|
|
if logger == nil {
|
|
|
|
logger = log.NewNopLogger()
|
|
|
|
}
|
|
|
|
rws := &WriteStorage{
|
2020-03-20 16:34:15 +00:00
|
|
|
queues: make(map[string]*QueueManager),
|
2022-10-10 15:08:46 +00:00
|
|
|
watcherMetrics: wlog.NewWatcherMetrics(reg),
|
|
|
|
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
|
2020-03-20 16:34:15 +00:00
|
|
|
logger: logger,
|
2020-04-25 03:39:46 +00:00
|
|
|
reg: reg,
|
2020-03-20 16:34:15 +00:00
|
|
|
flushDeadline: flushDeadline,
|
|
|
|
samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
|
2022-05-31 04:45:30 +00:00
|
|
|
dir: dir,
|
2020-09-24 18:44:18 +00:00
|
|
|
interner: newPool(),
|
2020-11-19 15:23:03 +00:00
|
|
|
scraper: sm,
|
2021-10-29 23:39:02 +00:00
|
|
|
quit: make(chan struct{}),
|
2020-10-15 21:53:59 +00:00
|
|
|
highestTimestamp: &maxTimestamp{
|
2020-09-24 18:44:18 +00:00
|
|
|
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
|
|
|
|
Namespace: namespace,
|
|
|
|
Subsystem: subsystem,
|
|
|
|
Name: "highest_timestamp_in_seconds",
|
|
|
|
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
|
|
|
|
}),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if reg != nil {
|
|
|
|
reg.MustRegister(rws.highestTimestamp)
|
2019-06-01 01:39:40 +00:00
|
|
|
}
|
|
|
|
go rws.run()
|
|
|
|
return rws
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rws *WriteStorage) run() {
|
|
|
|
ticker := time.NewTicker(shardUpdateDuration)
|
|
|
|
defer ticker.Stop()
|
2021-10-29 23:39:02 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
rws.samplesIn.tick()
|
|
|
|
case <-rws.quit:
|
|
|
|
return
|
|
|
|
}
|
2019-06-01 01:39:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ApplyConfig updates the state as the new config requires.
// Only stop & create queues which have changes.
func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
	rws.mtx.Lock()
	defer rws.mtx.Unlock()

	// Remote write queues only need to change if the remote write config or
	// external labels change.
	externalLabelUnchanged := labels.Equal(conf.GlobalConfig.ExternalLabels, rws.externalLabels)
	rws.externalLabels = conf.GlobalConfig.ExternalLabels

	// Build the next generation of queues. Hashes of unchanged configs are
	// carried over from rws.queues; newHashes records the genuinely new ones.
	newQueues := make(map[string]*QueueManager)
	newHashes := []string{}
	for _, rwConf := range conf.RemoteWriteConfigs {
		hash, err := toHash(rwConf)
		if err != nil {
			return err
		}

		// Don't allow duplicate remote write configs.
		if _, ok := newQueues[hash]; ok {
			return fmt.Errorf("duplicate remote write configs are not allowed, found duplicate for URL: %s", rwConf.URL)
		}

		// Set the queue name to the config hash if the user has not set
		// a name in their remote write config so we can still differentiate
		// between queues that have the same remote write endpoint.
		name := hash[:6]
		if rwConf.Name != "" {
			name = rwConf.Name
		}

		c, err := NewWriteClient(name, &ClientConfig{
			URL:              rwConf.URL,
			Timeout:          rwConf.RemoteTimeout,
			HTTPClientConfig: rwConf.HTTPClientConfig,
			SigV4Config:      rwConf.SigV4Config,
			Headers:          rwConf.Headers,
			RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
		})
		if err != nil {
			return err
		}

		// Reuse an existing queue when neither the config (same hash) nor the
		// external labels changed; only the client is refreshed.
		queue, ok := rws.queues[hash]
		if externalLabelUnchanged && ok {
			// Update the client in case any secret configuration has changed.
			queue.SetClient(c)
			newQueues[hash] = queue
			// Remove from rws.queues so it is not stopped below.
			delete(rws.queues, hash)
			continue
		}

		// Redacted to remove any passwords in the URL (that are
		// technically accepted but not recommended) since this is
		// only used for metric labels.
		endpoint := rwConf.URL.Redacted()
		newQueues[hash] = NewQueueManager(
			newQueueManagerMetrics(rws.reg, name, endpoint),
			rws.watcherMetrics,
			rws.liveReaderMetrics,
			rws.logger,
			rws.dir,
			rws.samplesIn,
			rwConf.QueueConfig,
			rwConf.MetadataConfig,
			conf.GlobalConfig.ExternalLabels,
			rwConf.WriteRelabelConfigs,
			c,
			rws.flushDeadline,
			rws.interner,
			rws.highestTimestamp,
			rws.scraper,
			rwConf.SendExemplars,
			rwConf.SendNativeHistograms,
		)
		// Keep track of which queues are new so we know which to start.
		newHashes = append(newHashes, hash)
	}

	// Anything remaining in rws.queues is a queue whose config has
	// changed or was removed from the overall remote write config.
	// Stop those before starting the new ones.
	for _, q := range rws.queues {
		q.Stop()
	}

	for _, hash := range newHashes {
		newQueues[hash].Start()
	}

	rws.queues = newQueues

	return nil
}
|
|
|
|
|
|
|
|
// Appender implements storage.Storage.
|
2020-07-30 11:11:13 +00:00
|
|
|
func (rws *WriteStorage) Appender(_ context.Context) storage.Appender {
|
2019-01-18 12:48:16 +00:00
|
|
|
return ×tampTracker{
|
2020-09-24 18:44:18 +00:00
|
|
|
writeStorage: rws,
|
|
|
|
highestRecvTimestamp: rws.highestTimestamp,
|
2020-02-06 15:58:38 +00:00
|
|
|
}
|
2019-01-18 12:48:16 +00:00
|
|
|
}
|
|
|
|
|
2021-10-29 15:25:05 +00:00
|
|
|
// LowestSentTimestamp returns the lowest sent timestamp across all queues.
|
|
|
|
func (rws *WriteStorage) LowestSentTimestamp() int64 {
|
|
|
|
rws.mtx.Lock()
|
|
|
|
defer rws.mtx.Unlock()
|
|
|
|
|
|
|
|
var lowestTs int64 = math.MaxInt64
|
|
|
|
|
|
|
|
for _, q := range rws.queues {
|
|
|
|
ts := int64(q.metrics.highestSentTimestamp.Get() * 1000)
|
|
|
|
if ts < lowestTs {
|
|
|
|
lowestTs = ts
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(rws.queues) == 0 {
|
|
|
|
lowestTs = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return lowestTs
|
|
|
|
}
|
|
|
|
|
2019-06-01 01:39:40 +00:00
|
|
|
// Close closes the WriteStorage.
|
|
|
|
func (rws *WriteStorage) Close() error {
|
|
|
|
rws.mtx.Lock()
|
|
|
|
defer rws.mtx.Unlock()
|
|
|
|
for _, q := range rws.queues {
|
|
|
|
q.Stop()
|
|
|
|
}
|
2021-10-29 23:39:02 +00:00
|
|
|
close(rws.quit)
|
2019-06-01 01:39:40 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-01-18 12:48:16 +00:00
|
|
|
// timestampTracker is a storage.Appender that only counts appended data and
// tracks the highest timestamp seen; nothing is forwarded until Commit, which
// folds the counts into the WriteStorage's rate and metrics.
type timestampTracker struct {
	writeStorage *WriteStorage
	// Counts of data appended since this tracker was created.
	samples    int64
	exemplars  int64
	histograms int64
	// highestTimestamp is the largest sample/histogram timestamp appended,
	// in milliseconds.
	highestTimestamp int64
	// highestRecvTimestamp is the shared gauge updated on Commit.
	highestRecvTimestamp *maxTimestamp
}
|
|
|
|
|
2021-02-18 12:07:00 +00:00
|
|
|
// Append implements storage.Appender.
|
2021-11-06 10:10:04 +00:00
|
|
|
func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) {
|
2019-01-18 12:48:16 +00:00
|
|
|
t.samples++
|
|
|
|
if ts > t.highestTimestamp {
|
|
|
|
t.highestTimestamp = ts
|
2017-05-10 09:44:13 +00:00
|
|
|
}
|
2017-09-07 12:14:41 +00:00
|
|
|
return 0, nil
|
2017-05-10 09:44:13 +00:00
|
|
|
}
|
|
|
|
|
2021-11-06 10:10:04 +00:00
|
|
|
// AppendExemplar implements storage.Appender. Exemplars are only counted
// here; unlike Append and AppendHistogram, no timestamp tracking is done.
func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
	t.exemplars++
	return 0, nil
}
|
|
|
|
|
2022-07-14 13:13:12 +00:00
|
|
|
func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) {
|
2021-07-01 12:49:04 +00:00
|
|
|
t.histograms++
|
|
|
|
if ts > t.highestTimestamp {
|
|
|
|
t.highestTimestamp = ts
|
|
|
|
}
|
|
|
|
return 0, nil
|
2021-06-28 15:00:55 +00:00
|
|
|
}
|
|
|
|
|
2022-07-19 08:58:52 +00:00
|
|
|
// UpdateMetadata implements storage.Appender.
func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
	// UpdateMetadata is a no-op for remote write (where timestampTracker is being used) for now.
	return 0, nil
}
|
|
|
|
|
2017-10-23 13:57:30 +00:00
|
|
|
// Commit implements storage.Appender.
|
2019-01-18 12:48:16 +00:00
|
|
|
func (t *timestampTracker) Commit() error {
|
2022-07-14 13:13:12 +00:00
|
|
|
t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)
|
2019-01-18 12:48:16 +00:00
|
|
|
|
2019-03-01 19:04:26 +00:00
|
|
|
samplesIn.Add(float64(t.samples))
|
2021-05-06 20:53:52 +00:00
|
|
|
exemplarsIn.Add(float64(t.exemplars))
|
2022-07-14 13:13:12 +00:00
|
|
|
histogramsIn.Add(float64(t.histograms))
|
2020-09-24 18:44:18 +00:00
|
|
|
t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
|
2017-05-10 09:44:13 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-23 13:57:30 +00:00
|
|
|
// Rollback implements storage.Appender. It is a no-op: nothing is forwarded
// to the write storage until Commit, so there is nothing to undo.
func (*timestampTracker) Rollback() error {
	return nil
}
|