prometheus/storage/remote/storage.go

// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"sync"
	"time"

	"github.com/go-kit/kit/log"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/logging"
	"github.com/prometheus/prometheus/storage"
)

// String constants for instrumentation.
const (
	namespace  = "prometheus"
	subsystem  = "remote_storage"
	remoteName = "remote_name"
	endpoint   = "url"
)

// startTimeCallback is a callback func that returns the oldest timestamp stored in a storage.
type startTimeCallback func() (int64, error)

// Storage represents all the remote read and write endpoints. It implements
// storage.Storage.
type Storage struct {
	logger log.Logger
	mtx    sync.Mutex

	rws *WriteStorage

	// For reads
	queryables             []storage.Queryable
	localStartTimeCallback startTimeCallback
}

// NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration) *Storage {
	if l == nil {
		l = log.NewNopLogger()
	}
	s := &Storage{
		logger:                 logging.Dedupe(l, 1*time.Minute),
		localStartTimeCallback: stCallback,
	}
	s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline)
	return s
}
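
// A minimal construction sketch (illustrative only, not part of this file's
// API): in Prometheus itself the caller supplies its own registry, logger,
// WAL directory and flush deadline, so the concrete values below are
// placeholders.
//
//	var (
//		logger        = log.NewNopLogger()
//		reg           = prometheus.NewRegistry()
//		startTime     = func() (int64, error) { return int64(model.Latest), nil }
//		walDir        = "data"          // placeholder WAL directory
//		flushDeadline = 1 * time.Minute // placeholder shutdown flush deadline
//	)
//	rs := NewStorage(logger, reg, startTime, walDir, flushDeadline)
//	defer rs.Close()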

// ApplyConfig updates the state as the new config requires.
func (s *Storage) ApplyConfig(conf *config.Config) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	if err := s.rws.ApplyConfig(conf); err != nil {
		return err
	}

	// Update read clients
	readHashes := make(map[string]struct{})
	queryables := make([]storage.Queryable, 0, len(conf.RemoteReadConfigs))
	for _, rrConf := range conf.RemoteReadConfigs {
		hash, err := toHash(rrConf)
		if err != nil {
			return err
		}

		// Don't allow duplicate remote read configs.
		if _, ok := readHashes[hash]; ok {
			return fmt.Errorf("duplicate remote read configs are not allowed, found duplicate for URL: %s", rrConf.URL)
		}
		readHashes[hash] = struct{}{}

		// Set the name to the config hash if the user has not set a name in
		// their remote read config so we can still differentiate between read
		// clients that have the same remote read endpoint.
		name := string(hash[:6])
		if rrConf.Name != "" {
			name = rrConf.Name
		}

		c, err := NewClient(name, &ClientConfig{
			URL:              rrConf.URL,
			Timeout:          rrConf.RemoteTimeout,
			HTTPClientConfig: rrConf.HTTPClientConfig,
		})
		if err != nil {
			return err
		}

		q := QueryableClient(c)
		q = ExternalLabelsHandler(q, conf.GlobalConfig.ExternalLabels)
		if len(rrConf.RequiredMatchers) > 0 {
			q = RequiredMatchersFilter(q, labelsToEqualityMatchers(rrConf.RequiredMatchers))
		}
		if !rrConf.ReadRecent {
			q = PreferLocalStorageFilter(q, s.localStartTimeCallback)
		}
		queryables = append(queryables, q)
	}
	s.queryables = queryables

	return nil
}
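
// For reference, an illustrative prometheus.yml fragment that exercises this
// read path (all values are placeholders, not recommendations or defaults):
//
//	remote_read:
//	  - url: http://example.org/api/v1/read
//	    name: example        # optional; the config hash prefix is used if empty
//	    remote_timeout: 1m
//	    read_recent: false   # false => prefer local storage for recent data
//	    required_matchers:
//	      cluster: prod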

// StartTime implements the Storage interface.
func (s *Storage) StartTime() (int64, error) {
	return int64(model.Latest), nil
}

// Querier returns a storage.MergeQuerier combining the remote client queriers
// of each configured remote read endpoint.
func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	s.mtx.Lock()
	queryables := s.queryables
	s.mtx.Unlock()

	queriers := make([]storage.Querier, 0, len(queryables))
	for _, queryable := range queryables {
		q, err := queryable.Querier(ctx, mint, maxt)
		if err != nil {
			return nil, err
		}
		queriers = append(queriers, q)
	}
	return storage.NewMergeQuerier(nil, queriers, storage.ChainedSeriesMerge), nil
}

// Appender implements storage.Storage.
func (s *Storage) Appender() storage.Appender {
	return s.rws.Appender()
}

// Close the background processing of the storage queues.
func (s *Storage) Close() error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.rws.Close()
}

// labelsToEqualityMatchers converts a label set into one equality matcher per label.
func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
	ms := make([]*labels.Matcher, 0, len(ls))
	for k, v := range ls {
		ms = append(ms, &labels.Matcher{
			Type:  labels.MatchEqual,
			Name:  string(k),
			Value: string(v),
		})
	}
	return ms
}
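
// For example (illustrative), a required_matchers set of {cluster="prod"}
// yields a single equality matcher:
//
//	labelsToEqualityMatchers(model.LabelSet{"cluster": "prod"})
//	// => []*labels.Matcher{{Type: labels.MatchEqual, Name: "cluster", Value: "prod"}}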

// Used for hashing configs and diff'ing hashes in ApplyConfig.
func toHash(data interface{}) (string, error) {
	bytes, err := yaml.Marshal(data)
	if err != nil {
		return "", err
	}
	hash := md5.Sum(bytes)
	return hex.EncodeToString(hash[:]), nil
}
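
// Illustrative sketch: identical read configs marshal to identical YAML and so
// hash to the same value, which is how ApplyConfig detects duplicates; the
// hash prefix also serves as the default client name (hash value below is
// hypothetical).
//
//	hash, _ := toHash(rrConf)
//	name := string(hash[:6]) // e.g. "9fd0c4", used when rrConf.Name is empty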