2017-05-10 09:44:13 +00:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
2017-10-17 01:26:38 +00:00
|
|
|
// limitations under the License.
|
2017-05-10 09:44:13 +00:00
|
|
|
|
|
|
|
package remote
|
|
|
|
|
|
|
|
import (
|
2017-11-12 01:15:27 +00:00
|
|
|
"context"
|
2019-04-23 08:47:18 +00:00
|
|
|
"crypto/md5"
|
|
|
|
"encoding/json"
|
2017-05-10 09:44:13 +00:00
|
|
|
"sync"
|
2018-05-23 14:03:54 +00:00
|
|
|
"time"
|
2017-05-10 09:44:13 +00:00
|
|
|
|
2017-08-11 18:45:52 +00:00
|
|
|
"github.com/go-kit/kit/log"
|
2019-04-23 08:47:18 +00:00
|
|
|
"github.com/go-kit/kit/log/level"
|
2018-09-07 21:26:04 +00:00
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2017-05-10 09:44:13 +00:00
|
|
|
"github.com/prometheus/common/model"
|
|
|
|
"github.com/prometheus/prometheus/config"
|
2017-11-12 01:23:20 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/labels"
|
2019-02-12 14:58:25 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/logging"
|
2017-11-12 01:15:27 +00:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2017-05-10 09:44:13 +00:00
|
|
|
)
|
|
|
|
|
2018-12-27 14:02:36 +00:00
|
|
|
// startTimeCallback is a callback func that returns the oldest timestamp stored in a storage.
type startTimeCallback func() (int64, error)
|
|
|
|
|
2017-08-01 10:19:35 +00:00
|
|
|
// Storage represents all the remote read and write endpoints. It implements
// storage.Storage.
type Storage struct {
	logger log.Logger
	// mtx guards all fields below; ApplyConfig, Querier and Close all
	// take it.
	mtx sync.Mutex

	// configHash is the MD5 sum of the JSON-encoded remote write configs
	// last applied; ApplyConfig compares against it to avoid restarting
	// the queue managers when the write configuration is unchanged.
	configHash [16]byte

	// For writes
	walDir        string
	queues        []*QueueManager
	samplesIn     *ewmaRate
	flushDeadline time.Duration

	// For reads
	queryables             []storage.Queryable
	localStartTimeCallback startTimeCallback
}
|
|
|
|
|
2017-10-18 11:08:14 +00:00
|
|
|
// NewStorage returns a remote.Storage.
|
2018-09-07 21:26:04 +00:00
|
|
|
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration) *Storage {
|
2017-08-11 18:45:52 +00:00
|
|
|
if l == nil {
|
|
|
|
l = log.NewNopLogger()
|
|
|
|
}
|
2018-09-07 21:26:04 +00:00
|
|
|
s := &Storage{
|
2019-02-13 14:47:35 +00:00
|
|
|
logger: logging.Dedupe(l, 1*time.Minute),
|
2018-05-23 14:03:54 +00:00
|
|
|
localStartTimeCallback: stCallback,
|
|
|
|
flushDeadline: flushDeadline,
|
2019-03-01 19:04:26 +00:00
|
|
|
samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
|
2018-09-07 21:26:04 +00:00
|
|
|
walDir: walDir,
|
2018-05-23 14:03:54 +00:00
|
|
|
}
|
2019-03-01 19:04:26 +00:00
|
|
|
go s.run()
|
2018-09-07 21:26:04 +00:00
|
|
|
return s
|
2017-08-11 18:45:52 +00:00
|
|
|
}
|
|
|
|
|
2019-03-01 19:04:26 +00:00
|
|
|
func (s *Storage) run() {
|
|
|
|
ticker := time.NewTicker(shardUpdateDuration)
|
|
|
|
defer ticker.Stop()
|
|
|
|
for range ticker.C {
|
|
|
|
s.samplesIn.tick()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-10 09:44:13 +00:00
|
|
|
// ApplyConfig updates the state as the new config requires.
|
|
|
|
func (s *Storage) ApplyConfig(conf *config.Config) error {
|
|
|
|
s.mtx.Lock()
|
|
|
|
defer s.mtx.Unlock()
|
|
|
|
|
2019-04-23 08:47:18 +00:00
|
|
|
cfgBytes, err := json.Marshal(conf.RemoteWriteConfigs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
hash := md5.Sum(cfgBytes)
|
|
|
|
if hash == s.configHash {
|
|
|
|
level.Debug(s.logger).Log("msg", "remote write config has not changed, no need to restart QueueManagers")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s.configHash = hash
|
|
|
|
|
2017-05-10 09:44:13 +00:00
|
|
|
// Update write queues
|
|
|
|
newQueues := []*QueueManager{}
|
|
|
|
// TODO: we should only stop & recreate queues which have changes,
|
|
|
|
// as this can be quite disruptive.
|
|
|
|
for i, rwConf := range conf.RemoteWriteConfigs {
|
2017-10-23 13:53:43 +00:00
|
|
|
c, err := NewClient(i, &ClientConfig{
|
|
|
|
URL: rwConf.URL,
|
|
|
|
Timeout: rwConf.RemoteTimeout,
|
|
|
|
HTTPClientConfig: rwConf.HTTPClientConfig,
|
2017-05-10 09:44:13 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
newQueues = append(newQueues, NewQueueManager(
|
2017-08-11 18:45:52 +00:00
|
|
|
s.logger,
|
2018-09-07 21:26:04 +00:00
|
|
|
s.walDir,
|
|
|
|
s.samplesIn,
|
2017-12-08 10:11:45 +00:00
|
|
|
rwConf.QueueConfig,
|
2017-05-10 09:44:13 +00:00
|
|
|
conf.GlobalConfig.ExternalLabels,
|
|
|
|
rwConf.WriteRelabelConfigs,
|
|
|
|
c,
|
2018-05-23 14:03:54 +00:00
|
|
|
s.flushDeadline,
|
2017-05-10 09:44:13 +00:00
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, q := range s.queues {
|
|
|
|
q.Stop()
|
|
|
|
}
|
|
|
|
|
|
|
|
s.queues = newQueues
|
|
|
|
for _, q := range s.queues {
|
|
|
|
q.Start()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update read clients
|
2019-03-05 12:21:11 +00:00
|
|
|
queryables := make([]storage.Queryable, 0, len(conf.RemoteReadConfigs))
|
2017-05-10 09:44:13 +00:00
|
|
|
for i, rrConf := range conf.RemoteReadConfigs {
|
2017-10-23 13:53:43 +00:00
|
|
|
c, err := NewClient(i, &ClientConfig{
|
|
|
|
URL: rrConf.URL,
|
|
|
|
Timeout: rrConf.RemoteTimeout,
|
|
|
|
HTTPClientConfig: rrConf.HTTPClientConfig,
|
2017-05-10 09:44:13 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-02-26 07:58:10 +00:00
|
|
|
q := QueryableClient(c)
|
2018-07-26 07:49:53 +00:00
|
|
|
q = ExternalLabelsHandler(q, conf.GlobalConfig.ExternalLabels)
|
2017-11-12 01:23:20 +00:00
|
|
|
if len(rrConf.RequiredMatchers) > 0 {
|
|
|
|
q = RequiredMatchersFilter(q, labelsToEqualityMatchers(rrConf.RequiredMatchers))
|
|
|
|
}
|
2017-11-12 01:15:27 +00:00
|
|
|
if !rrConf.ReadRecent {
|
|
|
|
q = PreferLocalStorageFilter(q, s.localStartTimeCallback)
|
|
|
|
}
|
2019-03-05 12:21:11 +00:00
|
|
|
queryables = append(queryables, q)
|
2017-11-12 01:15:27 +00:00
|
|
|
}
|
2019-03-05 12:21:11 +00:00
|
|
|
s.queryables = queryables
|
2017-05-10 09:44:13 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-18 11:08:14 +00:00
|
|
|
// StartTime implements the Storage interface.
//
// It always reports model.Latest — presumably so that remote storage is
// never preferred over local storage on the basis of data age; the
// actual per-query preference is handled via localStartTimeCallback in
// PreferLocalStorageFilter (confirm against the read path).
func (s *Storage) StartTime() (int64, error) {
	return int64(model.Latest), nil
}
|
|
|
|
|
2017-11-12 01:15:27 +00:00
|
|
|
// Querier returns a storage.MergeQuerier combining the remote client queriers
|
|
|
|
// of each configured remote read endpoint.
|
|
|
|
func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
|
|
|
|
s.mtx.Lock()
|
|
|
|
queryables := s.queryables
|
|
|
|
s.mtx.Unlock()
|
|
|
|
|
|
|
|
queriers := make([]storage.Querier, 0, len(queryables))
|
|
|
|
for _, queryable := range queryables {
|
|
|
|
q, err := queryable.Querier(ctx, mint, maxt)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
queriers = append(queriers, q)
|
|
|
|
}
|
2018-11-30 14:27:12 +00:00
|
|
|
return storage.NewMergeQuerier(nil, queriers), nil
|
2017-11-12 01:15:27 +00:00
|
|
|
}
|
|
|
|
|
2017-08-01 10:19:35 +00:00
|
|
|
// Close the background processing of the storage queues.
//
// NOTE(review): the run() goroutine and its ticker are not stopped here
// — confirm whether Storage instances are expected to live for the
// whole process lifetime.
func (s *Storage) Close() error {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	// Stop each write queue manager.
	for _, q := range s.queues {
		q.Stop()
	}

	return nil
}
|
2017-11-12 01:23:20 +00:00
|
|
|
|
|
|
|
func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
|
|
|
|
ms := make([]*labels.Matcher, 0, len(ls))
|
|
|
|
for k, v := range ls {
|
|
|
|
ms = append(ms, &labels.Matcher{
|
|
|
|
Type: labels.MatchEqual,
|
|
|
|
Name: string(k),
|
|
|
|
Value: string(v),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return ms
|
|
|
|
}
|