2017-11-25 13:13:54 +00:00
|
|
|
// Copyright 2016 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package discovery
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
2017-11-29 22:38:52 +00:00
|
|
|
"sort"
|
2017-11-25 13:13:54 +00:00
|
|
|
|
|
|
|
"github.com/go-kit/kit/log"
|
|
|
|
"github.com/go-kit/kit/log/level"
|
|
|
|
|
|
|
|
"github.com/prometheus/prometheus/config"
|
|
|
|
|
|
|
|
"github.com/prometheus/prometheus/discovery/azure"
|
|
|
|
"github.com/prometheus/prometheus/discovery/consul"
|
|
|
|
"github.com/prometheus/prometheus/discovery/dns"
|
|
|
|
"github.com/prometheus/prometheus/discovery/ec2"
|
|
|
|
"github.com/prometheus/prometheus/discovery/file"
|
|
|
|
"github.com/prometheus/prometheus/discovery/gce"
|
|
|
|
"github.com/prometheus/prometheus/discovery/kubernetes"
|
|
|
|
"github.com/prometheus/prometheus/discovery/marathon"
|
|
|
|
"github.com/prometheus/prometheus/discovery/openstack"
|
|
|
|
"github.com/prometheus/prometheus/discovery/triton"
|
|
|
|
"github.com/prometheus/prometheus/discovery/zookeeper"
|
|
|
|
)
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
// Discoverer provides information about target groups. It maintains a set
// of sources from which TargetGroups can originate. Whenever a discovery provider
// detects a potential change, it sends the TargetGroup through its channel.
//
// Discoverer does not know if an actual change happened.
// It does guarantee that it sends the new TargetGroup whenever a change happens.
//
// Discoverers should initially send a full set of all discoverable TargetGroups.
type Discoverer interface {
	// Run hands a channel to the discovery provider (Consul, DNS, etc.) through
	// which it can send updated target groups.
	// Must return when the context gets canceled. It should not close the update
	// channel on returning.
	Run(ctx context.Context, up chan<- []*config.TargetGroup)
}
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
// type pool struct {
|
|
|
|
// cancel func()
|
|
|
|
// tgroups []*config.TargetGroup
|
|
|
|
// }
|
2017-11-25 13:13:54 +00:00
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
// NewManager is the Discovery Manager constructor
|
2017-12-01 12:59:24 +00:00
|
|
|
func NewManager(logger log.Logger) *Manager {
|
2017-11-26 22:18:05 +00:00
|
|
|
return &Manager{
|
|
|
|
logger: logger,
|
2017-12-01 12:59:24 +00:00
|
|
|
actionCh: make(chan func(context.Context)),
|
2017-11-26 22:18:05 +00:00
|
|
|
syncCh: make(chan map[string][]*config.TargetGroup),
|
2017-11-29 22:38:52 +00:00
|
|
|
targets: make(map[string]map[string][]*config.TargetGroup),
|
2017-11-26 22:18:05 +00:00
|
|
|
discoverCancel: []context.CancelFunc{},
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
// Manager maintains a set of discovery providers and sends each update to a channel used by other packages.
// The sync channel sends the updates in map[targetSetName] where targetSetName is the job value from the scrape config.
// Targets pool is kept in a map with a format map[targetSetName]map[providerName].
type Manager struct {
	logger log.Logger
	// syncCh carries the merged target groups, keyed by target-set (job) name.
	syncCh chan map[string][]*config.TargetGroup
	// actionCh serializes all state mutations through the Run loop.
	actionCh chan func(context.Context)
	// discoverCancel holds one cancel func per started discovery provider.
	discoverCancel []context.CancelFunc
	// targets maps target-set name -> provider name -> latest target groups.
	targets map[string]map[string][]*config.TargetGroup
}
|
|
|
|
|
|
|
|
// Run starts the background processing
|
2017-12-01 12:59:24 +00:00
|
|
|
func (m *Manager) Run(ctx context.Context) error {
|
2017-11-25 13:13:54 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case f := <-m.actionCh:
|
2017-12-01 12:59:24 +00:00
|
|
|
f(ctx)
|
|
|
|
case <-ctx.Done():
|
2017-11-29 22:38:52 +00:00
|
|
|
m.cancelDiscoverers()
|
2017-12-01 12:59:24 +00:00
|
|
|
return ctx.Err()
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-29 22:38:52 +00:00
|
|
|
// SyncCh returns a read only channel used by all Discoverers to send target updates.
// Receivers get a map keyed by target-set (job) name with the merged groups.
func (m *Manager) SyncCh() <-chan map[string][]*config.TargetGroup {
	return m.syncCh
}
|
|
|
|
|
|
|
|
// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
|
2017-11-26 22:18:05 +00:00
|
|
|
func (m *Manager) ApplyConfig(cfg *config.Config) error {
|
2017-11-25 13:13:54 +00:00
|
|
|
err := make(chan error)
|
2017-12-01 12:59:24 +00:00
|
|
|
m.actionCh <- func(ctx context.Context) {
|
2017-11-26 22:18:05 +00:00
|
|
|
m.cancelDiscoverers()
|
2017-11-25 13:13:54 +00:00
|
|
|
for _, scfg := range cfg.ScrapeConfigs {
|
|
|
|
for provName, prov := range m.providersFromConfig(scfg.ServiceDiscoveryConfig) {
|
2017-12-01 12:59:24 +00:00
|
|
|
m.startProvider(ctx, scfg.JobName, provName, prov)
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
close(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return <-err
|
|
|
|
}
|
|
|
|
|
2017-12-01 12:59:24 +00:00
|
|
|
func (m *Manager) startProvider(ctx context.Context, jobName, provName string, worker Discoverer) {
|
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
2017-11-27 01:59:34 +00:00
|
|
|
updates := make(chan []*config.TargetGroup)
|
|
|
|
|
|
|
|
m.discoverCancel = append(m.discoverCancel, cancel)
|
|
|
|
|
|
|
|
go worker.Run(ctx, updates)
|
2017-12-01 12:59:24 +00:00
|
|
|
go m.runProvider(ctx, provName, jobName, updates)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Manager) runProvider(ctx context.Context, provName, jobName string, updates chan []*config.TargetGroup) {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case tgs, ok := <-updates:
|
|
|
|
// Handle the case that a target provider exits and closes the channel
|
|
|
|
// before the context is done.
|
|
|
|
if !ok {
|
2017-11-27 01:59:34 +00:00
|
|
|
return
|
|
|
|
}
|
2017-12-01 12:59:24 +00:00
|
|
|
m.addGroup(jobName, provName, tgs)
|
|
|
|
m.syncCh <- m.allGroups(jobName)
|
2017-11-27 01:59:34 +00:00
|
|
|
}
|
2017-12-01 12:59:24 +00:00
|
|
|
}
|
2017-11-27 01:59:34 +00:00
|
|
|
}
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
func (m *Manager) cancelDiscoverers() {
|
|
|
|
for _, c := range m.discoverCancel {
|
|
|
|
c()
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
2017-11-29 22:38:52 +00:00
|
|
|
m.targets = make(map[string]map[string][]*config.TargetGroup)
|
2017-12-01 12:59:24 +00:00
|
|
|
m.discoverCancel = nil
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
|
2017-12-01 12:59:24 +00:00
|
|
|
func (m *Manager) addGroup(tsName, provName string, tg []*config.TargetGroup) {
|
|
|
|
done := make(chan struct{})
|
2017-11-29 22:38:52 +00:00
|
|
|
|
2017-12-01 12:59:24 +00:00
|
|
|
m.actionCh <- func(ctx context.Context) {
|
2017-11-29 22:38:52 +00:00
|
|
|
if m.targets[tsName] == nil {
|
|
|
|
m.targets[tsName] = make(map[string][]*config.TargetGroup)
|
|
|
|
}
|
|
|
|
|
2017-11-25 13:13:54 +00:00
|
|
|
if tg != nil {
|
2017-11-29 22:38:52 +00:00
|
|
|
m.targets[tsName][provName] = tg
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
2017-12-01 12:59:24 +00:00
|
|
|
close(done)
|
|
|
|
|
|
|
|
}
|
|
|
|
<-done
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Manager) allGroups(tsName string) map[string][]*config.TargetGroup {
|
|
|
|
tset := make(chan map[string][]*config.TargetGroup)
|
|
|
|
|
|
|
|
m.actionCh <- func(ctx context.Context) {
|
2017-11-29 22:38:52 +00:00
|
|
|
tgAll := []*config.TargetGroup{}
|
|
|
|
|
2017-12-01 12:59:24 +00:00
|
|
|
// Sorting the providers is needed so that we can have predictable tests.
|
2017-11-29 22:38:52 +00:00
|
|
|
// Maps cannot be sorted so need to extract the keys to a slice and sort the string slice.
|
|
|
|
var providerNames []string
|
|
|
|
for providerName := range m.targets[tsName] {
|
|
|
|
providerNames = append(providerNames, providerName)
|
|
|
|
}
|
|
|
|
sort.Strings(providerNames)
|
|
|
|
for _, prov := range providerNames {
|
|
|
|
for _, tg := range m.targets[tsName][prov] {
|
|
|
|
if tg.Source != "" { // Don't add empty targets.
|
|
|
|
tgAll = append(tgAll, tg)
|
|
|
|
}
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
t := make(map[string][]*config.TargetGroup)
|
|
|
|
t[tsName] = tgAll
|
|
|
|
tset <- t
|
|
|
|
}
|
|
|
|
return <-tset
|
2017-12-01 12:59:24 +00:00
|
|
|
|
2017-11-25 13:13:54 +00:00
|
|
|
}
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
func (m *Manager) providersFromConfig(cfg config.ServiceDiscoveryConfig) map[string]Discoverer {
|
|
|
|
providers := map[string]Discoverer{}
|
2017-11-25 13:13:54 +00:00
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
app := func(mech string, i int, tp Discoverer) {
|
2017-11-25 13:13:54 +00:00
|
|
|
providers[fmt.Sprintf("%s/%d", mech, i)] = tp
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, c := range cfg.DNSSDConfigs {
|
|
|
|
app("dns", i, dns.NewDiscovery(c, log.With(m.logger, "discovery", "dns")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.FileSDConfigs {
|
|
|
|
app("file", i, file.NewDiscovery(c, log.With(m.logger, "discovery", "file")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.ConsulSDConfigs {
|
|
|
|
k, err := consul.NewDiscovery(c, log.With(m.logger, "discovery", "consul"))
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot create Consul discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("consul", i, k)
|
|
|
|
}
|
|
|
|
for i, c := range cfg.MarathonSDConfigs {
|
|
|
|
t, err := marathon.NewDiscovery(c, log.With(m.logger, "discovery", "marathon"))
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot create Marathon discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("marathon", i, t)
|
|
|
|
}
|
|
|
|
for i, c := range cfg.KubernetesSDConfigs {
|
|
|
|
k, err := kubernetes.New(log.With(m.logger, "discovery", "k8s"), c)
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot create Kubernetes discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("kubernetes", i, k)
|
|
|
|
}
|
|
|
|
for i, c := range cfg.ServersetSDConfigs {
|
|
|
|
app("serverset", i, zookeeper.NewServersetDiscovery(c, log.With(m.logger, "discovery", "zookeeper")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.NerveSDConfigs {
|
|
|
|
app("nerve", i, zookeeper.NewNerveDiscovery(c, log.With(m.logger, "discovery", "nerve")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.EC2SDConfigs {
|
|
|
|
app("ec2", i, ec2.NewDiscovery(c, log.With(m.logger, "discovery", "ec2")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.OpenstackSDConfigs {
|
|
|
|
openstackd, err := openstack.NewDiscovery(c, log.With(m.logger, "discovery", "openstack"))
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot initialize OpenStack discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("openstack", i, openstackd)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, c := range cfg.GCESDConfigs {
|
|
|
|
gced, err := gce.NewDiscovery(c, log.With(m.logger, "discovery", "gce"))
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot initialize GCE discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("gce", i, gced)
|
|
|
|
}
|
|
|
|
for i, c := range cfg.AzureSDConfigs {
|
|
|
|
app("azure", i, azure.NewDiscovery(c, log.With(m.logger, "discovery", "azure")))
|
|
|
|
}
|
|
|
|
for i, c := range cfg.TritonSDConfigs {
|
|
|
|
t, err := triton.New(log.With(m.logger, "discovery", "trition"), c)
|
|
|
|
if err != nil {
|
|
|
|
level.Error(m.logger).Log("msg", "Cannot create Triton discovery", "err", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
app("triton", i, t)
|
|
|
|
}
|
|
|
|
if len(cfg.StaticConfigs) > 0 {
|
|
|
|
app("static", 0, NewStaticProvider(cfg.StaticConfigs))
|
|
|
|
}
|
|
|
|
|
|
|
|
return providers
|
|
|
|
}
|
|
|
|
|
|
|
|
// StaticProvider holds a list of target groups that never change.
type StaticProvider struct {
	// TargetGroups is the fixed set of groups sent once to each Run consumer.
	TargetGroups []*config.TargetGroup
}
|
|
|
|
|
|
|
|
// NewStaticProvider returns a StaticProvider configured with the given
|
|
|
|
// target groups.
|
|
|
|
func NewStaticProvider(groups []*config.TargetGroup) *StaticProvider {
|
|
|
|
for i, tg := range groups {
|
|
|
|
tg.Source = fmt.Sprintf("%d", i)
|
|
|
|
}
|
|
|
|
return &StaticProvider{groups}
|
|
|
|
}
|
|
|
|
|
2017-11-26 22:18:05 +00:00
|
|
|
// Run implements the Worker interface.
|
2017-11-25 13:13:54 +00:00
|
|
|
func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
|
|
|
|
// We still have to consider that the consumer exits right away in which case
|
|
|
|
// the context will be canceled.
|
|
|
|
select {
|
|
|
|
case ch <- sd.TargetGroups:
|
|
|
|
case <-ctx.Done():
|
|
|
|
}
|
|
|
|
close(ch)
|
|
|
|
}
|