Merge branch 'master' into beorn7/storage6
commit d77d625ad3

@@ -242,10 +242,12 @@ func parse(args []string) error {
 	if err := parsePrometheusURL(); err != nil {
 		return err
 	}
-
 	if err := parseInfluxdbURL(); err != nil {
 		return err
 	}
+	if err := validateAlertmanagerURL(); err != nil {
+		return err
+	}
 
 	cfg.remote.InfluxdbPassword = os.Getenv("INFLUXDB_PW")
@@ -266,7 +268,7 @@ func parsePrometheusURL() error {
 	}
 
 	if ok := govalidator.IsURL(cfg.prometheusURL); !ok {
-		return fmt.Errorf("Invalid Prometheus URL: %s", cfg.prometheusURL)
+		return fmt.Errorf("invalid Prometheus URL: %s", cfg.prometheusURL)
 	}
 
 	promURL, err := url.Parse(cfg.prometheusURL)
@@ -289,7 +291,7 @@ func parseInfluxdbURL() error {
 	}
 
 	if ok := govalidator.IsURL(cfg.influxdbURL); !ok {
-		return fmt.Errorf("Invalid InfluxDB URL: %s", cfg.influxdbURL)
+		return fmt.Errorf("invalid InfluxDB URL: %s", cfg.influxdbURL)
 	}
 
 	url, err := url.Parse(cfg.influxdbURL)
@@ -301,6 +303,23 @@ func parseInfluxdbURL() error {
 	return nil
 }
 
+func validateAlertmanagerURL() error {
+	if cfg.notifier.AlertmanagerURL == "" {
+		return nil
+	}
+	if ok := govalidator.IsURL(cfg.notifier.AlertmanagerURL); !ok {
+		return fmt.Errorf("invalid Alertmanager URL: %s", cfg.notifier.AlertmanagerURL)
+	}
+	url, err := url.Parse(cfg.notifier.AlertmanagerURL)
+	if err != nil {
+		return err
+	}
+	if url.Scheme == "" {
+		return fmt.Errorf("missing scheme in Alertmanager URL: %s", cfg.notifier.AlertmanagerURL)
+	}
+	return nil
+}
+
 var helpTmpl = `
 usage: prometheus [<args>]
 {{ range $cat, $flags := . }}{{ if ne $cat "." }} == {{ $cat | upper }} =={{ end }}
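
The explicit url.Scheme check in validateAlertmanagerURL is what actually rejects host-only values: net/url parses a bare host such as alertmanager.company.com without error and simply leaves Scheme empty. A minimal standalone sketch of that standard-library behavior (not part of the commit; checkScheme is a hypothetical helper):

package main

import (
	"fmt"
	"net/url"
)

// checkScheme illustrates why validateAlertmanagerURL needs an explicit scheme
// check: url.Parse accepts a bare host name and leaves Scheme empty, so parsing
// alone would not reject it.
func checkScheme(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if u.Scheme == "" {
		return fmt.Errorf("missing scheme in URL: %s", raw)
	}
	return nil
}

func main() {
	fmt.Println(checkScheme("http://alertmanager.company.com")) // <nil>
	fmt.Println(checkScheme("alertmanager.company.com"))        // missing scheme in URL: ...
}
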
@@ -48,6 +48,18 @@ func TestParse(t *testing.T) {
 			input: []string{"-storage.remote.influxdb-url", "'https://some-url/'"},
 			valid: false,
 		},
+		{
+			input: []string{"-alertmanager.url", ""},
+			valid: true,
+		},
+		{
+			input: []string{"-alertmanager.url", "http://alertmanager.company.com"},
+			valid: true,
+		},
+		{
+			input: []string{"-alertmanager.url", "alertmanager.company.com"},
+			valid: false,
+		},
 	}
 
 	for i, test := range tests {
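
The three new -alertmanager.url cases line up with the checks above: an empty value counts as "not configured", a full http URL passes, and a bare host fails on the missing scheme. A self-contained sketch of that flow, using a hypothetical validateURL that takes the value as an argument instead of reading it from cfg:

package main

import (
	"fmt"
	"net/url"

	"github.com/asaskevich/govalidator"
)

// validateURL is a hypothetical stand-in mirroring the checks the commit adds
// in validateAlertmanagerURL: empty means "not configured", then a syntactic
// check, then an explicit scheme check.
func validateURL(raw string) error {
	if raw == "" {
		return nil
	}
	if ok := govalidator.IsURL(raw); !ok {
		return fmt.Errorf("invalid Alertmanager URL: %s", raw)
	}
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if u.Scheme == "" {
		return fmt.Errorf("missing scheme in Alertmanager URL: %s", raw)
	}
	return nil
}

func main() {
	for _, in := range []string{"", "http://alertmanager.company.com", "alertmanager.company.com"} {
		fmt.Printf("%-40q valid=%v\n", in, validateURL(in) == nil)
	}
}
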
@@ -15,6 +15,7 @@ package retrieval
 
 import (
 	"fmt"
+	"sort"
 	"strings"
 	"sync"
 	"time"
@@ -134,11 +135,11 @@ func (tm *TargetManager) reload() {
 }
 
 // Pools returns the targets currently being scraped bucketed by their job name.
-func (tm *TargetManager) Pools() map[string][]*Target {
+func (tm *TargetManager) Pools() map[string]Targets {
 	tm.mtx.RLock()
 	defer tm.mtx.RUnlock()
 
-	pools := map[string][]*Target{}
+	pools := map[string]Targets{}
 
 	// TODO(fabxc): this is just a hack to maintain compatibility for now.
 	for _, ps := range tm.targetSets {
@@ -151,6 +152,9 @@ func (tm *TargetManager) Pools() map[string][]*Target {
 
 		ps.scrapePool.mtx.RUnlock()
 	}
+	for _, targets := range pools {
+		sort.Sort(targets)
+	}
 	return pools
 }
 
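
The sort.Sort(targets) call requires the new Targets type to satisfy sort.Interface; its definition is not part of this diff. A sketch under the assumption that Targets is a []*Target ordered by the target's URL (the actual ordering key in the repository may differ):

package retrievalsketch

import "net/url"

// Target is a stand-in for retrieval.Target; only the URL accessor assumed by
// the ordering below is included here.
type Target struct{ url *url.URL }

func (t *Target) URL() *url.URL { return t.url }

// Targets is the assumed shape of the new slice type: it satisfies
// sort.Interface so that sort.Sort(targets) in Pools compiles.
type Targets []*Target

func (ts Targets) Len() int           { return len(ts) }
func (ts Targets) Swap(i, j int)      { ts[i], ts[j] = ts[j], ts[i] }
func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() }
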
@@ -274,19 +278,15 @@ func (ts *targetSet) runProviders(ctx context.Context, providers map[string]Targ
 	updates := make(chan []*config.TargetGroup)
 
 	go func(name string, prov TargetProvider) {
 		var initial []*config.TargetGroup
 
 		select {
 		case <-ctx.Done():
 			wg.Done()
 			return
-		case initial = <-updates:
-			// First set of all targets the provider knows.
-		case <-time.After(5 * time.Second):
-			// Initial set didn't arrive. Act as if it was empty
-			// and wait for updates later on.
+		case initial, ok := <-updates:
+			// Handle the case that a target provider exits and closes the channel
+			// before the context is done.
+			if !ok {
+				break
+			}
+
+			// First set of all targets the provider knows.
 			for _, tgroup := range initial {
 				targets, err := targetsFromGroup(tgroup, ts.config)
 				if err != nil {
@@ -295,6 +295,10 @@ func (ts *targetSet) runProviders(ctx context.Context, providers map[string]Targ
 				}
 				ts.tgroups[name+"/"+tgroup.Source] = targets
 			}
+		case <-time.After(5 * time.Second):
+			// Initial set didn't arrive. Act as if it was empty
+			// and wait for updates later on.
 		}
 
 		wg.Done()
@@ -303,7 +307,12 @@ func (ts *targetSet) runProviders(ctx context.Context, providers map[string]Targ
 		select {
 		case <-ctx.Done():
 			return
-		case tgs := <-updates:
+		case tgs, ok := <-updates:
+			// Handle the case that a target provider exits and closes the channel
+			// before the context is done.
+			if !ok {
+				return
+			}
 			for _, tg := range tgs {
 				if err := ts.update(name, tg); err != nil {
 					log.With("target_group", tg).Errorf("Target update failed: %s", err)
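
Both select statements now use the two-value receive form so that a provider closing its updates channel can be told apart from a genuine update; without the ok check, a closed channel would keep yielding zero values. A small self-contained illustration of the pattern (not Prometheus code):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	updates := make(chan []string)

	// Simulate a provider that sends one batch and then exits,
	// closing its channel before the context is done.
	go func() {
		updates <- []string{"group-a", "group-b"}
		close(updates)
	}()

	for {
		select {
		case <-ctx.Done():
			fmt.Println("context done")
			return
		case tgs, ok := <-updates:
			if !ok {
				// Channel closed: stop instead of spinning on zero values.
				fmt.Println("provider exited")
				return
			}
			fmt.Println("received", tgs)
		}
	}
}
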
@@ -92,7 +92,7 @@ type PrometheusStatus struct {
 
 	// A function that returns the current scrape targets pooled
 	// by their job name.
-	TargetPools func() map[string][]*retrieval.Target
+	TargetPools func() map[string]retrieval.Targets
 	// A function that returns all loaded rules.
 	Rules func() []rules.Rule
 
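
PrometheusStatus keeps TargetPools as a function rather than a snapshot, so each status render sees the pools as they are at that moment. A minimal illustration of that design with placeholder types (not the actual web package code):

package main

import "fmt"

// Status mimics the PrometheusStatus pattern: a function-valued field is
// re-evaluated on every use instead of freezing a snapshot at construction time.
type Status struct {
	TargetPools func() map[string][]string
}

func main() {
	pools := map[string][]string{"node": {"http://host-a:9100/metrics"}}

	s := Status{TargetPools: func() map[string][]string { return pools }}

	// A later update is visible because the field is called again when rendering.
	pools["node"] = append(pools["node"], "http://host-b:9100/metrics")
	fmt.Println(s.TargetPools())
}
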