Apply new scrape config on reload.
This commit updates a target set's scrape configuration on reload. This causes all running scrape loops to be stopped and restarted with the new parameters.
parent 02f635dc24
commit 84f74b9a84
@@ -107,6 +107,34 @@ func (sp *scrapePool) stop() {
     wg.Wait()
 }
 
+func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
+    log.Debugln("reload scrapepool")
+    defer log.Debugln("reload done")
+
+    sp.mtx.Lock()
+    defer sp.mtx.Unlock()
+
+    sp.config = cfg
+
+    var wg sync.WaitGroup
+
+    for _, tgroup := range sp.tgroups {
+        for _, t := range tgroup {
+            wg.Add(1)
+
+            go func(t *Target) {
+                t.scrapeLoop.stop()
+
+                t.scrapeLoop = newScrapeLoop(sp.ctx, t, sp.sampleAppender(t), sp.reportAppender(t))
+                go t.scrapeLoop.run(time.Duration(cfg.ScrapeInterval), time.Duration(cfg.ScrapeTimeout), nil)
+                wg.Done()
+            }(t)
+        }
+    }
+
+    wg.Wait()
+}
+
 // sampleAppender returns an appender for ingested samples from the target.
 func (sp *scrapePool) sampleAppender(target *Target) storage.SampleAppender {
     app := sp.appender
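The new scrapePool.reload above stops and restarts every scrape loop concurrently: each target gets its own goroutine that stops the old loop, builds a new one from the updated config, and starts it, while a sync.WaitGroup makes reload return only once every loop has been swapped. Below is a minimal, self-contained sketch of that pattern; the loop type and newLoop/reload names are hypothetical stand-ins for scrapeLoop and the real method, and a plain interval replaces *config.ScrapeConfig.

package main

import (
    "fmt"
    "sync"
    "time"
)

// loop is a stand-in for a scrape loop: it ticks until stopped, and in this
// sketch stop blocks until the loop has actually exited.
type loop struct {
    interval time.Duration
    quit     chan struct{}
    done     chan struct{}
}

func newLoop(interval time.Duration) *loop {
    return &loop{interval: interval, quit: make(chan struct{}), done: make(chan struct{})}
}

func (l *loop) run() {
    defer close(l.done)
    for {
        select {
        case <-l.quit:
            return
        case <-time.After(l.interval):
            fmt.Println("scrape tick after", l.interval)
        }
    }
}

func (l *loop) stop() {
    close(l.quit)
    <-l.done
}

// reload swaps every loop for one using the new interval. Each swap runs in
// its own goroutine; the WaitGroup makes reload return only when all are done.
func reload(loops []*loop, newInterval time.Duration) []*loop {
    next := make([]*loop, len(loops))
    var wg sync.WaitGroup
    for i, old := range loops {
        wg.Add(1)
        go func(i int, old *loop) {
            defer wg.Done()
            old.stop() // waits for the old loop to exit

            nl := newLoop(newInterval)
            go nl.run()
            next[i] = nl
        }(i, old)
    }
    wg.Wait()
    return next
}

func main() {
    loops := []*loop{newLoop(200 * time.Millisecond), newLoop(200 * time.Millisecond)}
    for _, l := range loops {
        go l.run()
    }
    loops = reload(loops, 50*time.Millisecond)
    time.Sleep(120 * time.Millisecond) // let the new, faster loops tick
    for _, l := range loops {
        l.stop()
    }
}

In the diff itself the pool's mutex is held for the whole swap (sp.mtx.Lock with a deferred unlock), so sync, which takes the same lock, cannot run concurrently with a reload.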
@@ -143,6 +171,7 @@ func (sp *scrapePool) reportAppender(target *Target) storage.SampleAppender {
 
 func (sp *scrapePool) sync(tgroups map[string]map[model.Fingerprint]*Target) {
     sp.mtx.Lock()
+    defer sp.mtx.Unlock()
 
     var (
         wg sync.WaitGroup
@@ -203,9 +232,6 @@ func (sp *scrapePool) sync(tgroups map[string]map[model.Fingerprint]*Target) {
     // may be active and tries to insert. The old scraper that didn't terminate yet could still
     // be inserting a previous sample set.
     wg.Wait()
-
-    // TODO(fabxc): maybe this can be released earlier with subsequent refactoring.
-    sp.mtx.Unlock()
 }
 
 // A scraper retrieves samples and accepts a status report at the end.
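The two hunks above also change how sync releases the pool mutex: instead of an explicit sp.mtx.Unlock() at the very end of the function (with a TODO about releasing it earlier), the lock is now released by a defer placed right after sp.mtx.Lock(). The behaviour is the same when the function runs to completion, but the deferred form also releases the mutex on any early return or panic. A tiny illustration of the difference, using hypothetical names:

package main

import (
    "fmt"
    "sync"
)

type pool struct {
    mtx    sync.Mutex
    config string
}

// setExplicit releases the mutex with an explicit call: every return path
// has to remember to do so, which is easy to get wrong as the function grows.
func (p *pool) setExplicit(cfg string) {
    p.mtx.Lock()
    p.config = cfg
    p.mtx.Unlock()
}

// setIfChanged uses a deferred unlock: the mutex is released on every exit
// path, including the early return below.
func (p *pool) setIfChanged(cfg string) bool {
    p.mtx.Lock()
    defer p.mtx.Unlock()

    if cfg == p.config {
        return false // the deferred Unlock still runs here
    }
    p.config = cfg
    return true
}

func main() {
    p := &pool{}
    p.setExplicit("a")
    fmt.Println(p.setIfChanged("a")) // false: unchanged
    fmt.Println(p.setIfChanged("b")) // true
}

Note that the deferred unlock is still held across wg.Wait() in sync, so reload, which takes the same sp.mtx, cannot swap loops while a sync is waiting on in-flight scrapers.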
@@ -117,6 +117,8 @@ func (tm *TargetManager) reload() {
                 ts.runScraping(tm.ctx)
                 tm.wg.Done()
             }(ts)
+        } else {
+            ts.reload(scfg)
         }
         ts.runProviders(tm.ctx, providersFromConfig(scfg))
     }
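The else branch above is what wires the new path into TargetManager.reload: previously an existing target set only had its providers re-run, while now it is also handed the new scrape config via ts.reload(scfg); genuinely new configs still get a freshly created and started target set. A sketch of that create-or-reload pattern over a map, with simplified hypothetical types in place of targetSet and *config.ScrapeConfig:

package main

import (
    "fmt"
    "time"
)

type scrapeConfig struct {
    jobName  string
    interval time.Duration
}

type targetSet struct {
    cfg scrapeConfig
}

func (ts *targetSet) start() {
    fmt.Println("start scraping job", ts.cfg.jobName, "every", ts.cfg.interval)
}

func (ts *targetSet) reload(cfg scrapeConfig) {
    ts.cfg = cfg
    fmt.Println("reloaded job", cfg.jobName, "to interval", cfg.interval)
}

type manager struct {
    targetSets map[string]*targetSet
}

// applyConfig creates and starts a target set for jobs it has not seen yet,
// and reloads existing ones in place instead of tearing them down.
func (m *manager) applyConfig(cfgs []scrapeConfig) {
    for _, scfg := range cfgs {
        if ts, ok := m.targetSets[scfg.jobName]; ok {
            ts.reload(scfg)
        } else {
            ts = &targetSet{cfg: scfg}
            m.targetSets[scfg.jobName] = ts
            ts.start()
        }
    }
}

func main() {
    m := &manager{targetSets: map[string]*targetSet{}}
    m.applyConfig([]scrapeConfig{{jobName: "node", interval: 15 * time.Second}})
    m.applyConfig([]scrapeConfig{{jobName: "node", interval: 30 * time.Second}}) // reloads in place
}

In the diff, ts.runProviders sits outside the if/else, so service discovery is (re)started for both the new and the reused target set.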
@@ -203,6 +205,14 @@ func (ts *targetSet) cancel() {
     }
 }
 
+func (ts *targetSet) reload(cfg *config.ScrapeConfig) {
+    ts.mtx.Lock()
+    ts.config = cfg
+    ts.mtx.Unlock()
+
+    ts.scrapePool.reload(cfg)
+}
+
 func (ts *targetSet) runScraping(ctx context.Context) {
     ctx, ts.cancelScraping = context.WithCancel(ctx)
 
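Finally, targetSet.reload above holds the target set's own mutex only for the config swap and releases it before delegating to scrapePool.reload, which does the slow work of restarting the loops under the pool's lock. A compact sketch of that shape, with a string standing in for *config.ScrapeConfig and a stub pool:

package main

import (
    "fmt"
    "sync"
    "time"
)

// pool stands in for scrapePool: its reload is comparatively slow because it
// stops and restarts every scrape loop before returning.
type pool struct{}

func (p *pool) reload(cfg string) {
    time.Sleep(50 * time.Millisecond)
    fmt.Println("pool reloaded with:", cfg)
}

type targetSet struct {
    mtx    sync.Mutex
    config string
    pool   *pool
}

// reload keeps the target set's critical section short: the config swap
// happens under the lock, the slow pool reload after it is released.
func (ts *targetSet) reload(cfg string) {
    ts.mtx.Lock()
    ts.config = cfg
    ts.mtx.Unlock()

    ts.pool.reload(cfg)
}

func main() {
    ts := &targetSet{pool: &pool{}}
    ts.reload("new scrape config")
}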