// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package retrieval

import (
	"sync"
	"time"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/extraction"

	"github.com/prometheus/prometheus/utility"
)

const (
	targetAddQueueSize     = 100
	targetReplaceQueueSize = 1
)

// TargetPool is a pool of targets for the same job.
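// The embedded mutex guards targetsByURL, which maps each target's URL to the
// running target.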
type TargetPool struct {
	sync.RWMutex

	manager        TargetManager
	targetsByURL   map[string]Target
	interval       time.Duration
	ingester       extraction.Ingester
	addTargetQueue chan Target

	targetProvider TargetProvider

	stopping, stopped chan struct{}
}

// NewTargetPool creates a TargetPool, ready to be started by calling Run.
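//
// A minimal usage sketch, assuming manager, provider, and ingester are
// suitable TargetManager, TargetProvider, and extraction.Ingester values:
//
//	pool := NewTargetPool(manager, provider, ingester, 15*time.Second)
//	go pool.Run()
//	// ...
//	pool.Stop()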
func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i time.Duration) *TargetPool {
	return &TargetPool{
		manager:        m,
		interval:       i,
		ingester:       ing,
		targetsByURL:   make(map[string]Target),
		addTargetQueue: make(chan Target, targetAddQueueSize),
		targetProvider: p,
		stopping:       make(chan struct{}),
		stopped:        make(chan struct{}),
	}
}

// Run starts the target pool. It returns when the target pool has stopped
// (after calling Stop). Run is usually called as a goroutine.
func (p *TargetPool) Run() {
	ticker := time.NewTicker(p.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Periodically refresh the target list from the provider, if one is set.
			if p.targetProvider != nil {
				targets, err := p.targetProvider.Targets()
				if err != nil {
					glog.Warningf("Error looking up targets, keeping old list: %s", err)
				} else {
					p.ReplaceTargets(targets)
				}
			}
		case newTarget := <-p.addTargetQueue:
			p.addTarget(newTarget)
		case <-p.stopping:
			// Stop all running scrapers and signal that shutdown is complete.
			p.ReplaceTargets([]Target{})
			close(p.stopped)
			return
		}
	}
}

// Stop stops the target pool and returns once the shutdown is complete.
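// It must not be called more than once; a second call would panic when
// closing the already-closed stopping channel.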
func (p *TargetPool) Stop() {
	close(p.stopping)
	<-p.stopped
}

// AddTarget adds a target by queuing it in the target queue.
func (p *TargetPool) AddTarget(target Target) {
	p.addTargetQueue <- target
}

// addTarget registers the target under its URL and starts its scraper
// goroutine.
func (p *TargetPool) addTarget(target Target) {
	p.Lock()
	defer p.Unlock()

	p.targetsByURL[target.URL()] = target
	go target.RunScraper(p.ingester, p.interval)
}

// ReplaceTargets replaces the old targets by the provided new ones but reuses
// old targets that are also present in newTargets to preserve scheduling and
// health state. Targets no longer present are stopped.
func (p *TargetPool) ReplaceTargets(newTargets []Target) {
	p.Lock()
	defer p.Unlock()

	// Start scraping genuinely new targets; for targets that already exist,
	// only update their base labels.
	newTargetURLs := make(utility.Set)
	for _, newTarget := range newTargets {
		newTargetURLs.Add(newTarget.URL())
		oldTarget, ok := p.targetsByURL[newTarget.URL()]
		if ok {
			oldTarget.SetBaseLabelsFrom(newTarget)
		} else {
			p.targetsByURL[newTarget.URL()] = newTarget
			go newTarget.RunScraper(p.ingester, p.interval)
		}
	}

	// Remove targets that are no longer present and stop their scrapers
	// concurrently, waiting for all of them to terminate before returning.
	var wg sync.WaitGroup
	for k, oldTarget := range p.targetsByURL {
		if !newTargetURLs.Has(k) {
			wg.Add(1)
			go func(k string, oldTarget Target) {
				defer wg.Done()
				glog.V(1).Infof("Stopping scraper for target %s...", k)
				oldTarget.StopScraper()
				glog.V(1).Infof("Scraper for target %s stopped.", k)
			}(k, oldTarget)
			delete(p.targetsByURL, k)
		}
	}
	wg.Wait()
}

// Targets returns a copy of the current target list.
func (p *TargetPool) Targets() []Target {
	p.RLock()
	defer p.RUnlock()

	targets := make([]Target, 0, len(p.targetsByURL))
	for _, v := range p.targetsByURL {
		targets = append(targets, v)
	}
	return targets
}