// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package retrieval

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"golang.org/x/net/context"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/storage"
)

func TestNewScrapePool(t *testing.T) {
	var (
		app = &nopAppender{}
		cfg = &config.ScrapeConfig{}
		sp  = newScrapePool(context.Background(), cfg, app)
	)

	if a, ok := sp.appender.(*nopAppender); !ok || a != app {
		t.Fatalf("Wrong sample appender")
	}
	if sp.config != cfg {
		t.Fatalf("Wrong scrape config")
	}
	if sp.newLoop == nil {
		t.Fatalf("newLoop function not initialized")
	}
}

type testLoop struct {
	startFunc func(interval, timeout time.Duration, errc chan<- error)
	stopFunc  func()
}

func (l *testLoop) run(interval, timeout time.Duration, errc chan<- error) {
	l.startFunc(interval, timeout, errc)
}

func (l *testLoop) stop() {
	l.stopFunc()
}
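
// Compile-time assertion that testLoop satisfies the loop interface; the
// scrape pool tests below store it in a map[uint64]loop, so any drift in the
// interface would already fail the build here.
var _ loop = &testLoop{}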

func TestScrapePoolStop(t *testing.T) {
	sp := &scrapePool{
		targets: map[uint64]*Target{},
		loops:   map[uint64]loop{},
	}
	var mtx sync.Mutex
	stopped := map[uint64]bool{}
	numTargets := 20

	// Stopping the scrape pool must call stop() on all scrape loops and clean
	// them and the respective targets up. It must wait until each loop's stop
	// function has returned before returning itself.

	for i := 0; i < numTargets; i++ {
		// Shadow the loop variable so each closure captures its own value of i
		// instead of the final value after the loop has finished.
		i := i
		t := &Target{
			labels: model.LabelSet{
				model.AddressLabel: model.LabelValue(fmt.Sprintf("example.com:%d", i)),
			},
		}
		l := &testLoop{}
		l.stopFunc = func() {
			time.Sleep(time.Duration(i*20) * time.Millisecond)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.targets[t.hash()] = t
		sp.loops[t.hash()] = l
	}

	done := make(chan struct{})
	stopTime := time.Now()

	go func() {
		sp.stop()
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapePool.stop() did not return in time")
	case <-done:
		// This should have taken at least as long as the last target slept,
		// i.e. (numTargets-1)*20 milliseconds.
		if time.Since(stopTime) < time.Duration((numTargets-1)*20)*time.Millisecond {
			t.Fatalf("scrapePool.stop() exited before all targets stopped")
		}
	}

	mtx.Lock()
	if len(stopped) != numTargets {
		t.Fatalf("Expected %d stopped loops, got %d", numTargets, len(stopped))
	}
	mtx.Unlock()

	if len(sp.targets) > 0 {
		t.Fatalf("Targets were not cleared on stopping: %d left", len(sp.targets))
	}
	if len(sp.loops) > 0 {
		t.Fatalf("Loops were not cleared on stopping: %d left", len(sp.loops))
	}
}
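
// Illustrative sketch: the test above requires scrapePool.stop to wait for
// every loop's stop() to return before it returns itself. Assuming a
// concurrent fan-out (the pool implementation is not shown here), the
// pool-side logic would look roughly like:
//
//	var wg sync.WaitGroup
//	for _, l := range sp.loops {
//		wg.Add(1)
//		go func(l loop) {
//			defer wg.Done()
//			l.stop()
//		}(l)
//	}
//	wg.Wait()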

func TestScrapePoolReload(t *testing.T) {
	var mtx sync.Mutex
	numTargets := 20

	stopped := map[uint64]bool{}

	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	// On starting to run, new loops created on reload check whether their
	// preceding equivalents have been stopped.
	newLoop := func(ctx context.Context, s scraper, app storage.SampleAppender, tl model.LabelSet, cfg *config.ScrapeConfig) loop {
		l := &testLoop{}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			if interval != 3*time.Second {
				t.Errorf("Expected scrape interval %s but got %s", 3*time.Second, interval)
			}
			if timeout != 2*time.Second {
				t.Errorf("Expected scrape timeout %s but got %s", 2*time.Second, timeout)
			}
			mtx.Lock()
			if !stopped[s.(*targetScraper).hash()] {
				t.Errorf("Scrape loop for %v not stopped yet", s.(*targetScraper))
			}
			mtx.Unlock()
		}
		return l
	}
	sp := &scrapePool{
		targets: map[uint64]*Target{},
		loops:   map[uint64]loop{},
		newLoop: newLoop,
	}

	// Reloading a scrape pool with a new scrape configuration must stop all
	// scrape loops and start new ones. A new loop must not be started before
	// the preceding one terminated.

	for i := 0; i < numTargets; i++ {
		// Shadow the loop variable so each closure captures its own value of i.
		i := i
		t := &Target{
			labels: model.LabelSet{
				model.AddressLabel: model.LabelValue(fmt.Sprintf("example.com:%d", i)),
			},
		}
		l := &testLoop{}
		l.stopFunc = func() {
			time.Sleep(time.Duration(i*20) * time.Millisecond)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.targets[t.hash()] = t
		sp.loops[t.hash()] = l
	}
	done := make(chan struct{})

	beforeTargets := map[uint64]*Target{}
	for h, t := range sp.targets {
		beforeTargets[h] = t
	}

	reloadTime := time.Now()

	go func() {
		sp.reload(reloadCfg)
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapePool.reload() did not return in time")
	case <-done:
		// This should have taken at least as long as the last target slept,
		// i.e. (numTargets-1)*20 milliseconds.
		if time.Since(reloadTime) < time.Duration((numTargets-1)*20)*time.Millisecond {
			t.Fatalf("scrapePool.reload() exited before all targets stopped")
		}
	}

	mtx.Lock()
	if len(stopped) != numTargets {
		t.Fatalf("Expected %d stopped loops, got %d", numTargets, len(stopped))
	}
	mtx.Unlock()

	if !reflect.DeepEqual(sp.targets, beforeTargets) {
		t.Fatalf("Reloading affected target states unexpectedly")
	}
	if len(sp.loops) != numTargets {
		t.Fatalf("Expected %d loops after reload but got %d", numTargets, len(sp.loops))
	}
}

func TestScrapeLoopWrapSampleAppender(t *testing.T) {
	cfg := &config.ScrapeConfig{
		MetricRelabelConfigs: []*config.RelabelConfig{
			{
				Action:       config.RelabelDrop,
				SourceLabels: model.LabelNames{"__name__"},
				Regex:        config.MustNewRegexp("does_not_match_.*"),
			},
			{
				Action:       config.RelabelDrop,
				SourceLabels: model.LabelNames{"__name__"},
				Regex:        config.MustNewRegexp("does_not_match_either_*"),
			},
		},
	}

	target := newTestTarget("example.com:80", 10*time.Millisecond, nil)
	app := &nopAppender{}

	sp := newScrapePool(context.Background(), cfg, app)

	cfg.HonorLabels = false

	sl := sp.newLoop(
		sp.ctx,
		&targetScraper{Target: target, client: sp.client},
		sp.appender,
		target.Labels(),
		sp.config,
	).(*scrapeLoop)
	wrapped, _ := sl.wrapAppender(sl.appender)

	rl, ok := wrapped.(ruleLabelsAppender)
	if !ok {
		t.Fatalf("Expected ruleLabelsAppender but got %T", wrapped)
	}
	re, ok := rl.SampleAppender.(relabelAppender)
	if !ok {
		t.Fatalf("Expected relabelAppender but got %T", rl.SampleAppender)
	}
	co, ok := re.SampleAppender.(*countingAppender)
	if !ok {
		t.Fatalf("Expected *countingAppender but got %T", re.SampleAppender)
	}
	if co.SampleAppender != app {
		t.Fatalf("Expected base appender but got %T", co.SampleAppender)
	}

	cfg.HonorLabels = true
	sl = sp.newLoop(
		sp.ctx,
		&targetScraper{Target: target, client: sp.client},
		sp.appender,
		target.Labels(),
		sp.config,
	).(*scrapeLoop)
	wrapped, _ = sl.wrapAppender(sl.appender)

	hl, ok := wrapped.(honorLabelsAppender)
	if !ok {
		t.Fatalf("Expected honorLabelsAppender but got %T", wrapped)
	}
	re, ok = hl.SampleAppender.(relabelAppender)
	if !ok {
		t.Fatalf("Expected relabelAppender but got %T", hl.SampleAppender)
	}
	co, ok = re.SampleAppender.(*countingAppender)
	if !ok {
		t.Fatalf("Expected *countingAppender but got %T", re.SampleAppender)
	}
	if co.SampleAppender != app {
		t.Fatalf("Expected base appender but got %T", co.SampleAppender)
	}
}
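
// The wrapping order asserted above, outermost first: ruleLabelsAppender (or
// honorLabelsAppender when cfg.HonorLabels is set), then relabelAppender, then
// *countingAppender, and finally the base appender passed to newScrapePool.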

func TestScrapeLoopSampleProcessing(t *testing.T) {
	readSamples := model.Samples{
		{
			Metric: model.Metric{"__name__": "a_metric"},
		},
		{
			Metric: model.Metric{"__name__": "b_metric"},
		},
	}

	testCases := []struct {
		scrapedSamples                  model.Samples
		scrapeConfig                    *config.ScrapeConfig
		expectedReportedSamples         model.Samples
		expectedPostRelabelSamplesCount int
	}{
		{ // 0
			scrapedSamples: readSamples,
			scrapeConfig:   &config.ScrapeConfig{},
			expectedReportedSamples: model.Samples{
				{
					Metric: model.Metric{"__name__": "up"},
					Value:  1,
				},
				{
					Metric: model.Metric{"__name__": "scrape_duration_seconds"},
					Value:  42,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_scraped"},
					Value:  2,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_post_metric_relabeling"},
					Value:  2,
				},
			},
			expectedPostRelabelSamplesCount: 2,
		},
		{ // 1
			scrapedSamples: readSamples,
			scrapeConfig: &config.ScrapeConfig{
				MetricRelabelConfigs: []*config.RelabelConfig{
					{
						Action:       config.RelabelDrop,
						SourceLabels: model.LabelNames{"__name__"},
						Regex:        config.MustNewRegexp("a.*"),
					},
				},
			},
			expectedReportedSamples: model.Samples{
				{
					Metric: model.Metric{"__name__": "up"},
					Value:  1,
				},
				{
					Metric: model.Metric{"__name__": "scrape_duration_seconds"},
					Value:  42,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_scraped"},
					Value:  2,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_post_metric_relabeling"},
					Value:  1,
				},
			},
			expectedPostRelabelSamplesCount: 1,
		},
		{ // 2
			scrapedSamples: readSamples,
			scrapeConfig: &config.ScrapeConfig{
				SampleLimit: 1,
				MetricRelabelConfigs: []*config.RelabelConfig{
					{
						Action:       config.RelabelDrop,
						SourceLabels: model.LabelNames{"__name__"},
						Regex:        config.MustNewRegexp("a.*"),
					},
				},
			},
			expectedReportedSamples: model.Samples{
				{
					Metric: model.Metric{"__name__": "up"},
					Value:  1,
				},
				{
					Metric: model.Metric{"__name__": "scrape_duration_seconds"},
					Value:  42,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_scraped"},
					Value:  2,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_post_metric_relabeling"},
					Value:  1,
				},
			},
			expectedPostRelabelSamplesCount: 1,
		},
		{ // 3
			scrapedSamples: readSamples,
			scrapeConfig: &config.ScrapeConfig{
				SampleLimit: 1,
			},
			expectedReportedSamples: model.Samples{
				{
					Metric: model.Metric{"__name__": "up"},
					Value:  0,
				},
				{
					Metric: model.Metric{"__name__": "scrape_duration_seconds"},
					Value:  42,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_scraped"},
					Value:  2,
				},
				{
					Metric: model.Metric{"__name__": "scrape_samples_post_metric_relabeling"},
					Value:  2,
				},
			},
			expectedPostRelabelSamplesCount: 2,
		},
	}

	for i, test := range testCases {
		ingestedSamples := &bufferAppender{buffer: model.Samples{}}

		target := newTestTarget("example.com:80", 10*time.Millisecond, nil)

		scraper := &testScraper{}
		sl := newScrapeLoop(context.Background(), scraper, ingestedSamples, target.Labels(), test.scrapeConfig).(*scrapeLoop)
		num, err := sl.append(test.scrapedSamples)
		sl.report(time.Unix(0, 0), 42*time.Second, len(test.scrapedSamples), num, err)
		reportedSamples := ingestedSamples.buffer
		if err == nil {
			// On success the buffer holds the num ingested scrape samples first,
			// followed by the reported meta samples; keep only the latter.
			reportedSamples = reportedSamples[num:]
		}

		if !reflect.DeepEqual(reportedSamples, test.expectedReportedSamples) {
			t.Errorf("Reported samples did not match expected metrics for case %d", i)
			t.Errorf("Expected: %v", test.expectedReportedSamples)
			t.Fatalf("Got: %v", reportedSamples)
		}
		if test.expectedPostRelabelSamplesCount != num {
			t.Fatalf("Case %d: ingested sample count %d did not match expected value %d", i, num, test.expectedPostRelabelSamplesCount)
		}
	}
}
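
// For reference: each scrape report above appends exactly four meta samples
// (up, scrape_duration_seconds, scrape_samples_scraped, and
// scrape_samples_post_metric_relabeling), with up set to 0 when the scrape
// failed, e.g. because the sample limit was exceeded (case 3 above).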

func TestScrapeLoopStop(t *testing.T) {
	scraper := &testScraper{}
	sl := newScrapeLoop(context.Background(), scraper, nil, nil, &config.ScrapeConfig{})

	// The scrape pool synchronizes on stopping scrape loops. However, new scrape
	// loops are started asynchronously. Thus it's possible that a loop is stopped
	// before it has properly started.
	// Stopping a not-yet-started loop must block until the run method has been
	// called and has exited. The run method must exit immediately.

	stopDone := make(chan struct{})
	go func() {
		sl.stop()
		close(stopDone)
	}()

	select {
	case <-stopDone:
		t.Fatalf("Stopping terminated before run exited successfully")
	case <-time.After(500 * time.Millisecond):
	}

	// Running the stopped scrape loop must exit without calling the scraper even once.
	scraper.scrapeFunc = func(context.Context, time.Time) (model.Samples, error) {
		t.Fatalf("scraper was called for terminated scrape loop")
		return nil, nil
	}

	runDone := make(chan struct{})
	go func() {
		sl.run(1, 0, nil)
		close(runDone)
	}()

	select {
	case <-runDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Running terminated scrape loop did not exit")
	}

	select {
	case <-stopDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Stopping did not terminate after running exited")
	}
}

func TestScrapeLoopRun(t *testing.T) {
	var (
		signal = make(chan struct{})
		errc   = make(chan error)

		scraper = &testScraper{}
		app     = &nopAppender{}
	)
	defer close(signal)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx, scraper, app, nil, &config.ScrapeConfig{})

	// The loop must terminate during the initial offset if the context
	// is canceled.
	scraper.offsetDur = time.Hour

	go func() {
		sl.run(time.Second, time.Hour, errc)
		signal <- struct{}{}
	}()

	// Wait to make sure we are actually waiting on the offset.
	time.Sleep(1 * time.Second)

	cancel()
	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Cancelation during initial offset failed")
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	}

	// The provided timeout must cause cancelation of the context passed down to the
	// scraper. The scraper has to respect the context.
	scraper.offsetDur = 0

	block := make(chan struct{})
	scraper.scrapeFunc = func(ctx context.Context, ts time.Time) (model.Samples, error) {
		select {
		case <-block:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		return nil, nil
	}

	ctx, cancel = context.WithCancel(context.Background())
	sl = newScrapeLoop(ctx, scraper, app, nil, &config.ScrapeConfig{})

	go func() {
		sl.run(time.Second, 100*time.Millisecond, errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Fatalf("Expected timeout error but got: %s", err)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("Expected timeout error but got none")
	}

	// We already caught the timeout error and are certainly in the loop.
	// Let the scrapes return immediately to cause no further timeout errors,
	// and check whether canceling the parent context terminates the loop.
	close(block)
	cancel()

	select {
	case <-signal:
		// Loop terminated as expected.
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	case <-time.After(3 * time.Second):
		t.Fatalf("Loop did not terminate on context cancelation")
	}
}

func TestTargetScraperScrapeOK(t *testing.T) {
	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
			w.Write([]byte("metric_a 1\nmetric_b 2\n"))
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: model.LabelSet{
				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
				model.AddressLabel: model.LabelValue(serverURL.Host),
			},
		},
		client: http.DefaultClient,
	}
	now := time.Now()

	samples, err := ts.scrape(context.Background(), now)
	if err != nil {
		t.Fatalf("Unexpected scrape error: %s", err)
	}

	expectedSamples := model.Samples{
		{
			Metric:    model.Metric{"__name__": "metric_a"},
			Timestamp: model.TimeFromUnixNano(now.UnixNano()),
			Value:     1,
		},
		{
			Metric:    model.Metric{"__name__": "metric_b"},
			Timestamp: model.TimeFromUnixNano(now.UnixNano()),
			Value:     2,
		},
	}
	sort.Sort(expectedSamples)
	sort.Sort(samples)

	if !reflect.DeepEqual(samples, expectedSamples) {
		t.Errorf("Scraped samples did not match served metrics")
		t.Errorf("Expected: %v", expectedSamples)
		t.Fatalf("Got: %v", samples)
	}
}

func TestTargetScrapeScrapeCancel(t *testing.T) {
	block := make(chan struct{})

	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			<-block
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: model.LabelSet{
				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
				model.AddressLabel: model.LabelValue(serverURL.Host),
			},
		},
		client: http.DefaultClient,
	}
	ctx, cancel := context.WithCancel(context.Background())

	errc := make(chan error)

	go func() {
		time.Sleep(1 * time.Second)
		cancel()
	}()

	go func() {
		if _, err := ts.scrape(ctx, time.Now()); err != context.Canceled {
			errc <- fmt.Errorf("Expected context cancelation error but got: %s", err)
		}
		close(errc)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape function did not return on context cancelation")
	case err := <-errc:
		if err != nil {
			t.Fatal(err)
		}
	}
	// If this is closed in a defer above, the test server does not terminate
	// and the test doesn't complete.
	close(block)
}

func TestTargetScrapeScrapeNotFound(t *testing.T) {
	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: model.LabelSet{
				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
				model.AddressLabel: model.LabelValue(serverURL.Host),
			},
		},
		client: http.DefaultClient,
	}

	if _, err := ts.scrape(context.Background(), time.Now()); !strings.Contains(err.Error(), "404") {
		t.Fatalf("Expected \"404 Not Found\" error but got: %s", err)
	}
}

// testScraper implements the scraper interface and allows setting values
// returned by its methods. It also allows setting a custom scrape function.
type testScraper struct {
	offsetDur time.Duration

	lastStart    time.Time
	lastDuration time.Duration
	lastError    error

	samples    model.Samples
	scrapeErr  error
	scrapeFunc func(context.Context, time.Time) (model.Samples, error)
}

func (ts *testScraper) offset(interval time.Duration) time.Duration {
	return ts.offsetDur
}

func (ts *testScraper) report(start time.Time, duration time.Duration, err error) {
	ts.lastStart = start
	ts.lastDuration = duration
	ts.lastError = err
}

func (ts *testScraper) scrape(ctx context.Context, t time.Time) (model.Samples, error) {
	if ts.scrapeFunc != nil {
		return ts.scrapeFunc(ctx, t)
	}
	return ts.samples, ts.scrapeErr
}
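
// Compile-time assertion that testScraper satisfies the scraper interface,
// matching its doc comment above.
var _ scraper = &testScraper{}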