diff --git a/config/config.go b/config/config.go index 281881867..10695f550 100644 --- a/config/config.go +++ b/config/config.go @@ -14,10 +14,10 @@ package config import ( - "code.google.com/p/goprotobuf/proto" "fmt" "regexp" "time" + "code.google.com/p/goprotobuf/proto" clientmodel "github.com/prometheus/client_golang/model" @@ -48,7 +48,7 @@ func (c Config) validateLabels(labels *pb.LabelPairs) error { } for _, label := range labels.Label { if !labelNameRE.MatchString(label.GetName()) { - return fmt.Errorf("Invalid label name '%s'", label.GetName()) + return fmt.Errorf("invalid label name '%s'", label.GetName()) } } return nil @@ -59,42 +59,42 @@ func (c Config) Validate() error { // Check the global configuration section for validity. global := c.Global if _, err := utility.StringToDuration(global.GetScrapeInterval()); err != nil { - return fmt.Errorf("Invalid global scrape interval: %s", err) + return fmt.Errorf("invalid global scrape interval: %s", err) } if _, err := utility.StringToDuration(global.GetEvaluationInterval()); err != nil { - return fmt.Errorf("Invalid rule evaluation interval: %s", err) + return fmt.Errorf("invalid rule evaluation interval: %s", err) } if err := c.validateLabels(global.Labels); err != nil { - return fmt.Errorf("Invalid global labels: %s", err) + return fmt.Errorf("invalid global labels: %s", err) } // Check each job configuration for validity. jobNames := map[string]bool{} for _, job := range c.Job { if jobNames[job.GetName()] { - return fmt.Errorf("Found multiple jobs configured with the same name: '%s'", job.GetName()) + return fmt.Errorf("found multiple jobs configured with the same name: '%s'", job.GetName()) } jobNames[job.GetName()] = true if !jobNameRE.MatchString(job.GetName()) { - return fmt.Errorf("Invalid job name '%s'", job.GetName()) + return fmt.Errorf("invalid job name '%s'", job.GetName()) } if _, err := utility.StringToDuration(job.GetScrapeInterval()); err != nil { - return fmt.Errorf("Invalid scrape interval for job '%s': %s", job.GetName(), err) + return fmt.Errorf("invalid scrape interval for job '%s': %s", job.GetName(), err) } if _, err := utility.StringToDuration(job.GetSdRefreshInterval()); err != nil { - return fmt.Errorf("Invalid SD refresh interval for job '%s': %s", job.GetName(), err) + return fmt.Errorf("invalid SD refresh interval for job '%s': %s", job.GetName(), err) } if _, err := utility.StringToDuration(job.GetScrapeTimeout()); err != nil { - return fmt.Errorf("Invalid scrape timeout for job '%s': %s", job.GetName(), err) + return fmt.Errorf("invalid scrape timeout for job '%s': %s", job.GetName(), err) } for _, targetGroup := range job.TargetGroup { if err := c.validateLabels(targetGroup.Labels); err != nil { - return fmt.Errorf("Invalid labels for job '%s': %s", job.GetName(), err) + return fmt.Errorf("invalid labels for job '%s': %s", job.GetName(), err) } } if job.SdName != nil && len(job.TargetGroup) > 0 { - return fmt.Errorf("Specified both DNS-SD name and target group for job: %s", job.GetName()) + return fmt.Errorf("specified both DNS-SD name and target group for job: %s", job.GetName()) } } @@ -111,7 +111,7 @@ func (c Config) GetJobByName(name string) *JobConfig { return nil } -// Return the global labels as a LabelSet. +// GlobalLabels returns the global labels as a LabelSet. func (c Config) GlobalLabels() clientmodel.LabelSet { labels := clientmodel.LabelSet{} if c.Global.Labels != nil { @@ -155,12 +155,12 @@ type JobConfig struct { pb.JobConfig } -// EvaluationInterval gets the scrape interval for a job. 
+// ScrapeInterval gets the scrape interval for a job. func (c JobConfig) ScrapeInterval() time.Duration { return stringToDuration(c.GetScrapeInterval()) } -// EvaluationInterval gets the scrape interval for a job. +// ScrapeTimeout gets the scrape timeout for a job. func (c JobConfig) ScrapeTimeout() time.Duration { return stringToDuration(c.GetScrapeInterval()) } diff --git a/config/config_test.go b/config/config_test.go index b2459229e..062b2bcc4 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -43,27 +43,27 @@ var configTests = []struct { { inputFile: "invalid_scrape_interval.conf.input", shouldFail: true, - errContains: "Invalid global scrape interval", + errContains: "invalid global scrape interval", }, { inputFile: "invalid_job_name.conf.input", shouldFail: true, - errContains: "Invalid job name", + errContains: "invalid job name", }, { inputFile: "invalid_label_name.conf.input", shouldFail: true, - errContains: "Invalid label name", + errContains: "invalid label name", }, { inputFile: "mixing_sd_and_manual_targets.conf.input", shouldFail: true, - errContains: "Specified both DNS-SD name and target group", + errContains: "specified both DNS-SD name and target group", }, { inputFile: "repeated_job_name.conf.input", shouldFail: true, - errContains: "Found multiple jobs configured with the same name: 'testjob1'", + errContains: "found multiple jobs configured with the same name: 'testjob1'", }, } diff --git a/config/load.go b/config/load.go index 983961a29..740165b43 100644 --- a/config/load.go +++ b/config/load.go @@ -14,11 +14,13 @@ package config import ( + "io/ioutil" + "code.google.com/p/goprotobuf/proto" pb "github.com/prometheus/prometheus/config/generated" - "io/ioutil" ) +// LoadFromString returns a config parsed from the provided string. func LoadFromString(configStr string) (Config, error) { configProto := pb.PrometheusConfig{} if err := proto.UnmarshalText(configStr, &configProto); err != nil { @@ -39,6 +41,7 @@ func LoadFromString(configStr string) (Config, error) { return config, err } +// LoadFromFile returns a config parsed from the file of the provided name. func LoadFromFile(fileName string) (Config, error) { configStr, err := ioutil.ReadFile(fileName) if err != nil { diff --git a/main.go b/main.go index 726aad853..701bce67c 100644 --- a/main.go +++ b/main.go @@ -15,6 +15,7 @@ package main import ( "flag" + _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" "sync" @@ -130,7 +131,7 @@ func NewPrometheus() *prometheus { NotificationHandler: notificationHandler, EvaluationInterval: conf.EvaluationInterval(), Storage: memStorage, - PrometheusUrl: web.MustBuildServerUrl(), + PrometheusURL: web.MustBuildServerURL(), }) if err := ruleManager.AddRulesFromConfig(conf); err != nil { glog.Fatal("Error loading rule files: ", err) diff --git a/notification/notification.go b/notification/notification.go index b08ac196c..e9a472590 100644 --- a/notification/notification.go +++ b/notification/notification.go @@ -31,8 +31,8 @@ import ( ) const ( - alertmanagerApiEventsPath = "/api/alerts" - contentTypeJson = "application/json" + alertmanagerAPIEventsPath = "/api/alerts" + contentTypeJSON = "application/json" ) // String constants for instrumentation. @@ -50,8 +50,8 @@ var ( deadline = flag.Duration("alertmanager.http-deadline", 10*time.Second, "Alert manager HTTP API timeout.") ) -// A request for sending a notification to the alert manager for a single alert -// vector element.
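A minimal usage sketch for the loaders and the validation entry point touched above; the file name, the logging, and the explicit Validate call are illustrative assumptions rather than part of the patch.

	package main

	import (
		"log"

		"github.com/prometheus/prometheus/config"
	)

	func main() {
		// Parse the text-protobuf configuration from a hypothetical file.
		conf, err := config.LoadFromFile("prometheus.conf")
		if err != nil {
			log.Fatalf("couldn't load configuration: %s", err)
		}
		// Calling Validate explicitly is harmless even if the loader
		// already validated the parsed config.
		if err := conf.Validate(); err != nil {
			log.Fatalf("invalid configuration: %s", err)
		}
		log.Println("global labels:", conf.GlobalLabels())
	}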
+// NotificationReq is a request for sending a notification to the alert manager +// for a single alert vector element. type NotificationReq struct { // Short-form alert summary. May contain text/template-style interpolations. Summary string @@ -69,6 +69,9 @@ type NotificationReq struct { GeneratorURL string } +// NotificationReqs is just a short-hand for []*NotificationReq. No methods +// attached. Arguably, it's more confusing than helpful. Perhaps we should +// remove it... type NotificationReqs []*NotificationReq type httpPoster interface { @@ -79,7 +82,7 @@ type httpPoster interface { // alert manager service. type NotificationHandler struct { // The URL of the alert manager to send notifications to. - alertmanagerUrl string + alertmanagerURL string // Buffer of notifications that have not yet been sent. pendingNotifications chan NotificationReqs // HTTP client with custom timeout settings. @@ -92,10 +95,10 @@ type NotificationHandler struct { stopped chan struct{} } -// Construct a new NotificationHandler. -func NewNotificationHandler(alertmanagerUrl string, notificationQueueCapacity int) *NotificationHandler { +// NewNotificationHandler constructs a new NotificationHandler. +func NewNotificationHandler(alertmanagerURL string, notificationQueueCapacity int) *NotificationHandler { return &NotificationHandler{ - alertmanagerUrl: alertmanagerUrl, + alertmanagerURL: alertmanagerURL, pendingNotifications: make(chan NotificationReqs, notificationQueueCapacity), httpClient: utility.NewDeadlineClient(*deadline), @@ -150,8 +153,8 @@ func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error { } glog.V(1).Infoln("Sending notifications to alertmanager:", string(buf)) resp, err := n.httpClient.Post( - n.alertmanagerUrl+alertmanagerApiEventsPath, - contentTypeJson, + n.alertmanagerURL+alertmanagerAPIEventsPath, + contentTypeJSON, bytes.NewBuffer(buf), ) if err != nil { @@ -170,7 +173,7 @@ func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error { // Run dispatches notifications continuously. 
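A rough sketch of how the renamed constructor is wired up; the alert manager URL, queue capacity, and field values are invented, and only methods that appear elsewhere in this patch (Run, Stop, SubmitReqs) are used.

	package main

	import (
		"github.com/prometheus/prometheus/notification"
	)

	func main() {
		// Placeholder alert manager URL and queue capacity.
		h := notification.NewNotificationHandler("http://localhost:9093", 100)
		go h.Run() // Dispatches queued notifications until Stop is called.
		defer h.Stop()

		h.SubmitReqs(notification.NotificationReqs{
			&notification.NotificationReq{
				Summary:      "Instance down",               // Short-form alert summary.
				GeneratorURL: "http://localhost:9090/graph", // Hypothetical generator link.
			},
		})
	}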
func (n *NotificationHandler) Run() { for reqs := range n.pendingNotifications { - if n.alertmanagerUrl == "" { + if n.alertmanagerURL == "" { glog.Warning("No alert manager configured, not dispatching notification") n.notificationLatency.WithLabelValues(dropped).Observe(0) continue diff --git a/notification/notification_test.go b/notification/notification_test.go index e01e4d316..9de3140c3 100644 --- a/notification/notification_test.go +++ b/notification/notification_test.go @@ -24,12 +24,12 @@ import ( clientmodel "github.com/prometheus/client_golang/model" ) -type testHttpPoster struct { +type testHTTPPoster struct { message string receivedPost chan<- bool } -func (p *testHttpPoster) Post(url string, bodyType string, body io.Reader) (*http.Response, error) { +func (p *testHTTPPoster) Post(url string, bodyType string, body io.Reader) (*http.Response, error) { var buf bytes.Buffer buf.ReadFrom(body) p.message = buf.String() @@ -50,7 +50,7 @@ func (s *testNotificationScenario) test(i int, t *testing.T) { defer h.Stop() receivedPost := make(chan bool, 1) - poster := testHttpPoster{receivedPost: receivedPost} + poster := testHTTPPoster{receivedPost: receivedPost} h.httpClient = &poster go h.Run() diff --git a/retrieval/ingester.go b/retrieval/ingester.go index 667a58328..c8f6f8d43 100644 --- a/retrieval/ingester.go +++ b/retrieval/ingester.go @@ -29,6 +29,8 @@ type MergeLabelsIngester struct { Ingester extraction.Ingester } +// Ingest ingests the provided extraction result by merging in i.Labels and then +// handing it over to i.Ingester. func (i *MergeLabelsIngester) Ingest(r *extraction.Result) error { for _, s := range r.Samples { s.Metric.MergeFromLabelSet(i.Labels, i.CollisionPrefix) @@ -40,6 +42,7 @@ func (i *MergeLabelsIngester) Ingest(r *extraction.Result) error { // ChannelIngester feeds results into a channel without modifying them. type ChannelIngester chan<- *extraction.Result +// Ingest ingests the provided extraction result by sending it to i. func (i ChannelIngester) Ingest(r *extraction.Result) error { i <- r return nil diff --git a/retrieval/target.go b/retrieval/target.go index 621ea1d76..9a38fa4b0 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -32,8 +32,10 @@ import ( ) const ( + // InstanceLabel is the label value used for the instance label. InstanceLabel clientmodel.LabelName = "instance" - // The metric name for the synthetic health variable. + // ScrapeHealthMetricName is the metric name for the synthetic health + // variable. ScrapeHealthMetricName clientmodel.LabelValue = "up" // Constants for instrumentation. @@ -74,16 +76,16 @@ func init() { prometheus.MustRegister(targetIntervalLength) } -// The state of the given Target. +// TargetState describes the state of a Target. type TargetState int func (t TargetState) String() string { switch t { - case UNKNOWN: + case Unknown: return "UNKNOWN" - case ALIVE: + case Alive: return "ALIVE" - case UNREACHABLE: + case Unreachable: return "UNREACHABLE" } @@ -91,14 +93,16 @@ func (t TargetState) String() string { } const ( - // The Target has not been seen; we know nothing about it, except that it is - // on our docket for examination. - UNKNOWN TargetState = iota - // The Target has been found and successfully queried. - ALIVE - // The Target was either historically found or not found and then determined - // to be unhealthy by either not responding or disappearing. 
- UNREACHABLE + // Unknown is the state of a Target that has not been seen; we know + // nothing about it, except that it is on our docket for examination. + Unknown TargetState = iota + // Alive is the state of a Target that has been found and successfully + // queried. + Alive + // Unreachable is the state of a Target that was either historically + // found or not found and then determined to be unhealthy by either not + // responding or disappearing. + Unreachable ) // A Target represents an endpoint that should be interrogated for metrics. @@ -170,7 +174,7 @@ type target struct { sync.Mutex } -// Furnish a reasonably configured target for querying. +// NewTarget creates a reasonably configured target for querying. func NewTarget(address string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target { target := &target{ address: address, @@ -296,10 +300,10 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) { } t.Lock() // Writing t.state and t.lastError requires the lock. if err == nil { - t.state = ALIVE + t.state = Alive labels[outcome] = failure } else { - t.state = UNREACHABLE + t.state = Unreachable } t.lastError = err t.Unlock() diff --git a/retrieval/target_provider.go b/retrieval/target_provider.go index 15cc282ae..2313a9985 100644 --- a/retrieval/target_provider.go +++ b/retrieval/target_provider.go @@ -62,7 +62,7 @@ type sdTargetProvider struct { refreshInterval time.Duration } -// Constructs a new sdTargetProvider for a job. +// NewSdTargetProvider constructs a new sdTargetProvider for a job. func NewSdTargetProvider(job config.JobConfig) *sdTargetProvider { i, err := utility.StringToDuration(job.GetSdRefreshInterval()) if err != nil { @@ -125,7 +125,7 @@ func (p *sdTargetProvider) Targets() ([]Target, error) { func lookupSRV(name string) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { - return nil, fmt.Errorf("Couldn't load resolv.conf: %s", err) + return nil, fmt.Errorf("couldn't load resolv.conf: %s", err) } client := &dns.Client{} @@ -140,7 +140,7 @@ func lookupSRV(name string) (*dns.Msg, error) { return response, nil } } else { - glog.Warningf("Resolving %s.%s failed: %s", name, suffix, err) + glog.Warningf("resolving %s.%s failed: %s", name, suffix, err) } } response, err = lookup(name, dns.TypeSRV, client, servAddr, "", false) @@ -148,7 +148,7 @@ func lookupSRV(name string) (*dns.Msg, error) { return response, nil } } - return response, fmt.Errorf("Couldn't resolve %s: No server responded", name) + return response, fmt.Errorf("couldn't resolve %s: No server responded", name) } func lookup(name string, queryType uint16, client *dns.Client, servAddr string, suffix string, edns bool) (*dns.Msg, error) { @@ -178,7 +178,7 @@ func lookup(name string, queryType uint16, client *dns.Client, servAddr string, if response.MsgHdr.Truncated { if client.Net == "tcp" { - return nil, fmt.Errorf("Got truncated message on tcp") + return nil, fmt.Errorf("got truncated message on tcp") } if edns { // Truncated even though EDNS is used diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 22797dcc2..9f74e62ec 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -38,13 +38,13 @@ func (i *collectResultIngester) Ingest(r *extraction.Result) error { func TestTargetScrapeUpdatesState(t *testing.T) { testTarget := target{ - state: UNKNOWN, + state: Unknown, address: "bad schema", httpClient: utility.NewDeadlineClient(0), } testTarget.scrape(nopIngester{}) - if testTarget.state != UNREACHABLE { - 
t.Errorf("Expected target state %v, actual: %v", UNREACHABLE, testTarget.state) + if testTarget.state != Unreachable { + t.Errorf("Expected target state %v, actual: %v", Unreachable, testTarget.state) } } @@ -146,7 +146,7 @@ func TestTargetScrape404(t *testing.T) { func TestTargetRunScraperScrapes(t *testing.T) { testTarget := target{ - state: UNKNOWN, + state: Unknown, address: "bad schema", httpClient: utility.NewDeadlineClient(0), scraperStopping: make(chan struct{}), diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 4976510b8..01ba3a68d 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -39,6 +39,7 @@ type targetManager struct { ingester extraction.Ingester } +// NewTargetManager returns a newly initialized TargetManager ready to use. func NewTargetManager(ingester extraction.Ingester) TargetManager { return &targetManager{ ingester: ingester, @@ -50,7 +51,7 @@ func (m *targetManager) targetPoolForJob(job config.JobConfig) *TargetPool { targetPool, ok := m.poolsByJob[job.GetName()] if !ok { - var provider TargetProvider = nil + var provider TargetProvider if job.SdName != nil { provider = NewSdTargetProvider(job) } diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index f82cfb2a3..30eee7e20 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -73,7 +73,7 @@ func (t fakeTarget) StopScraper() { } func (t fakeTarget) State() TargetState { - return ALIVE + return Alive } func (t *fakeTarget) SetBaseLabelsFrom(newTarget Target) {} diff --git a/retrieval/targetpool.go b/retrieval/targetpool.go index a0c27b71d..1d6d797f9 100644 --- a/retrieval/targetpool.go +++ b/retrieval/targetpool.go @@ -27,10 +27,10 @@ const ( targetReplaceQueueSize = 1 ) +// TargetPool is a pool of targets for the same job. type TargetPool struct { sync.RWMutex - done chan chan struct{} manager TargetManager targetsByAddress map[string]Target interval time.Duration @@ -38,8 +38,11 @@ type TargetPool struct { addTargetQueue chan Target targetProvider TargetProvider + + stopping, stopped chan struct{} } +// NewTargetPool creates a TargetPool, ready to be started by calling Run. func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i time.Duration) *TargetPool { return &TargetPool{ manager: m, @@ -48,10 +51,13 @@ func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i targetsByAddress: make(map[string]Target), addTargetQueue: make(chan Target, targetAddQueueSize), targetProvider: p, - done: make(chan chan struct{}), + stopping: make(chan struct{}), + stopped: make(chan struct{}), } } +// Run starts the target pool. It returns when the target pool has stopped +// (after calling Stop). Run is usually called as a goroutine. func (p *TargetPool) Run() { ticker := time.NewTicker(p.interval) defer ticker.Stop() @@ -69,20 +75,21 @@ func (p *TargetPool) Run() { } case newTarget := <-p.addTargetQueue: p.addTarget(newTarget) - case stopped := <-p.done: + case <-p.stopping: p.ReplaceTargets([]Target{}) - close(stopped) + close(p.stopped) return } } } +// Stop stops the target pool and returns once the shutdown is complete. func (p *TargetPool) Stop() { - stopped := make(chan struct{}) - p.done <- stopped - <-stopped + close(p.stopping) + <-p.stopped } +// AddTarget adds a target by queuing it in the target queue. 
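A sketch of the pool lifecycle implied by the new stopping/stopped channels; the nil manager and provider, the channel-backed ingester, the interval, and the target address are placeholders chosen to keep the example short.

	package main

	import (
		"time"

		"github.com/prometheus/client_golang/extraction"
		clientmodel "github.com/prometheus/client_golang/model"

		"github.com/prometheus/prometheus/retrieval"
	)

	func main() {
		results := make(chan *extraction.Result, 10)
		ingester := retrieval.ChannelIngester(results) // Forwards scraped results unchanged.

		// A real setup passes a TargetManager and, for DNS-SD jobs, a TargetProvider.
		pool := retrieval.NewTargetPool(nil, nil, ingester, 15*time.Second)
		go pool.Run() // Returns only once Stop has been called.

		pool.AddTarget(retrieval.NewTarget(
			"http://localhost:9100/metrics", // Invented address.
			10*time.Second,                  // Scrape deadline.
			clientmodel.LabelSet{"job": "node"},
		))

		pool.Stop() // Blocks until Run has stopped all targets and returned.
	}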
func (p *TargetPool) AddTarget(target Target) { p.addTargetQueue <- target } @@ -95,13 +102,13 @@ func (p *TargetPool) addTarget(target Target) { go target.RunScraper(p.ingester, p.interval) } +// ReplaceTargets replaces the old targets by the provided new ones but reuses +// old targets that are also present in newTargets to preserve scheduling and +// health state. Targets no longer present are stopped. func (p *TargetPool) ReplaceTargets(newTargets []Target) { p.Lock() defer p.Unlock() - // Replace old target list by new one, but reuse those targets from the old - // list of targets which are also in the new list (to preserve scheduling and - // health state). newTargetAddresses := make(utility.Set) for _, newTarget := range newTargets { newTargetAddresses.Add(newTarget.Address()) @@ -113,7 +120,7 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) { go newTarget.RunScraper(p.ingester, p.interval) } } - // Stop any targets no longer present. + var wg sync.WaitGroup for k, oldTarget := range p.targetsByAddress { if !newTargetAddresses.Has(k) { @@ -130,6 +137,7 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) { wg.Wait() } +// Targets returns a copy of the current target list. func (p *TargetPool) Targets() []Target { p.RLock() defer p.RUnlock() diff --git a/retrieval/targetpool_test.go b/retrieval/targetpool_test.go index 4d8fb21c9..c74fef661 100644 --- a/retrieval/targetpool_test.go +++ b/retrieval/targetpool_test.go @@ -115,7 +115,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) { pool := NewTargetPool(nil, nil, nopIngester{}, time.Duration(1)) oldTarget1 := &target{ address: "example1", - state: UNREACHABLE, + state: Unreachable, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), newBaseLabels: make(chan clientmodel.LabelSet, 1), @@ -123,7 +123,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) { } oldTarget2 := &target{ address: "example2", - state: UNREACHABLE, + state: Unreachable, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), newBaseLabels: make(chan clientmodel.LabelSet, 1), @@ -131,7 +131,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) { } newTarget1 := &target{ address: "example1", - state: ALIVE, + state: Alive, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), newBaseLabels: make(chan clientmodel.LabelSet, 1), @@ -139,7 +139,7 @@ func TestTargetPoolReplaceTargets(t *testing.T) { } newTarget2 := &target{ address: "example3", - state: ALIVE, + state: Alive, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), newBaseLabels: make(chan clientmodel.LabelSet, 1), diff --git a/rules/alerting.go b/rules/alerting.go index d8caee7d5..f1961e42a 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -16,6 +16,7 @@ package rules import ( "fmt" "html/template" + "reflect" "sync" "time" @@ -28,25 +29,25 @@ import ( ) const ( - // The metric name for synthetic alert timeseries. + // AlertMetricName is the metric name for synthetic alert timeseries. AlertMetricName clientmodel.LabelValue = "ALERTS" - // The label name indicating the name of an alert. + // AlertNameLabel is the label name indicating the name of an alert. AlertNameLabel clientmodel.LabelName = "alertname" - // The label name indicating the state of an alert. + // AlertStateLabel is the label name indicating the state of an alert. AlertStateLabel clientmodel.LabelName = "alertstate" ) -// States that active alerts can be in. +// AlertState denotes the state of an active alert. 
type AlertState int func (s AlertState) String() string { switch s { - case INACTIVE: + case Inactive: return "inactive" - case PENDING: + case Pending: return "pending" - case FIRING: + case Firing: return "firing" default: panic("undefined") @@ -54,9 +55,14 @@ func (s AlertState) String() string { } const ( - INACTIVE AlertState = iota - PENDING - FIRING + // Inactive alerts are neither firing nor pending. + Inactive AlertState = iota + // Pending alerts have been active for less than the configured + // threshold duration. + Pending + // Firing alerts have been active for longer than the configured + // threshold duration. + Firing ) // Alert is used to track active (pending/firing) alerts over time. @@ -65,9 +71,9 @@ type Alert struct { Name string // The vector element labelset triggering this alert. Labels clientmodel.LabelSet - // The state of the alert (PENDING or FIRING). + // The state of the alert (Pending or Firing). State AlertState - // The time when the alert first transitioned into PENDING state. + // The time when the alert first transitioned into Pending state. ActiveSince clientmodel.Timestamp // The value of the alert expression for this vector element. Value clientmodel.SampleValue @@ -91,14 +97,14 @@ func (a Alert) sample(timestamp clientmodel.Timestamp, value clientmodel.SampleV } } -// An alerting rule generates alerts from its vector expression. +// An AlertingRule generates alerts from its vector expression. type AlertingRule struct { // The name of the alert. name string // The vector expression from which to generate alerts. Vector ast.VectorNode // The duration for which a labelset needs to persist in the expression - // output vector before an alert transitions from PENDING to FIRING state. + // output vector before an alert transitions from Pending to Firing state. holdDuration time.Duration // Extra labels to attach to the resulting alert sample vectors. Labels clientmodel.LabelSet @@ -109,21 +115,24 @@ type AlertingRule struct { // Protects the below. mutex sync.Mutex - // A map of alerts which are currently active (PENDING or FIRING), keyed by + // A map of alerts which are currently active (Pending or Firing), keyed by // the fingerprint of the labelset they correspond to. activeAlerts map[clientmodel.Fingerprint]*Alert } +// Name returns the name of the alert. func (rule *AlertingRule) Name() string { return rule.name } +// EvalRaw returns the raw value of the rule expression, without creating alerts. func (rule *AlertingRule) EvalRaw(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) { return ast.EvalVectorInstant(rule.Vector, timestamp, storage, stats.NewTimerGroup()) } +// Eval evaluates the rule expression and then creates pending alerts and fires +// or removes previously pending alerts accordingly. func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) { - // Get the raw value of the rule expression. 
exprResult, err := rule.EvalRaw(timestamp, storage) if err != nil { return nil, err @@ -150,7 +159,7 @@ func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage local.St rule.activeAlerts[*fp] = &Alert{ Name: rule.name, Labels: labels, - State: PENDING, + State: Pending, ActiveSince: timestamp, Value: sample.Value, } @@ -169,9 +178,9 @@ func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage local.St continue } - if activeAlert.State == PENDING && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration { + if activeAlert.State == Pending && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration { vector = append(vector, activeAlert.sample(timestamp, 0)) - activeAlert.State = FIRING + activeAlert.State = Firing } vector = append(vector, activeAlert.sample(timestamp, 1)) @@ -180,12 +189,17 @@ func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage local.St return vector, nil } +// ToDotGraph returns the text representation of a dot graph. func (rule *AlertingRule) ToDotGraph() string { - graph := fmt.Sprintf(`digraph "Rules" { + graph := fmt.Sprintf( + `digraph "Rules" { %#p[shape="box",label="ALERT %s IF FOR %s"]; - %#p -> %#p; + %#p -> %x; %s - }`, &rule, rule.name, utility.DurationToString(rule.holdDuration), &rule, rule.Vector, rule.Vector.NodeTreeToDotGraph()) + }`, + &rule, rule.name, utility.DurationToString(rule.holdDuration), + &rule, reflect.ValueOf(rule.Vector).Pointer(), + rule.Vector.NodeTreeToDotGraph()) return graph } @@ -193,6 +207,7 @@ func (rule *AlertingRule) String() string { return fmt.Sprintf("ALERT %s IF %s FOR %s WITH %s", rule.name, rule.Vector, utility.DurationToString(rule.holdDuration), rule.Labels) } +// HTMLSnippet returns an HTML snippet representing this alerting rule. func (rule *AlertingRule) HTMLSnippet() template.HTML { alertMetric := clientmodel.Metric{ clientmodel.MetricNameLabel: AlertMetricName, @@ -208,11 +223,12 @@ func (rule *AlertingRule) HTMLSnippet() template.HTML { rule.Labels)) } +// State returns the "maximum" state: firing > pending > inactive. func (rule *AlertingRule) State() AlertState { rule.mutex.Lock() defer rule.mutex.Unlock() - maxState := INACTIVE + maxState := Inactive for _, activeAlert := range rule.activeAlerts { if activeAlert.State > maxState { maxState = activeAlert.State @@ -221,6 +237,7 @@ func (rule *AlertingRule) State() AlertState { return maxState } +// ActiveAlerts returns a slice of active alerts. func (rule *AlertingRule) ActiveAlerts() []Alert { rule.mutex.Lock() defer rule.mutex.Unlock() @@ -232,7 +249,7 @@ func (rule *AlertingRule) ActiveAlerts() []Alert { return alerts } -// Construct a new AlertingRule. +// NewAlertingRule constructs a new AlertingRule. 
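A small sketch of how a caller might inspect an alerting rule after evaluation, using only methods documented above; the helper itself, its arguments, and the import paths are assumptions for illustration.

	package example

	import (
		"github.com/golang/glog"

		clientmodel "github.com/prometheus/client_golang/model"

		"github.com/prometheus/prometheus/rules"
		"github.com/prometheus/prometheus/storage/local"
	)

	// reportAlerts is a hypothetical helper, not part of this patch.
	func reportAlerts(rule *rules.AlertingRule, timestamp clientmodel.Timestamp, storage local.Storage) {
		if _, err := rule.Eval(timestamp, storage); err != nil {
			glog.Errorf("error evaluating rule %s: %s", rule.Name(), err)
			return
		}
		// State() reports the "maximum" state: firing > pending > inactive.
		glog.Infof("rule %s is %s", rule.Name(), rule.State())
		for _, alert := range rule.ActiveAlerts() {
			if alert.State == rules.Firing {
				glog.Infof("firing since %v: %v", alert.ActiveSince, alert.Labels)
			}
		}
	}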
func NewAlertingRule(name string, vector ast.VectorNode, holdDuration time.Duration, labels clientmodel.LabelSet, summary string, description string) *AlertingRule { return &AlertingRule{ name: name, diff --git a/rules/ast/printer.go b/rules/ast/printer.go index b32f0b10f..04068e8a9 100644 --- a/rules/ast/printer.go +++ b/rules/ast/printer.go @@ -17,6 +17,7 @@ import ( "encoding/json" "errors" "fmt" + "reflect" "sort" "strings" @@ -223,7 +224,7 @@ func EvalToVector(node Node, timestamp clientmodel.Timestamp, storage local.Stor evalTimer.Stop() return vector, nil case MATRIX: - return nil, errors.New("Matrices not supported by EvalToVector") + return nil, errors.New("matrices not supported by EvalToVector") case STRING: str := node.(StringNode).Eval(timestamp) evalTimer.Stop() @@ -242,7 +243,7 @@ func (node *ScalarLiteral) NodeTreeToDotGraph() string { func functionArgsToDotGraph(node Node, args []Node) string { graph := "" for _, arg := range args { - graph += fmt.Sprintf("%#p -> %#p;\n", node, arg) + graph += fmt.Sprintf("%x -> %x;\n", reflect.ValueOf(node).Pointer(), reflect.ValueOf(arg).Pointer()) } for _, arg := range args { graph += arg.NodeTreeToDotGraph() @@ -260,13 +261,21 @@ func (node *ScalarFunctionCall) NodeTreeToDotGraph() string { // NodeTreeToDotGraph returns a DOT representation of the expression. func (node *ScalarArithExpr) NodeTreeToDotGraph() string { - graph := fmt.Sprintf(` - %#p[label="%s"]; - %#p -> %#p; - %#p -> %#p; + nodeAddr := reflect.ValueOf(node).Pointer() + graph := fmt.Sprintf( + ` + %x[label="%s"]; + %x -> %x; + %x -> %x; %s %s - }`, node, node.opType, node, node.lhs, node, node.rhs, node.lhs.NodeTreeToDotGraph(), node.rhs.NodeTreeToDotGraph()) + }`, + nodeAddr, node.opType, + nodeAddr, reflect.ValueOf(node.lhs).Pointer(), + nodeAddr, reflect.ValueOf(node.rhs).Pointer(), + node.lhs.NodeTreeToDotGraph(), + node.rhs.NodeTreeToDotGraph(), + ) return graph } @@ -295,20 +304,28 @@ func (node *VectorAggregation) NodeTreeToDotGraph() string { node, node.aggrType, strings.Join(groupByStrings, ", ")) - graph += fmt.Sprintf("%#p -> %#p;\n", node, node.vector) + graph += fmt.Sprintf("%#p -> %x;\n", node, reflect.ValueOf(node.vector).Pointer()) graph += node.vector.NodeTreeToDotGraph() return graph } // NodeTreeToDotGraph returns a DOT representation of the expression. func (node *VectorArithExpr) NodeTreeToDotGraph() string { - graph := fmt.Sprintf(` - %#p[label="%s"]; - %#p -> %#p; - %#p -> %#p; + nodeAddr := reflect.ValueOf(node).Pointer() + graph := fmt.Sprintf( + ` + %x[label="%s"]; + %x -> %x; + %x -> %x; %s %s - `, node, node.opType, node, node.lhs, node, node.rhs, node.lhs.NodeTreeToDotGraph(), node.rhs.NodeTreeToDotGraph()) + }`, + nodeAddr, node.opType, + nodeAddr, reflect.ValueOf(node.lhs).Pointer(), + nodeAddr, reflect.ValueOf(node.rhs).Pointer(), + node.lhs.NodeTreeToDotGraph(), + node.rhs.NodeTreeToDotGraph(), + ) return graph } diff --git a/rules/helpers.go b/rules/helpers.go index 0f9ffe4bb..593859379 100644 --- a/rules/helpers.go +++ b/rules/helpers.go @@ -25,16 +25,23 @@ import ( "github.com/prometheus/prometheus/utility" ) +// CreateRecordingRule is a convenience function to create a recording rule. 
func CreateRecordingRule(name string, labels clientmodel.LabelSet, expr ast.Node, permanent bool) (*RecordingRule, error) { if _, ok := expr.(ast.VectorNode); !ok { - return nil, fmt.Errorf("Recording rule expression %v does not evaluate to vector type", expr) + return nil, fmt.Errorf("recording rule expression %v does not evaluate to vector type", expr) } - return NewRecordingRule(name, labels, expr.(ast.VectorNode), permanent), nil + return &RecordingRule{ + name: name, + labels: labels, + vector: expr.(ast.VectorNode), + permanent: permanent, + }, nil } +// CreateAlertingRule is a convenience function to create a new alerting rule. func CreateAlertingRule(name string, expr ast.Node, holdDurationStr string, labels clientmodel.LabelSet, summary string, description string) (*AlertingRule, error) { if _, ok := expr.(ast.VectorNode); !ok { - return nil, fmt.Errorf("Alert rule expression %v does not evaluate to vector type", expr) + return nil, fmt.Errorf("alert rule expression %v does not evaluate to vector type", expr) } holdDuration, err := utility.StringToDuration(holdDurationStr) if err != nil { @@ -43,10 +50,11 @@ func CreateAlertingRule(name string, expr ast.Node, holdDurationStr string, labe return NewAlertingRule(name, expr.(ast.VectorNode), holdDuration, labels, summary, description), nil } +// NewFunctionCall is a convenience function to create a new AST function-call node. func NewFunctionCall(name string, args []ast.Node) (ast.Node, error) { function, err := ast.GetFunction(name) if err != nil { - return nil, fmt.Errorf("Unknown function \"%v\"", name) + return nil, fmt.Errorf("unknown function %q", name) } functionCall, err := ast.NewFunctionCall(function, args) if err != nil { @@ -55,9 +63,10 @@ func NewFunctionCall(name string, args []ast.Node) (ast.Node, error) { return functionCall, nil } +// NewVectorAggregation is a convenience function to create a new AST vector aggregation. func NewVectorAggregation(aggrTypeStr string, vector ast.Node, groupBy clientmodel.LabelNames, keepExtraLabels bool) (*ast.VectorAggregation, error) { if _, ok := vector.(ast.VectorNode); !ok { - return nil, fmt.Errorf("Operand of %v aggregation must be of vector type", aggrTypeStr) + return nil, fmt.Errorf("operand of %v aggregation must be of vector type", aggrTypeStr) } var aggrTypes = map[string]ast.AggrType{ "SUM": ast.SUM, @@ -68,11 +77,12 @@ func NewVectorAggregation(aggrTypeStr string, vector ast.Node, groupBy clientmod } aggrType, ok := aggrTypes[aggrTypeStr] if !ok { - return nil, fmt.Errorf("Unknown aggregation type '%v'", aggrTypeStr) + return nil, fmt.Errorf("unknown aggregation type %q", aggrTypeStr) } return ast.NewVectorAggregation(aggrType, vector.(ast.VectorNode), groupBy, keepExtraLabels), nil } +// NewArithExpr is a convenience function to create a new AST arithmetic expression. func NewArithExpr(opTypeStr string, lhs ast.Node, rhs ast.Node) (ast.Node, error) { var opTypes = map[string]ast.BinOpType{ "+": ast.ADD, @@ -91,7 +101,7 @@ func NewArithExpr(opTypeStr string, lhs ast.Node, rhs ast.Node) (ast.Node, error } opType, ok := opTypes[opTypeStr] if !ok { - return nil, fmt.Errorf("Invalid binary operator \"%v\"", opTypeStr) + return nil, fmt.Errorf("invalid binary operator %q", opTypeStr) } expr, err := ast.NewArithExpr(opType, lhs, rhs) if err != nil { @@ -100,6 +110,7 @@ func NewArithExpr(opTypeStr string, lhs ast.Node, rhs ast.Node) (ast.Node, error return expr, nil } +// NewMatrixSelector is a convenience function to create a new AST matrix selector. 
func NewMatrixSelector(vector ast.Node, intervalStr string) (ast.MatrixNode, error) { switch vector.(type) { case *ast.VectorSelector: { break } default: - return nil, fmt.Errorf("Intervals are currently only supported for vector selectors.") + return nil, fmt.Errorf("intervals are currently only supported for vector selectors") } interval, err := utility.StringToDuration(intervalStr) if err != nil { @@ -126,11 +137,13 @@ func newLabelMatcher(matchTypeStr string, name clientmodel.LabelName, value clie } matchType, ok := matchTypes[matchTypeStr] if !ok { - return nil, fmt.Errorf("Invalid label matching operator \"%v\"", matchTypeStr) + return nil, fmt.Errorf("invalid label matching operator %q", matchTypeStr) } return metric.NewLabelMatcher(matchType, name, value) } +// TableLinkForExpression creates an escaped relative link to the table view of +// the provided expression. func TableLinkForExpression(expr string) string { // url.QueryEscape percent-escapes everything except spaces, for which it // uses "+". However, in the non-query part of a URI, only percent-escaped @@ -143,6 +156,8 @@ func TableLinkForExpression(expr string) string { return fmt.Sprintf("/graph#%s", strings.Replace(urlData, "+", "%20", -1)) } +// GraphLinkForExpression creates an escaped relative link to the graph view of +// the provided expression. func GraphLinkForExpression(expr string) string { urlData := url.QueryEscape(fmt.Sprintf(`[{"expr":%q}]`, expr)) return fmt.Sprintf("/graph#%s", strings.Replace(urlData, "+", "%20", -1)) } diff --git a/rules/load.go b/rules/load.go index 001408570..b9271ce51 100644 --- a/rules/load.go +++ b/rules/load.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/prometheus/rules/ast" ) +// RulesLexer is the lexer for rule expressions. type RulesLexer struct { // Errors encountered during parsing. errors []string @@ -94,38 +95,37 @@ func newRulesLexer(src io.Reader, singleExpr bool) *RulesLexer { return lexer } -func LoadFromReader(rulesReader io.Reader, singleExpr bool) (interface{}, error) { +func lexAndParse(rulesReader io.Reader, singleExpr bool) (*RulesLexer, error) { lexer := newRulesLexer(rulesReader, singleExpr) ret := yyParse(lexer) if ret != 0 && len(lexer.errors) == 0 { - lexer.Error("Unknown parser error") + lexer.Error("unknown parser error") } if len(lexer.errors) > 0 { err := errors.New(strings.Join(lexer.errors, "\n")) return nil, err } - - if singleExpr { - return lexer.parsedExpr, nil - } else { - return lexer.parsedRules, nil - } + return lexer, nil } +// LoadRulesFromReader parses rules from the provided reader and returns them. func LoadRulesFromReader(rulesReader io.Reader) ([]Rule, error) { - expr, err := LoadFromReader(rulesReader, false) + lexer, err := lexAndParse(rulesReader, false) if err != nil { return nil, err } - return expr.([]Rule), err + return lexer.parsedRules, err } +// LoadRulesFromString parses rules from the provided string and returns them. func LoadRulesFromString(rulesString string) ([]Rule, error) { rulesReader := strings.NewReader(rulesString) return LoadRulesFromReader(rulesReader) } +// LoadRulesFromFile parses rules from the file of the provided name and returns +// them.
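A hedged sketch of the renamed loader entry points; the rule text below uses the old rule-language syntax from memory and is only illustrative.

	package main

	import (
		"github.com/golang/glog"

		"github.com/prometheus/prometheus/rules"
	)

	func main() {
		// A single recording rule in the text-based rule language
		// (the exact syntax is not part of this patch).
		loaded, err := rules.LoadRulesFromString("job:up:sum = sum(up) by (job)")
		if err != nil {
			glog.Fatal("error loading rules: ", err)
		}
		glog.Infof("loaded %d rule(s)", len(loaded))
	}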
func LoadRulesFromFile(fileName string) ([]Rule, error) { rulesReader, err := os.Open(fileName) if err != nil { @@ -135,19 +135,25 @@ func LoadRulesFromFile(fileName string) ([]Rule, error) { return LoadRulesFromReader(rulesReader) } +// LoadExprFromReader parses a single expression from the provided reader and +// returns it as an AST node. func LoadExprFromReader(exprReader io.Reader) (ast.Node, error) { - expr, err := LoadFromReader(exprReader, true) + lexer, err := lexAndParse(exprReader, true) if err != nil { return nil, err } - return expr.(ast.Node), err + return lexer.parsedExpr, err } +// LoadExprFromString parses a single expression from the provided string and +// returns it as an AST node. func LoadExprFromString(exprString string) (ast.Node, error) { exprReader := strings.NewReader(exprString) return LoadExprFromReader(exprReader) } +// LoadExprFromFile parses a single expression from the file of the provided +// name and returns it as an AST node. func LoadExprFromFile(fileName string) (ast.Node, error) { exprReader, err := os.Open(fileName) if err != nil { diff --git a/rules/manager/manager.go b/rules/manager/manager.go index ecff523ff..4e1d6957c 100644 --- a/rules/manager/manager.go +++ b/rules/manager/manager.go @@ -62,6 +62,8 @@ func init() { prometheus.MustRegister(evalDuration) } +// A RuleManager manages recording and alerting rules. Create instances with +// NewRuleManager. type RuleManager interface { // Load and add rules from rule files specified in the configuration. AddRulesFromConfig(config config.Config) error @@ -88,9 +90,10 @@ type ruleManager struct { results chan<- *extraction.Result notificationHandler *notification.NotificationHandler - prometheusUrl string + prometheusURL string } +// RuleManagerOptions bundles options for the RuleManager. type RuleManagerOptions struct { EvaluationInterval time.Duration Storage local.Storage @@ -98,9 +101,11 @@ type RuleManagerOptions struct { NotificationHandler *notification.NotificationHandler Results chan<- *extraction.Result - PrometheusUrl string + PrometheusURL string } +// NewRuleManager returns an implementation of RuleManager, ready to be started +// by calling the Run method. func NewRuleManager(o *RuleManagerOptions) RuleManager { manager := &ruleManager{ rules: []rules.Rule{}, @@ -110,7 +115,7 @@ func NewRuleManager(o *RuleManagerOptions) RuleManager { storage: o.Storage, results: o.Results, notificationHandler: o.NotificationHandler, - prometheusUrl: o.PrometheusUrl, + prometheusURL: o.PrometheusURL, } return manager } @@ -120,6 +125,12 @@ func (m *ruleManager) Run() { defer ticker.Stop() for { + // TODO(beorn): This has the same problem as the scraper had + // before. If rule evaluation takes longer than the interval, + // there is a 50% chance per iteration that - after stopping the + // ruleManager - a new evaluation will be started rather than + // the ruleManager actually stopped. We need a similar + // contraption here as in the scraper. select { case <-ticker.C: start := time.Now() @@ -145,7 +156,7 @@ func (m *ruleManager) queueAlertNotifications(rule *rules.AlertingRule, timestam notifications := make(notification.NotificationReqs, 0, len(activeAlerts)) for _, aa := range activeAlerts { - if aa.State != rules.FIRING { + if aa.State != rules.Firing { // BUG: In the future, make AlertManager support pending alerts? 
continue } @@ -185,7 +196,7 @@ func (m *ruleManager) queueAlertNotifications(rule *rules.AlertingRule, timestam Value: aa.Value, ActiveSince: aa.ActiveSince.Time(), RuleString: rule.String(), - GeneratorURL: m.prometheusUrl + rules.GraphLinkForExpression(rule.Vector.String()), + GeneratorURL: m.prometheusURL + rules.GraphLinkForExpression(rule.Vector.String()), }) } m.notificationHandler.SubmitReqs(notifications) diff --git a/rules/recording.go b/rules/recording.go index f6e5f39f4..59e869f7e 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -16,6 +16,7 @@ package rules import ( "fmt" "html/template" + "reflect" clientmodel "github.com/prometheus/client_golang/model" @@ -32,14 +33,16 @@ type RecordingRule struct { permanent bool } +// Name returns the rule name. func (rule RecordingRule) Name() string { return rule.name } +// EvalRaw returns the raw value of the rule expression. func (rule RecordingRule) EvalRaw(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) { return ast.EvalVectorInstant(rule.vector, timestamp, storage, stats.NewTimerGroup()) } +// Eval evaluates the rule and then overrides the metric names and labels accordingly. func (rule RecordingRule) Eval(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) { - // Get the raw value of the rule expression. vector, err := rule.EvalRaw(timestamp, storage) if err != nil { return nil, err @@ -60,12 +63,18 @@ func (rule RecordingRule) Eval(timestamp clientmodel.Timestamp, storage local.St return vector, nil } +// ToDotGraph returns the text representation of a dot graph. func (rule RecordingRule) ToDotGraph() string { - graph := fmt.Sprintf(`digraph "Rules" { + graph := fmt.Sprintf( + `digraph "Rules" { %#p[shape="box",label="%s = "]; - %#p -> %#p; + %#p -> %x; %s - }`, &rule, rule.name, &rule, rule.vector, rule.vector.NodeTreeToDotGraph()) + }`, + &rule, rule.name, + &rule, reflect.ValueOf(rule.vector).Pointer(), + rule.vector.NodeTreeToDotGraph(), + ) return graph } @@ -73,6 +82,7 @@ func (rule RecordingRule) String() string { return fmt.Sprintf("%s%s = %s\n", rule.name, rule.labels, rule.vector) } +// HTMLSnippet returns an HTML snippet representing this rule. func (rule RecordingRule) HTMLSnippet() template.HTML { ruleExpr := rule.vector.String() return template.HTML(fmt.Sprintf( @@ -83,13 +93,3 @@ func (rule RecordingRule) HTMLSnippet() template.HTML { GraphLinkForExpression(ruleExpr), ruleExpr)) } - -// Construct a new RecordingRule. 
-func NewRecordingRule(name string, labels clientmodel.LabelSet, vector ast.VectorNode, permanent bool) *RecordingRule { - return &RecordingRule{ - name: name, - labels: labels, - vector: vector, - permanent: permanent, - } -} diff --git a/stats/query_stats.go b/stats/query_stats.go index b838a2b7d..710cab140 100644 --- a/stats/query_stats.go +++ b/stats/query_stats.go @@ -21,7 +21,7 @@ type QueryTiming int const ( TotalEvalTime QueryTiming = iota ResultSortTime - JsonEncodeTime + JSONEncodeTime PreloadTime TotalQueryPreparationTime InnerViewBuildingTime @@ -44,7 +44,7 @@ func (s QueryTiming) String() string { return "Total eval time" case ResultSortTime: return "Result sorting time" - case JsonEncodeTime: + case JSONEncodeTime: return "JSON encoding time" case PreloadTime: return "Query preloading time" diff --git a/stats/timer.go b/stats/timer.go index 8d6ea8e88..7dc7f139b 100644 --- a/stats/timer.go +++ b/stats/timer.go @@ -20,7 +20,7 @@ import ( "time" ) -// A timer that can be started and stopped and accumulates the total time it +// A Timer that can be started and stopped and accumulates the total time it // was running (the time between Start() and Stop()). type Timer struct { name fmt.Stringer @@ -51,12 +51,12 @@ type TimerGroup struct { child *TimerGroup } -// Construct a new TimerGroup. +// NewTimerGroup constructs a new TimerGroup. func NewTimerGroup() *TimerGroup { return &TimerGroup{timers: map[fmt.Stringer]*Timer{}} } -// Get (and create, if necessary) the Timer for a given code section. +// GetTimer gets (and creates, if necessary) the Timer for a given code section. func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer { if timer, exists := t.timers[name]; exists { return timer @@ -69,14 +69,18 @@ func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer { return timer } +// Timers is a slice of Timer pointers that implements Len and Swap from +// sort.Interface. type Timers []*Timer type byCreationTimeSorter struct{ Timers } +// Len implements sort.Interface. func (t Timers) Len() int { return len(t) } +// Swap implements sort.Interface. func (t Timers) Swap(i, j int) { t[i], t[j] = t[j], t[i] } diff --git a/templates/templates.go b/templates/templates.go index 308e1e4de..03e94e4e4 100644 --- a/templates/templates.go +++ b/templates/templates.go @@ -91,6 +91,7 @@ type templateExpander struct { funcMap text_template.FuncMap } +// NewTemplateExpander returns a template expander ready to use. func NewTemplateExpander(text string, name string, data interface{}, timestamp clientmodel.Timestamp, storage local.Storage) *templateExpander { return &templateExpander{ text: text, @@ -152,17 +153,16 @@ func NewTemplateExpander(text string, name string, data interface{}, timestamp c v /= 1000 } return fmt.Sprintf("%.4g%s", v, prefix) - } else { - prefix := "" - for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { - if math.Abs(v) >= 1 { - break - } - prefix = p - v *= 1000 - } - return fmt.Sprintf("%.4g%s", v, prefix) } + prefix := "" + for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { + if math.Abs(v) >= 1 { + break + } + prefix = p + v *= 1000 + } + return fmt.Sprintf("%.4g%s", v, prefix) }, "humanize1024": func(v float64) string { if math.Abs(v) <= 1 { @@ -204,17 +204,16 @@ func NewTemplateExpander(text string, name string, data interface{}, timestamp c } // For seconds, we display 4 significant digts. 
return fmt.Sprintf("%s%.4gs", sign, math.Floor(seconds*1000+.5)/1000) - } else { - prefix := "" - for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { - if math.Abs(v) >= 1 { - break - } - prefix = p - v *= 1000 - } - return fmt.Sprintf("%.4g%ss", v, prefix) } + prefix := "" + for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { + if math.Abs(v) >= 1 { + break + } + prefix = p + v *= 1000 + } + return fmt.Sprintf("%.4g%ss", v, prefix) }, }, } @@ -229,7 +228,7 @@ func (te templateExpander) Expand() (result string, resultErr error) { var ok bool resultErr, ok = r.(error) if !ok { - resultErr = fmt.Errorf("Panic expanding template %v: %v", te.name, r) + resultErr = fmt.Errorf("panic expanding template %v: %v", te.name, r) } } }() @@ -237,11 +236,11 @@ func (te templateExpander) Expand() (result string, resultErr error) { var buffer bytes.Buffer tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Parse(te.text) if err != nil { - return "", fmt.Errorf("Error parsing template %v: %v", te.name, err) + return "", fmt.Errorf("error parsing template %v: %v", te.name, err) } err = tmpl.Execute(&buffer, te.data) if err != nil { - return "", fmt.Errorf("Error executing template %v: %v", te.name, err) + return "", fmt.Errorf("error executing template %v: %v", te.name, err) } return buffer.String(), nil } @@ -253,7 +252,7 @@ func (te templateExpander) ExpandHTML(templateFiles []string) (result string, re var ok bool resultErr, ok = r.(error) if !ok { - resultErr = fmt.Errorf("Panic expanding template %v: %v", te.name, r) + resultErr = fmt.Errorf("panic expanding template %v: %v", te.name, r) } } }() @@ -269,17 +268,17 @@ func (te templateExpander) ExpandHTML(templateFiles []string) (result string, re }) tmpl, err := tmpl.Parse(te.text) if err != nil { - return "", fmt.Errorf("Error parsing template %v: %v", te.name, err) + return "", fmt.Errorf("error parsing template %v: %v", te.name, err) } if len(templateFiles) > 0 { _, err = tmpl.ParseFiles(templateFiles...) if err != nil { - return "", fmt.Errorf("Error parsing template files for %v: %v", te.name, err) + return "", fmt.Errorf("error parsing template files for %v: %v", te.name, err) } } err = tmpl.Execute(&buffer, te.data) if err != nil { - return "", fmt.Errorf("Error executing template %v: %v", te.name, err) + return "", fmt.Errorf("error executing template %v: %v", te.name, err) } return buffer.String(), nil } diff --git a/utility/bytesize.go b/utility/bytesize.go deleted file mode 100644 index c7396d9d1..000000000 --- a/utility/bytesize.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utility - -import ( - "fmt" -) - -// The canonical example: http://golang.org/doc/progs/eff_bytesize.go. 
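A tiny sketch of the expander entry point whose error messages change above; the template text and data are arbitrary, and the timestamp and storage arguments are assumed to be supplied by the caller.

	package example

	import (
		"github.com/golang/glog"

		clientmodel "github.com/prometheus/client_golang/model"

		"github.com/prometheus/prometheus/storage/local"
		"github.com/prometheus/prometheus/templates"
	)

	func expandExample(now clientmodel.Timestamp, storage local.Storage) {
		// Only plain text/template features are used, so the funcMap is not
		// exercised; the result should be "3 targets are down".
		result, err := templates.NewTemplateExpander(
			"{{.}} targets are down", "example", 3, now, storage,
		).Expand()
		if err != nil {
			glog.Error("error expanding template: ", err)
			return
		}
		glog.Info(result)
	}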
-type ByteSize float64 - -const ( - _ = iota // ignore first value by assigning to blank identifier - KB ByteSize = 1 << (10 * iota) - MB - GB - TB - PB - EB - ZB - YB -) - -func (b ByteSize) String() string { - switch { - case b >= YB: - return fmt.Sprintf("%.2fYB", b/YB) - case b >= ZB: - return fmt.Sprintf("%.2fZB", b/ZB) - case b >= EB: - return fmt.Sprintf("%.2fEB", b/EB) - case b >= PB: - return fmt.Sprintf("%.2fPB", b/PB) - case b >= TB: - return fmt.Sprintf("%.2fTB", b/TB) - case b >= GB: - return fmt.Sprintf("%.2fGB", b/GB) - case b >= MB: - return fmt.Sprintf("%.2fMB", b/MB) - case b >= KB: - return fmt.Sprintf("%.2fKB", b/KB) - } - return fmt.Sprintf("%.2fB", b) -} diff --git a/utility/cache.go b/utility/cache.go deleted file mode 100644 index 80ed4025a..000000000 --- a/utility/cache.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utility - -import ( - "container/list" - "sync" -) - -type Cache interface { - Put(k, v interface{}) (replaced bool, err error) - PutIfAbsent(k, v interface{}) (put bool, err error) - Get(k interface{}) (v interface{}, ok bool, err error) - Has(k interface{}) (ok bool, err error) - Delete(k interface{}) (deleted bool, err error) - Clear() (cleared bool, err error) -} - -type LRUCache struct { - list *list.List - table map[interface{}]*list.Element - - limit uint - size uint -} - -func NewLRUCache(limit uint) *LRUCache { - return &LRUCache{ - list: list.New(), - table: map[interface{}]*list.Element{}, - - limit: limit, - } -} - -func (c *LRUCache) Has(k interface{}) (has bool, err error) { - _, ok := c.table[k] - return ok, nil -} - -func (c *LRUCache) Get(k interface{}) (v interface{}, ok bool, err error) { - element, ok := c.table[k] - if !ok { - return nil, false, nil - } - - c.moveToFront(element) - - return element.Value, true, nil -} - -func (c *LRUCache) Put(k, v interface{}) (replaced bool, err error) { - element, ok := c.table[k] - if ok { - c.updateInplace(element, v) - return true, nil - } - - c.addNew(k, v) - return false, nil -} - -func (c *LRUCache) PutIfAbsent(k, v interface{}) (put bool, err error) { - if _, ok := c.table[k]; ok { - return false, nil - } - - c.addNew(k, v) - return true, nil -} - -func (c *LRUCache) Delete(k interface{}) (deleted bool, err error) { - element, ok := c.table[k] - if !ok { - return false, nil - } - - c.list.Remove(element) - delete(c.table, k) - - return true, nil -} - -func (c *LRUCache) Clear() (cleared bool, err error) { - c.list.Init() - c.table = map[interface{}]*list.Element{} - c.size = 0 - - return true, nil -} - -func (c *LRUCache) updateInplace(e *list.Element, v interface{}) { - e.Value = v - c.moveToFront(e) - c.checkCapacity() -} - -func (c *LRUCache) moveToFront(e *list.Element) { - c.list.MoveToFront(e) -} - -func (c *LRUCache) addNew(k, v interface{}) { - c.table[k] = c.list.PushFront(v) - c.checkCapacity() -} - -func (c *LRUCache) checkCapacity() { - for c.size > c.limit { - delElem := c.list.Back() - v := 
delElem.Value - c.list.Remove(delElem) - delete(c.table, v) - } -} - -type SynchronizedCache struct { - mu sync.Mutex - c Cache -} - -func (c *SynchronizedCache) Put(k, v interface{}) (replaced bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.c.Put(k, v) -} -func (c *SynchronizedCache) PutIfAbsent(k, v interface{}) (put bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.PutIfAbsent(k, v) -} - -func (c *SynchronizedCache) Get(k interface{}) (v interface{}, ok bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.c.Get(k) -} -func (c *SynchronizedCache) Has(k interface{}) (ok bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.c.Has(k) -} - -func (c *SynchronizedCache) Delete(k interface{}) (deleted bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.c.Delete(k) -} - -func (c *SynchronizedCache) Clear() (cleared bool, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - return c.c.Clear() -} - -func NewSynchronizedCache(c Cache) *SynchronizedCache { - return &SynchronizedCache{ - c: c, - } -} diff --git a/utility/file_ext.go b/utility/file_ext.go deleted file mode 100644 index f06eb8d5c..000000000 --- a/utility/file_ext.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Useful file/filesystem related functions. - -package utility - -import ( - "fmt" - "os" -) - -// Returns true iff dirPath is a valid directory path. -func IsDir(dirPath string) (bool, error) { - finfo, err := os.Stat(dirPath) - if err != nil { - return false, err - } - if !finfo.IsDir() { - return false, fmt.Errorf("%s not a directory", dirPath) - } - return true, nil -} diff --git a/utility/freelist.go b/utility/freelist.go deleted file mode 100644 index 4c3d02c09..000000000 --- a/utility/freelist.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package utility
-
-type FreeList chan interface{}
-
-func NewFreeList(cap int) FreeList {
-	return make(FreeList, cap)
-}
-
-func (l FreeList) Get() (interface{}, bool) {
-	select {
-	case v := <-l:
-		return v, true
-	default:
-		return nil, false
-	}
-}
-
-func (l FreeList) Give(v interface{}) bool {
-	select {
-	case l <- v:
-		return true
-	default:
-		return false
-	}
-}
-
-func (l FreeList) Close() {
-	close(l)
-
-	for _ = range l {
-	}
-}
diff --git a/utility/strconv.go b/utility/strconv.go
index 5e13fcfc8..bbcaff26b 100644
--- a/utility/strconv.go
+++ b/utility/strconv.go
@@ -22,6 +22,11 @@ import (
 
 var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
 
+// DurationToString formats a time.Duration as a string with the assumption that
+// a year always has 365 days and a day always has 24h. (The former doesn't work
+// in leap years, the latter is broken by DST switches, not to speak about leap
+// seconds, but those are not even treated properly by the duration strings in
+// the standard library.)
 func DurationToString(duration time.Duration) string {
 	seconds := int64(duration / time.Second)
 	factors := map[string]int64{
@@ -45,10 +50,13 @@ func DurationToString(duration time.Duration) string {
 	return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
 }
 
+// StringToDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week 7d, a day 24h. See DurationToString for problems with
+// that.
 func StringToDuration(durationStr string) (duration time.Duration, err error) {
 	matches := durationRE.FindStringSubmatch(durationStr)
 	if len(matches) != 3 {
-		err = fmt.Errorf("Not a valid duration string: '%v'", durationStr)
+		err = fmt.Errorf("not a valid duration string: %q", durationStr)
 		return
 	}
 	durationSeconds, _ := strconv.Atoi(matches[1])
diff --git a/utility/test/directory.go b/utility/test/directory.go
index 994439591..9647718bf 100644
--- a/utility/test/directory.go
+++ b/utility/test/directory.go
@@ -24,11 +24,12 @@ const (
 	// environment variable.
 	defaultDirectory = ""
 
-	// A NO-OP Closer.
+	// NilCloser is a no-op Closer.
 	NilCloser = nilCloser(true)
 )
 
 type (
+	// Closer is the interface that wraps the Close method.
 	Closer interface {
 		// Close reaps the underlying directory and its children. The directory
 		// could be deleted by its users already.
@@ -74,6 +75,8 @@ func (c callbackCloser) Close() {
 	c.fn()
 }
 
+// NewCallbackCloser returns a Closer that calls the provided function upon
+// closing.
 func NewCallbackCloser(fn func()) *callbackCloser {
 	return &callbackCloser{
 		fn: fn,
diff --git a/utility/time.go b/utility/time.go
index b8b450c5e..9525589d6 100644
--- a/utility/time.go
+++ b/utility/time.go
@@ -17,8 +17,8 @@ import (
 	"time"
 )
 
-// A basic interface only useful in testing contexts for dispensing the time
-// in a controlled manner.
+// InstantProvider is a basic interface only useful in testing contexts for
+// dispensing the time in a controlled manner.
 type InstantProvider interface {
 	// The current instant.
 	Now() time.Time
@@ -35,7 +35,7 @@ type Time struct {
 	Provider InstantProvider
 }
 
-// Emit the current instant.
+// Now emits the current instant.
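To illustrate the duration string format documented above (an integer plus one of the units y, w, d, h, m, s, per the regexp ^([0-9]+)([ywdhms]+)$), here is a minimal sketch. It is not part of the change set; it assumes the github.com/prometheus/prometheus/utility import path used by other files in this diff, and it simply round-trips a few values through StringToDuration and DurationToString:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/utility"
)

func main() {
	// "1d" is read as 24h and "1y" as 365d, per the assumptions documented
	// on DurationToString and StringToDuration.
	for _, s := range []string{"30s", "5m", "2h", "1d", "1w", "1y"} {
		d, err := utility.StringToDuration(s)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%s -> %v -> %s\n", s, d, utility.DurationToString(d))
	}

	// Anything not matching ^([0-9]+)([ywdhms]+)$ is rejected.
	if _, err := utility.StringToDuration("1.5h"); err != nil {
		fmt.Println(err) // not a valid duration string: "1.5h"
	}
}
```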
func (t Time) Now() time.Time { if t.Provider == nil { return time.Now() diff --git a/utility/uncertaintygroup.go b/utility/uncertaintygroup.go deleted file mode 100644 index 2c00833a0..000000000 --- a/utility/uncertaintygroup.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utility - -import ( - "fmt" - "sync" -) - -type state int - -func (s state) String() string { - switch s { - case unstarted: - return "unstarted" - case started: - return "started" - case finished: - return "finished" - } - panic("unreachable") -} - -const ( - unstarted state = iota - started - finished -) - -// An UncertaintyGroup models a group of operations whose result disposition is -// tenuous and needs to be validated en masse in order to make a future -// decision. -type UncertaintyGroup interface { - // Succeed makes a remark that a given action succeeded, in part. - Succeed() - // Fail makes a remark that a given action failed, in part. Nil values are - // illegal. - Fail(error) - // MayFail makes a remark that a given action either succeeded or failed. The - // determination is made by whether the error is nil. - MayFail(error) - // Wait waits for the group to have finished and emits the result of what - // occurred for the group. - Wait() (succeeded bool) - // Errors emits any errors that could have occurred. 
- Errors() []error -} - -type uncertaintyGroup struct { - state state - remaining uint - successes uint - results chan error - anomalies []error - sync.Mutex -} - -func (g *uncertaintyGroup) Succeed() { - if g.isFinished() { - panic("cannot remark when done") - } - - g.results <- nil -} - -func (g *uncertaintyGroup) Fail(err error) { - if g.isFinished() { - panic("cannot remark when done") - } - - if err == nil { - panic("expected a failure") - } - - g.results <- err -} - -func (g *uncertaintyGroup) MayFail(err error) { - if g.isFinished() { - panic("cannot remark when done") - } - - g.results <- err -} - -func (g *uncertaintyGroup) isFinished() bool { - g.Lock() - defer g.Unlock() - - return g.state == finished -} - -func (g *uncertaintyGroup) finish() { - g.Lock() - defer g.Unlock() - - g.state = finished -} - -func (g *uncertaintyGroup) start() { - g.Lock() - defer g.Unlock() - - if g.state != unstarted { - panic("cannot restart") - } - - g.state = started -} - -func (g *uncertaintyGroup) Wait() bool { - defer close(g.results) - g.start() - - for g.remaining > 0 { - result := <-g.results - switch result { - case nil: - g.successes++ - default: - g.anomalies = append(g.anomalies, result) - } - - g.remaining-- - } - - g.finish() - - return len(g.anomalies) == 0 -} - -func (g *uncertaintyGroup) Errors() []error { - if g.state != finished { - panic("cannot provide errors until finished") - } - - return g.anomalies -} - -func (g *uncertaintyGroup) String() string { - return fmt.Sprintf("UncertaintyGroup %s with %s failures", g.state, g.anomalies) -} - -// NewUncertaintyGroup furnishes an UncertaintyGroup for a given set of actions -// where their quantity is known a priori. -func NewUncertaintyGroup(count uint) UncertaintyGroup { - return &uncertaintyGroup{ - remaining: count, - results: make(chan error), - } -} diff --git a/utility/uncertaintygroup_test.go b/utility/uncertaintygroup_test.go deleted file mode 100644 index d2a8a13c6..000000000 --- a/utility/uncertaintygroup_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package utility - -import ( - "fmt" - "testing" - "time" -) - -func TestGroupSuccess(t *testing.T) { - uncertaintyGroup := NewUncertaintyGroup(10) - - for i := 0; i < 10; i++ { - go uncertaintyGroup.Succeed() - } - - result := make(chan bool) - go func() { - result <- uncertaintyGroup.Wait() - }() - select { - case v := <-result: - if !v { - t.Fatal("expected success") - } - case <-time.After(time.Second): - t.Fatal("deadline exceeded") - } -} - -func TestGroupFail(t *testing.T) { - uncertaintyGroup := NewUncertaintyGroup(10) - - for i := 0; i < 10; i++ { - go uncertaintyGroup.Fail(fmt.Errorf("")) - } - - result := make(chan bool) - go func() { - result <- uncertaintyGroup.Wait() - }() - select { - case v := <-result: - if v { - t.Fatal("expected failure") - } - case <-time.After(time.Second): - t.Fatal("deadline exceeded") - } -} - -func TestGroupFailMix(t *testing.T) { - uncertaintyGroup := NewUncertaintyGroup(10) - - for i := 0; i < 10; i++ { - go func(i int) { - switch { - case i%2 == 0: - uncertaintyGroup.Fail(fmt.Errorf("")) - default: - uncertaintyGroup.Succeed() - } - }(i) - } - - result := make(chan bool) - go func() { - result <- uncertaintyGroup.Wait() - }() - select { - case v := <-result: - if v { - t.Fatal("expected failure") - } - case <-time.After(time.Second): - t.Fatal("deadline exceeded") - } -} diff --git a/web/alerts.go b/web/alerts.go index 54db2e2fd..7b65c9732 100644 --- a/web/alerts.go +++ b/web/alerts.go @@ -22,17 +22,13 @@ import ( 
"github.com/prometheus/prometheus/rules/manager" ) +// AlertStatus bundles alerting rules and the mapping of alert states to row +// classes. type AlertStatus struct { AlertingRules []*rules.AlertingRule AlertStateToRowClass map[rules.AlertState]string } -type AlertsHandler struct { - RuleManager manager.RuleManager - - mutex sync.Mutex -} - type byAlertStateSorter struct { alerts []*rules.AlertingRule } @@ -49,6 +45,13 @@ func (s byAlertStateSorter) Swap(i, j int) { s.alerts[i], s.alerts[j] = s.alerts[j], s.alerts[i] } +// AlertsHandler implements http.Handler. +type AlertsHandler struct { + RuleManager manager.RuleManager + + mutex sync.Mutex +} + func (h *AlertsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mutex.Lock() defer h.mutex.Unlock() @@ -60,9 +63,9 @@ func (h *AlertsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { alertStatus := AlertStatus{ AlertingRules: alertsSorter.alerts, AlertStateToRowClass: map[rules.AlertState]string{ - rules.INACTIVE: "success", - rules.PENDING: "warning", - rules.FIRING: "error", + rules.Inactive: "success", + rules.Pending: "warning", + rules.Firing: "error", }, } executeTemplate(w, "alerts", alertStatus) diff --git a/web/api/api.go b/web/api/api.go index 13d24db74..fe420b594 100644 --- a/web/api/api.go +++ b/web/api/api.go @@ -22,9 +22,10 @@ import ( "github.com/prometheus/prometheus/retrieval" "github.com/prometheus/prometheus/storage/local" "github.com/prometheus/prometheus/utility" - "github.com/prometheus/prometheus/web/http_utils" + "github.com/prometheus/prometheus/web/httputils" ) +// MetricsService manages the /api HTTP endpoint. type MetricsService struct { time utility.Time Config *config.Config @@ -32,9 +33,10 @@ type MetricsService struct { Storage local.Storage } +// RegisterHandler registers the handler for the various endpoints below /api. func (msrv *MetricsService) RegisterHandler() { handler := func(h func(http.ResponseWriter, *http.Request)) http.Handler { - return http_utils.CompressionHandler{ + return httputils.CompressionHandler{ Handler: http.HandlerFunc(h), } } diff --git a/web/api/query.go b/web/api/query.go index 92a144645..52043adfa 100644 --- a/web/api/query.go +++ b/web/api/query.go @@ -29,7 +29,7 @@ import ( "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/rules/ast" "github.com/prometheus/prometheus/stats" - "github.com/prometheus/prometheus/web/http_utils" + "github.com/prometheus/prometheus/web/httputils" ) // Enables cross-site script calls. @@ -40,10 +40,11 @@ func setAccessControlHeaders(w http.ResponseWriter) { w.Header().Set("Access-Control-Expose-Headers", "Date") } +// Query handles the /api/query endpoint. func (serv MetricsService) Query(w http.ResponseWriter, r *http.Request) { setAccessControlHeaders(w) - params := http_utils.GetQueryParams(r) + params := httputils.GetQueryParams(r) expr := params.Get("expr") asText := params.Get("asText") @@ -71,11 +72,12 @@ func (serv MetricsService) Query(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, result) } +// QueryRange handles the /api/query_range endpoint. func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) { setAccessControlHeaders(w) w.Header().Set("Content-Type", "application/json") - params := http_utils.GetQueryParams(r) + params := httputils.GetQueryParams(r) expr := params.Get("expr") // Input times and durations are in seconds and get converted to nanoseconds. 
@@ -93,7 +95,7 @@ func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	if exprNode.Type() != ast.VECTOR {
-		fmt.Fprint(w, ast.ErrorToJSON(errors.New("Expression does not evaluate to vector type")))
+		fmt.Fprint(w, ast.ErrorToJSON(errors.New("expression does not evaluate to vector type")))
 		return
 	}
 
@@ -112,7 +114,7 @@ func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
 	// For safety, limit the number of returned points per timeseries.
 	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
 	if duration/step > 11000 {
-		fmt.Fprint(w, ast.ErrorToJSON(errors.New("Exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX).")))
+		fmt.Fprint(w, ast.ErrorToJSON(errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")))
 		return
 	}
 
@@ -139,7 +141,7 @@ func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
 	sort.Sort(matrix)
 	sortTimer.Stop()
 
-	jsonTimer := queryStats.GetTimer(stats.JsonEncodeTime).Start()
+	jsonTimer := queryStats.GetTimer(stats.JSONEncodeTime).Start()
 	result := ast.TypedValueToJSON(matrix, "matrix")
 	jsonTimer.Stop()
 
@@ -147,6 +149,7 @@ func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
 	fmt.Fprint(w, result)
 }
 
+// Metrics handles the /api/metrics endpoint.
 func (serv MetricsService) Metrics(w http.ResponseWriter, r *http.Request) {
 	setAccessControlHeaders(w)
 
diff --git a/web/api/targets.go b/web/api/targets.go
index d500516ba..e920d40c8 100644
--- a/web/api/targets.go
+++ b/web/api/targets.go
@@ -20,16 +20,19 @@ import (
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/retrieval"
-	"github.com/prometheus/prometheus/web/http_utils"
+	"github.com/prometheus/prometheus/web/httputils"
 )
 
+// TargetGroup bundles endpoints and base labels with appropriate JSON
+// annotations.
 type TargetGroup struct {
 	Endpoints  []string          `json:"endpoints"`
 	BaseLabels map[string]string `json:"baseLabels"`
 }
 
+// SetTargets handles the /api/targets endpoint.
 func (serv MetricsService) SetTargets(w http.ResponseWriter, r *http.Request) {
-	params := http_utils.GetQueryParams(r)
+	params := httputils.GetQueryParams(r)
 	jobName := params.Get("job")
 
 	decoder := json.NewDecoder(r.Body)
diff --git a/web/blob/blob.go b/web/blob/blob.go
index d71d7e561..3dd35c299 100644
--- a/web/blob/blob.go
+++ b/web/blob/blob.go
@@ -11,6 +11,7 @@ import (
 	"github.com/golang/glog"
 )
 
+// Sub-directories for templates and static content.
 const (
 	TemplateFiles = "templates"
 	StaticFiles   = "static"
@@ -22,10 +23,11 @@ var mimeMap = map[string]string{
 	"descriptor": "application/vnd.google.protobuf;proto=google.protobuf.FileDescriptorSet",
 }
 
+// GetFile retrieves the content of an embedded file.
 func GetFile(bucket string, name string) ([]byte, error) {
 	blob, ok := files[bucket][name]
 	if !ok {
-		return nil, fmt.Errorf("Could not find %s/%s. Missing/updated files.go?", bucket, name)
+		return nil, fmt.Errorf("could not find %s/%s (missing or updated files.go?)", bucket, name)
 	}
 	reader := bytes.NewReader(blob)
 	gz, err := gzip.NewReader(reader)
@@ -40,6 +42,7 @@ func GetFile(bucket string, name string) ([]byte, error) {
 	return b.Bytes(), nil
 }
 
+// Handler implements http.Handler.
type Handler struct{} func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { diff --git a/web/consoles.go b/web/consoles.go index fab4a2a63..816718f9d 100644 --- a/web/consoles.go +++ b/web/consoles.go @@ -31,6 +31,7 @@ var ( consoleLibrariesPath = flag.String("web.console.libraries", "console_libraries", "Path to the console library directory.") ) +// ConsolesHandler implements http.Handler. type ConsolesHandler struct { Storage local.Storage } diff --git a/web/http_utils/compression.go b/web/httputils/compression.go similarity index 91% rename from web/http_utils/compression.go rename to web/httputils/compression.go index 3b00ca61d..f142767cd 100644 --- a/web/http_utils/compression.go +++ b/web/httputils/compression.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package http_utils +package httputils import ( "compress/gzip" @@ -78,13 +78,13 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) } } -// Wrapper around http.Handler which adds suitable response compression based -// on the client's Accept-Encoding headers. +// CompressionHandler is a wrapper around http.Handler which adds suitable +// response compression based on the client's Accept-Encoding headers. type CompressionHandler struct { Handler http.Handler } -// Adds compression to the original http.Handler's ServeHTTP() method. +// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { compWriter := newCompressedResponseWriter(writer, req) c.Handler.ServeHTTP(compWriter, req) diff --git a/web/http_utils/http_utils.go b/web/httputils/httputils.go similarity index 90% rename from web/http_utils/http_utils.go rename to web/httputils/httputils.go index 31476ce0e..ca3918da4 100644 --- a/web/http_utils/http_utils.go +++ b/web/httputils/httputils.go @@ -11,13 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -package http_utils +package httputils import ( "net/http" "net/url" ) +// GetQueryParams calls r.ParseForm and returns r.Form. func GetQueryParams(r *http.Request) url.Values { r.ParseForm() return r.Form diff --git a/web/status.go b/web/status.go index d17c9f869..4154f90b6 100644 --- a/web/status.go +++ b/web/status.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/prometheus/rules/manager" ) +// PrometheusStatusHandler implements http.Handler. type PrometheusStatusHandler struct { mu sync.RWMutex diff --git a/web/web.go b/web/web.go index 5b0bdd10c..72890e65a 100644 --- a/web/web.go +++ b/web/web.go @@ -20,7 +20,6 @@ import ( "io/ioutil" "net" "net/http" - _ "net/http/pprof" "os" "time" @@ -41,6 +40,7 @@ var ( enableQuit = flag.Bool("web.enable-remote-shutdown", false, "Enable remote service shutdown.") ) +// WebService handles the HTTP endpoints with the exception of /api. type WebService struct { StatusHandler *PrometheusStatusHandler MetricsHandler *api.MetricsService @@ -50,19 +50,20 @@ type WebService struct { QuitDelegate func() } -func (w WebService) ServeForever() error { +// ServeForever serves the HTTP endpoints and only returns upon errors. 
+func (ws WebService) ServeForever() error { http.Handle("/favicon.ico", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "", 404) })) http.Handle("/", prometheus.InstrumentHandler( - "/", w.StatusHandler, + "/", ws.StatusHandler, )) http.Handle("/alerts", prometheus.InstrumentHandler( - "/alerts", w.AlertsHandler, + "/alerts", ws.AlertsHandler, )) http.Handle("/consoles/", prometheus.InstrumentHandler( - "/consoles/", http.StripPrefix("/consoles/", w.ConsolesHandler), + "/consoles/", http.StripPrefix("/consoles/", ws.ConsolesHandler), )) http.Handle("/graph", prometheus.InstrumentHandler( "/graph", http.HandlerFunc(graphHandler), @@ -71,7 +72,7 @@ func (w WebService) ServeForever() error { "/heap", http.HandlerFunc(dumpHeap), )) - w.MetricsHandler.RegisterHandler() + ws.MetricsHandler.RegisterHandler() http.Handle("/metrics", prometheus.Handler()) if *useLocalAssets { http.Handle("/static/", prometheus.InstrumentHandler( @@ -90,7 +91,7 @@ func (w WebService) ServeForever() error { } if *enableQuit { - http.Handle("/-/quit", http.HandlerFunc(w.quitHandler)) + http.Handle("/-/quit", http.HandlerFunc(ws.quitHandler)) } glog.Info("listening on ", *listenAddress) @@ -98,7 +99,7 @@ func (w WebService) ServeForever() error { return http.ListenAndServe(*listenAddress, nil) } -func (s WebService) quitHandler(w http.ResponseWriter, r *http.Request) { +func (ws WebService) quitHandler(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { w.Header().Add("Allow", "POST") w.WriteHeader(http.StatusMethodNotAllowed) @@ -107,7 +108,7 @@ func (s WebService) quitHandler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Requesting termination... Goodbye!") - s.QuitDelegate() + ws.QuitDelegate() } func getTemplateFile(name string) (string, error) { @@ -118,14 +119,13 @@ func getTemplateFile(name string) (string, error) { return "", err } return string(file), nil - } else { - file, err := blob.GetFile(blob.TemplateFiles, name+".html") - if err != nil { - glog.Errorf("Could not read %s template: %s", name, err) - return "", err - } - return string(file), nil } + file, err := blob.GetFile(blob.TemplateFiles, name+".html") + if err != nil { + glog.Errorf("Could not read %s template: %s", name, err) + return "", err + } + return string(file), nil } func getConsoles() string { @@ -186,7 +186,8 @@ func dumpHeap(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Done") } -func MustBuildServerUrl() string { +// MustBuildServerURL returns the server URL and panics in case an error occurs. +func MustBuildServerURL() string { _, port, err := net.SplitHostPort(*listenAddress) if err != nil { panic(err)
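For reference, the JSON shape implied by the TargetGroup struct tags in web/api/targets.go above. The endpoint and label values are made up, and the sketch assumes the type is importable from the web/api package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/prometheus/web/api"
)

func main() {
	tg := api.TargetGroup{
		Endpoints:  []string{"http://example.org:8080/metrics.json"},
		BaseLabels: map[string]string{"zone": "example"},
	}
	b, err := json.Marshal(tg)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b))
	// {"endpoints":["http://example.org:8080/metrics.json"],"baseLabels":{"zone":"example"}}
}
```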
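Finally, a client-side sketch of the /-/quit behavior in web.go above: the handler is only registered when the web.enable-remote-shutdown flag is set, and it only accepts POST (other methods receive a 405 with an Allow: POST header). The listen address below is an assumption, not taken from this diff:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:9090" // assumed listen address
	resp, err := http.Post(base+"/-/quit", "text/plain", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 OK when remote shutdown is enabled
}
```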