Merge branch 'main' into dev_neg_offset
commit f80b52be69
@@ -2,7 +2,7 @@
 version: 2.1

 orbs:
-  prometheus: prometheus/prometheus@0.4.0
+  prometheus: prometheus/prometheus@0.9.0
   go: circleci/go@0.2.0
   win: circleci/windows@2.3.0
@@ -124,14 +124,14 @@ workflows:
           filters:
             tags:
               only: /.*/
-      - prometheus/publish_master:
+      - prometheus/publish_main:
           context: org-context
           requires:
             - test
             - build
           filters:
             branches:
-              only: master
+              only: main
           image: circleci/golang:1-node
       - prometheus/publish_release:
           context: org-context
@@ -151,7 +151,7 @@ workflows:
       filters:
         branches:
           only:
-            - master
+            - main
     jobs:
       - repo_sync:
           context: org-context
@@ -13,10 +13,10 @@ name: "CodeQL"

 on:
   push:
-    branches: [ master, release-* ]
+    branches: [ main, release-* ]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ master ]
+    branches: [ main ]
   schedule:
     - cron: '26 14 * * 1'
NOTICE (4 changes)

@@ -92,7 +92,7 @@ Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
 See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.

 We also use code from a large number of npm packages. For details, see:
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
 - The individual package licenses as copied from the node_modules directory can be found in
   the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
README.md (10 changes)

@@ -1,6 +1,6 @@
 # Prometheus

-[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/master.svg?style=shield)][circleci]
+[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/main.svg?style=shield)][circleci]
 [![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay]
 [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)

@@ -72,7 +72,7 @@ read its web assets from local filesystem directories under `web/ui/static` and
 from the root of the cloned repository. Note also that these directories do not include the
 new experimental React UI unless it has been built explicitly using `make assets` or `make build`.

-An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus.yml)
+An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)

 You can also clone the repository yourself and build using `make build`, which will compile in
 the web assets so that Prometheus can be run from anywhere:

@@ -106,7 +106,7 @@ You can build a docker image locally with the following commands:

 ## React UI Development

-For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/README.md).
+For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/react-app/README.md).

 ## More information

@@ -116,11 +116,11 @@ For more information on building, running, and developing on the new React-based

 ## Contributing

-Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/master/CONTRIBUTING.md)
+Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md)

 ## License

-Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/master/LICENSE).
+Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE).


 [hub]: https://hub.docker.com/r/prom/prometheus/
RELEASE.md (12 changes)

@@ -39,7 +39,7 @@ If you are interested in volunteering please create a pull request against the [

 The release shepherd is responsible for the entire release series of a minor release, meaning all pre- and patch releases of a minor release. The process formally starts with the initial pre-release, but some preparations should be done a few days in advance.

-* We aim to keep the master branch in a working state at all times. In principle, it should be possible to cut a release from master at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of master. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release.
+* We aim to keep the main branch in a working state at all times. In principle, it should be possible to cut a release from main at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of main. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release.
 * On the date listed in the table above, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release.
 * With the pre-release, the release shepherd is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release.
 * If regressions or critical bugs are detected, they need to get fixed before cutting a new pre-release (called `-rc.1`, `-rc.2`, etc.).

@@ -58,9 +58,9 @@ We maintain a separate branch for each minor release, named `release-<major>.<mi

 Note that branch protection kicks in automatically for any branches whose name starts with `release-`. Never use names starting with `release-` for branches that are not release branches.

-The usual flow is to merge new features and changes into the master branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into master from the latest release branch. The master branch should always contain all commits from the latest release branch. As long as master hasn't deviated from the release branch, new commits can also go to master, followed by merging master back into the release branch.
+The usual flow is to merge new features and changes into the main branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into main from the latest release branch. The main branch should always contain all commits from the latest release branch. As long as main hasn't deviated from the release branch, new commits can also go to main, followed by merging main back into the release branch.

-If a bug fix got accidentally merged into master after non-bug-fix changes in master, the bug-fix commits have to be cherry-picked into the release branch, which then have to be merged back into master. Try to avoid that situation.
+If a bug fix got accidentally merged into main after non-bug-fix changes in main, the bug-fix commits have to be cherry-picked into the release branch, which then have to be merged back into main. Try to avoid that situation.

 Maintaining the release branches for older minor releases happens on a best effort basis.

@@ -68,7 +68,7 @@ Maintaining the release branches for older minor releases happens on a best effo

 A few days before a major or minor release, consider updating the dependencies.

-Then create a pull request against the master branch.
+Then create a pull request against the main branch.

 Note that after a dependency update, you should look out for any weirdness that
 might have happened. Such weirdnesses include but are not limited to: flaky

@@ -107,7 +107,7 @@ git add package.json yarn.lock

 ### 1. Prepare your release

-At the start of a new major or minor release cycle create the corresponding release branch based on the master branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.
+At the start of a new major or minor release cycle create the corresponding release branch based on the main branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.

 Changes for a patch release or release candidate should be merged into the previously mentioned release branch via pull request.

@@ -154,6 +154,6 @@ Finally, wait for the build step for the tag to finish. The point here is to wai

 For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days using the `/prombench vX.Y.Z` command, `vX.Y.Z` being the latest stable patch release's tag of the previous minor release series, such as `v2.15.2`.

-If the release has happened in the latest release branch, merge the changes into master.
+If the release has happened in the latest release branch, merge the changes into main.

 Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.
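To make the branch flow described in the RELEASE.md hunks above concrete, here is a minimal, hypothetical git sketch; the branch and version names are illustrative and not taken from this diff:

```bash
# A bug fix lands in the latest release branch first.
git checkout release-2.24
git merge --no-ff my-bugfix-branch

# The release branch is then merged back into main so main contains every commit.
git checkout main
git merge --no-ff release-2.24
```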
@@ -228,6 +228,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 	var errs []error
 	for ts := mint; ts.Before(maxt) || ts.Equal(maxt); ts = ts.Add(evalInterval) {
 		// Collects the alerts asked for unit testing.
+		var evalErrs []error
 		suite.WithSamplesTill(ts, func(err error) {
 			if err != nil {
 				errs = append(errs, err)

@@ -237,13 +238,16 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 				g.Eval(suite.Context(), ts)
 				for _, r := range g.Rules() {
 					if r.LastError() != nil {
-						errs = append(errs, errors.Errorf(" rule: %s, time: %s, err: %v",
+						evalErrs = append(evalErrs, errors.Errorf(" rule: %s, time: %s, err: %v",
 							r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
 					}
 				}
 			}
 		})
-		if len(errs) > 0 {
+		errs = append(errs, evalErrs...)
+		// Only end testing at this point if errors occurred evaluating above,
+		// rather than any test failures already collected in errs.
+		if len(evalErrs) > 0 {
 			return errs
 		}
@@ -618,7 +618,7 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 	}
 	for header := range c.Headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, bearer_token, or bearer_token_file parameter")
+			return errors.New("authorization header must be changed via the basic_auth or authorization parameter")
 		}
 		if _, ok := unchangeableHeaders[strings.ToLower(header)]; ok {
 			return errors.Errorf("%s is an unchangeable header", header)
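For context, a hedged sketch of a `remote_write` section that satisfies the updated validation above: credentials are supplied through the `authorization` setting rather than a hand-written `Authorization` header. The endpoint URL and token path are placeholders, not values from this diff:

```yaml
remote_write:
  - url: https://remote-storage.example.com/api/v1/write
    # Setting headers: { Authorization: ... } is rejected by the check above.
    authorization:
      type: Bearer                                          # default
      credentials_file: /etc/prometheus/remote_write_token  # placeholder path
```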
@@ -141,7 +141,10 @@ var expectedConf = &Config{
 			Scheme: DefaultScrapeConfig.Scheme,

 			HTTPClientConfig: config.HTTPClientConfig{
-				BearerTokenFile: filepath.FromSlash("testdata/valid_token_file"),
+				Authorization: &config.Authorization{
+					Type:            "Bearer",
+					CredentialsFile: filepath.FromSlash("testdata/valid_token_file"),
+				},
 			},

 			ServiceDiscoveryConfigs: discovery.Configs{

@@ -344,7 +347,10 @@ var expectedConf = &Config{
 					KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
 				},

-				BearerToken: "mysecret",
+				Authorization: &config.Authorization{
+					Type:        "Bearer",
+					Credentials: "mysecret",
+				},
 			},
 		},
 		{

@@ -603,7 +609,10 @@ var expectedConf = &Config{
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&digitalocean.SDConfig{
 					HTTPClientConfig: config.HTTPClientConfig{
-						BearerToken: "abcdef",
+						Authorization: &config.Authorization{
+							Type:        "Bearer",
+							Credentials: "abcdef",
+						},
 					},
 					Port:            80,
 					RefreshInterval: model.Duration(60 * time.Second),

@@ -665,7 +674,10 @@ var expectedConf = &Config{
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&hetzner.SDConfig{
 					HTTPClientConfig: config.HTTPClientConfig{
-						BearerToken: "abcdef",
+						Authorization: &config.Authorization{
+							Type:        "Bearer",
+							Credentials: "abcdef",
+						},
 					},
 					Port:            80,
 					RefreshInterval: model.Duration(60 * time.Second),

@@ -919,6 +931,9 @@ var expectedErrors = []struct {
 	}, {
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
+	}, {
+		filename: "kubernetes_authorization_basicauth.bad.yml",
+		errMsg:   "at most one of basic_auth & authorization must be configured",
 	}, {
 		filename: "marathon_no_servers.bad.yml",
 		errMsg:   "marathon_sd: must contain at least one Marathon server",

@@ -931,6 +946,9 @@ var expectedErrors = []struct {
 	}, {
 		filename: "marathon_authtoken_bearertoken.bad.yml",
 		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
+	}, {
+		filename: "marathon_authtoken_authorization.bad.yml",
+		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
 	}, {
 		filename: "openstack_role.bad.yml",
 		errMsg:   "unknown OpenStack SD role",

@@ -957,7 +975,7 @@ var expectedErrors = []struct {
 		errMsg: `x-prometheus-remote-write-version is an unchangeable header`,
 	}, {
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, bearer_token, or bearer_token_file parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth or authorization parameter`,
 	}, {
 		filename: "remote_write_url_missing.bad.yml",
 		errMsg:   `url for remote_write is empty`,
@@ -79,7 +79,8 @@ scrape_configs:
       replacement: static
       target_label: abc

-  bearer_token_file: valid_token_file
+  authorization:
+    credentials_file: valid_token_file


 - job_name: service-x

@@ -158,7 +159,8 @@ scrape_configs:
     cert_file: valid_cert_file
     key_file: valid_key_file

-  bearer_token: mysecret
+  authorization:
+    credentials: mysecret

 - job_name: service-kubernetes


@@ -263,7 +265,8 @@ scrape_configs:

 - job_name: digitalocean-droplets
   digitalocean_sd_configs:
-    - bearer_token: abcdef
+    - authorization:
+        credentials: abcdef

 - job_name: dockerswarm
   dockerswarm_sd_configs:

@@ -284,7 +287,8 @@ scrape_configs:
 - job_name: hetzner
   hetzner_sd_configs:
     - role: hcloud
-      bearer_token: abcdef
+      authorization:
+        credentials: abcdef
     - role: robot
       basic_auth:
        username: abcdef
@@ -0,0 +1,13 @@
+scrape_configs:
+  - job_name: prometheus
+
+    kubernetes_sd_configs:
+      - role: pod
+        api_server: 'https://localhost:1234'
+
+        authorization:
+          credentials: 1234
+        basic_auth:
+          username: user
+          password: password

@@ -2,4 +2,5 @@ scrape_configs:
   - job_name: prometheus
     kubernetes_sd_configs:
       - role: pod
-        bearer_token: 1234
+        authorization:
+          credentials: 1234

@@ -0,0 +1,10 @@
+scrape_configs:
+  - job_name: prometheus
+
+    marathon_sd_configs:
+      - servers:
+          - 'https://localhost:1234'
+
+        auth_token: 1234
+        authorization:
+          credentials: 4567
@@ -53,7 +53,8 @@ scrape_configs:
     key_file: valid_key_file

   digitalocean_sd_configs:
-    - bearer_token: <secret>
+    - authorization:
+        credentials: <secret>

   dockerswarm_sd_configs:
     - host: http://127.0.0.1:2375
@@ -89,7 +89,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }

 // Discovery periodically performs DigitalOcean requests. It implements
@@ -102,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	default:
 		return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }

 // Discovery periodically performs Docker Swarm requests. It implements
@@ -110,7 +110,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.Role == "" {
 		return errors.New("role missing (one of: robot, hcloud)")
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }

 // Discovery periodically performs Hetzner requests. It implements
@@ -111,6 +111,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
 		return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
 	}
+	if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
+		return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+	}
 	return c.HTTPClientConfig.Validate()
 }
@@ -169,12 +169,16 @@ basic_auth:
   [ password_file: <string> ]

 # Sets the `Authorization` header on every scrape request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
-[ bearer_token: <secret> ]
-
-# Sets the `Authorization` header on every scrape request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: <filename> ]
+# the configured credentials.
+authorization:
+  # Sets the authentication type of the request.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials of the request. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials of the request with the credentials read from the
+  # configured file. It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Configures the scrape request's TLS settings.
 tls_config:

@@ -436,7 +440,7 @@ The following meta labels are available on targets during [relabeling](#relabel_

 ```yaml
 # Authentication information used to authenticate to the API server.
-# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
+# Note that `basic_auth` and `authorization` options are
 # mutually exclusive.
 # password and password_file are mutually exclusive.

@@ -446,11 +450,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Optional bearer token authentication information.
-[ bearer_token: <secret> ]
-
-# Optional bearer token file authentication information.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -592,7 +601,7 @@ role: <string>
 [ refresh_interval: <duration> | default = 60s ]

 # Authentication information used to authenticate to the Docker daemon.
-# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
+# Note that `basic_auth` and `authorization` options are
 # mutually exclusive.
 # password and password_file are mutually exclusive.

@@ -602,11 +611,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Optional bearer token authentication information.
-[ bearer_token: <secret> ]
-
-# Optional bearer token file authentication information.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]
 ```

 The [relabeling phase](#relabel_config) is the preferred and more powerful

@@ -989,7 +1003,7 @@ The labels below are only available for targets with `role` set to `robot`:
 role: <string>

 # Authentication information used to authenticate to the API server.
-# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
+# Note that `basic_auth` and `authorization` options are
 # mutually exclusive.
 # password and password_file are mutually exclusive.

@@ -1000,12 +1014,17 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Optional bearer token authentication information, required when role is hcloud
-# Role robot does not support bearer token authentication.
-[ bearer_token: <secret> ]
-
-# Optional bearer token file authentication information.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration. required when role is
+# hcloud. Role robot does not support bearer token authentication.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -1154,7 +1173,7 @@ See below for the configuration options for Kubernetes discovery:
 role: <string>

 # Optional authentication information used to authenticate to the API server.
-# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
+# Note that `basic_auth` and `authorization` options are
 # mutually exclusive.
 # password and password_file are mutually exclusive.

@@ -1164,11 +1183,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Optional bearer token authentication information.
-[ bearer_token: <secret> ]
-
-# Optional bearer token file authentication information.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -1253,15 +1277,19 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Sets the `Authorization` header on every request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file` and other authentication mechanisms.
-# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token` instead.
-[ bearer_token: <string> ]
-
-# Sets the `Authorization` header on every request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token` and other authentication mechanisms.
-# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token_file` instead.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+# NOTE: The current version of DC/OS marathon (v1.11.0) does not support
+# standard `Authentication` header, use `auth_token` or `auth_token_file`
+# instead.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # TLS configuration for connecting to marathon servers
 tls_config:

@@ -1447,13 +1475,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Sets the `Authorization` header on every request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
-[ bearer_token: <string> ]
-
-# Sets the `Authorization` header on every request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Configures the scrape request's TLS settings.
 tls_config:

@@ -1616,13 +1647,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Sets the `Authorization` header on every request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
-[ bearer_token: <string> ]
-
-# Sets the `Authorization` header on every request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Configures the scrape request's TLS settings.
 tls_config:

@@ -1742,13 +1776,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Sets the `Authorization` header on every remote write request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
-[ bearer_token: <string> ]
-
-# Sets the `Authorization` header on every remote write request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Configures the remote write request's TLS settings.
 tls_config:

@@ -1825,13 +1862,16 @@ basic_auth:
   [ password: <secret> ]
   [ password_file: <string> ]

-# Sets the `Authorization` header on every remote read request with
-# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
-[ bearer_token: <string> ]
-
-# Sets the `Authorization` header on every remote read request with the bearer token
-# read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: <filename> ]
+# Optional the `Authorization` header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]

 # Configures the remote read request's TLS settings.
 tls_config:
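To summarise the documentation changes above, an illustrative before/after for a single scrape job; the job name and token path are placeholders, not values from this diff:

```yaml
scrape_configs:
  - job_name: example
    # Previously:
    # bearer_token_file: /etc/prometheus/token
    # Equivalent configuration with the new authorization block:
    authorization:
      type: Bearer                              # default
      credentials_file: /etc/prometheus/token   # placeholder path
```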
@@ -73,6 +73,15 @@ For each input time series, `changes(v range-vector)` returns the number of
 times its value has changed within the provided time range as an instant
 vector.

+## `clamp()`
+
+`clamp(v instant-vector, min scalar, max scalar)`
+clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
+
+Special cases:
+- Return an empty vector if `min > max`
+- Return `NaN` if `min` or `max` is `NaN`
+
 ## `clamp_max()`

 `clamp_max(v instant-vector, max scalar)` clamps the sample values of all
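An illustrative query for the new `clamp()` function (the metric name is a placeholder); for `min <= max` it matches the nested `clamp_max(clamp_min(...))` form:

```
clamp(node_cpu_usage_percent, 0, 100)
# equivalent (when min <= max) to:
clamp_max(clamp_min(node_cpu_usage_percent, 0), 100)
```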
@@ -370,6 +379,10 @@ Given a single-element input vector, `scalar(v instant-vector)` returns the
 sample value of that single element as a scalar. If the input vector does not
 have exactly one element, `scalar` will return `NaN`.

+## `sgn()`
+
+`sgn(v instant-vector)` returns a vector with all sample values converted to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 if v is equal to zero.
+
 ## `sort()`

 `sort(v instant-vector)` returns vector elements sorted by their sample values,
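An illustrative use of `sgn()` (the metric name is a placeholder): reduce a rate of change to its direction, yielding -1, 0, or 1 per series:

```
sgn(delta(node_filesystem_avail_bytes[1h]))
```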
@@ -418,6 +431,7 @@ over time and return an instant vector with per-series aggregation results:
 * `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval.
 * `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval.
 * `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval.
+* `last_over_time(range-vector)`: the most recent point value in specified interval.

 Note that all values in the specified interval have the same weight in the
 aggregation even if the values are not equally spaced throughout the interval.
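An illustrative `last_over_time` query (the metric name is a placeholder): pick the most recently scraped sample within the window, which is useful for sparsely updated series:

```
last_over_time(batch_job_last_success_timestamp[1h])
```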
@@ -125,7 +125,7 @@ For details on configuring remote storage integrations in Prometheus, see the [r

 The built-in remote write receiver can be enabled by setting the `--enable-feature=remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.

-For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
+For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).

 Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. This means that remote read queries have some scalability limit, since all necessary data needs to be loaded into the querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being.
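As a sketch of how the receiver described above is used (the hostname is a placeholder): start the receiving server with `--enable-feature=remote-write-receiver`, then point another Prometheus at its `/api/v1/write` endpoint:

```yaml
remote_write:
  - url: http://receiver.example:9090/api/v1/write
```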
@@ -12,7 +12,8 @@ scrape_configs:
   - job_name: 'node'

     digitalocean_sd_configs:
-      - bearer_token: "<replace with a Personal Access Token>"
+      - authorization:
+          credentials: "<replace with a Personal Access Token>"
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_digitalocean_tags]
@@ -12,7 +12,8 @@ scrape_configs:
   - job_name: 'node'

     hetzner_sd_configs:
-      - bearer_token: "<replace with a Hetzner Cloud API Token>"
+      - authorization:
+          credentials: "<replace with a Hetzner Cloud API Token>"
        platform: "hcloud"
     relabel_configs:
       # Use the public IPv4 and port 9100 to scrape the target.

@@ -24,7 +25,8 @@ scrape_configs:
   - job_name: 'node_private'

     hetzner_sd_configs:
-      - bearer_token: "<replace with a Hetzner Cloud API Token>"
+      - authorization:
+          credentials: "<replace with a Hetzner Cloud API Token>"
        platform: "hcloud"
     relabel_configs:
       # Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
@@ -25,7 +25,7 @@
     # `http`.
     scheme: https

-    # This TLS & bearer token file config is used to connect to the actual scrape
+    # This TLS & authorization config is used to connect to the actual scrape
     # endpoints for cluster components. This is separate to discovery auth
     # configuration because discovery & scraping are two separate concerns in
     # Prometheus. The discovery auth config is automatic if Prometheus runs inside

@@ -40,7 +40,8 @@
     # disable certificate verification by uncommenting the line below.
     #
     # insecure_skip_verify: true
-    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token

     # Keep only the default/kubernetes service endpoints for the https port. This
     # will add targets for each API server which Kubernetes adds an endpoint to

@@ -62,7 +63,7 @@
     # `http`.
     scheme: https

-    # This TLS & bearer token file config is used to connect to the actual scrape
+    # This TLS & authorization config is used to connect to the actual scrape
     # endpoints for cluster components. This is separate to discovery auth
     # configuration because discovery & scraping are two separate concerns in
     # Prometheus. The discovery auth config is automatic if Prometheus runs inside

@@ -77,7 +78,8 @@
     # disable certificate verification by uncommenting the line below.
     #
     # insecure_skip_verify: true
-    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token

     kubernetes_sd_configs:
       - role: node

@@ -112,7 +114,7 @@
     # are used.
     metrics_path: /metrics/cadvisor

-    # This TLS & bearer token file config is used to connect to the actual scrape
+    # This TLS & authorization config is used to connect to the actual scrape
     # endpoints for cluster components. This is separate to discovery auth
     # configuration because discovery & scraping are two separate concerns in
     # Prometheus. The discovery auth config is automatic if Prometheus runs inside

@@ -127,7 +129,8 @@
     # disable certificate verification by uncommenting the line below.
     #
     # insecure_skip_verify: true
-    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token

     kubernetes_sd_configs:
       - role: node
@@ -44,7 +44,7 @@ Internally, the scrape discovery manager runs an instance of each configuration-

 When a configuration change is applied, the discovery manager stops all currently running discovery mechanisms and restarts new ones as defined in the new configuration file.

-For more details, see the more extensive [documentation about service discovery internals](https://github.com/prometheus/prometheus/blob/master/discovery/README.md).
+For more details, see the more extensive [documentation about service discovery internals](https://github.com/prometheus/prometheus/blob/main/discovery/README.md).

 ## Scrape manager

@@ -93,7 +93,7 @@ Currently rules still read and write directly from/to the fanout storage, but th

 ### Local storage

-About Prometheus's local on-disk time series database, please refer to [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/master/tsdb/db.go). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).
+About Prometheus's local on-disk time series database, please refer to [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/main/tsdb/db.go). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).

 ### Remote storage
go.mod (2 changes)

@@ -48,7 +48,7 @@ require (
 	github.com/prometheus/alertmanager v0.21.0
 	github.com/prometheus/client_golang v1.9.0
 	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.15.0
+	github.com/prometheus/common v0.17.0
 	github.com/prometheus/exporter-toolkit v0.5.1
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
 	github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
go.sum (2 changes)

@@ -715,6 +715,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA=
+github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c=
 github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1240,11 +1240,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
 			ev.currentSamples -= len(points)
 			points = points[:0]
 			it.Reset(s.Iterator())
+			metric := selVS.Series[i].Labels()
+			// The last_over_time function acts like offset; thus, it
+			// should keep the metric name.  For all the other range
+			// vector functions, the only change needed is to drop the
+			// metric name in the output.
+			if e.Func.Name != "last_over_time" {
+				metric = dropMetricName(metric)
+			}
 			ss := Series{
-				// For all range vector functions, the only change to the
-				// output labels is dropping the metric name so just do
-				// it once here.
-				Metric: dropMetricName(selVS.Series[i].Labels()),
+				Metric: metric,
 				Points: getPointSlice(numSteps),
 			}
 			inMatrix[0].Metric = selVS.Series[i].Labels()
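The practical effect of the change above, shown with an illustrative query that is not part of this diff: `last_over_time` behaves like `offset` with respect to labels and keeps the metric name, while other range-vector functions still drop it.

```
last_over_time(up[5m])   # result series keep the metric name "up"
max_over_time(up[5m])    # result series have the metric name dropped
```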
@@ -70,17 +70,17 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	if len(samples.Points) < 2 {
 		return enh.Out
 	}
-	var (
-		counterCorrection float64
-		lastValue         float64
-	)
-	for _, sample := range samples.Points {
-		if isCounter && sample.V < lastValue {
-			counterCorrection += lastValue
-		}
-		lastValue = sample.V
-	}
-	resultValue := lastValue - samples.Points[0].V + counterCorrection
+
+	resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V
+	if isCounter {
+		var lastValue float64
+		for _, sample := range samples.Points {
+			if sample.V < lastValue {
+				resultValue += lastValue
+			}
+			lastValue = sample.V
+		}
+	}

 	// Duration between first/last samples and boundary of range.
 	durationToStart := float64(samples.Points[0].T-rangeStart) / 1000
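A standalone sketch (not repository code) of the refactored counter logic above, with a worked example: for counter samples 1, 5, 2, 8 there is one reset after 5, so the raw difference 8 − 1 = 7 is corrected by re-adding the pre-reset value 5, giving 12 — the same result the old `counterCorrection` form produced.

```go
package main

import "fmt"

// increase mirrors the new loop: start from last-minus-first, then add back the
// value seen just before each counter reset.
func increase(samples []float64, isCounter bool) float64 {
	result := samples[len(samples)-1] - samples[0]
	if isCounter {
		var last float64
		for _, v := range samples {
			if v < last {
				result += last // counter reset: re-add the progress lost at the reset
			}
			last = v
		}
	}
	return result
}

func main() {
	fmt.Println(increase([]float64{1, 5, 2, 8}, true)) // prints 12
}
```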
@@ -278,6 +278,23 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 	return Vector(byValueSorter)
 }

+// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector ===
+func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	vec := vals[0].(Vector)
+	min := vals[1].(Vector)[0].Point.V
+	max := vals[2].(Vector)[0].Point.V
+	if max < min {
+		return enh.Out
+	}
+	for _, el := range vec {
+		enh.Out = append(enh.Out, Sample{
+			Metric: enh.DropMetricName(el.Metric),
+			Point:  Point{V: math.Max(min, math.Min(max, el.V))},
+		})
+	}
+	return enh.Out
+}
+
 // === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector ===
 func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	vec := vals[0].(Vector)

@@ -383,7 +400,16 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo
 	})
 }

-// === floor(Vector parser.ValueTypeVector) Vector ===
+// === last_over_time(Matrix parser.ValueTypeMatrix) Vector ===
+func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	el := vals[0].(Matrix)[0]
+
+	return append(enh.Out, Sample{
+		Metric: el.Metric,
+		Point:  Point{V: el.Points[len(el.Points)-1].V},
+	})
+}
+
 // === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
 func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	return aggrOverTime(vals, enh, func(values []Point) float64 {

@@ -537,6 +563,18 @@ func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 	return simpleFunc(vals, enh, math.Log10)
 }

+// === sgn(Vector parser.ValueTypeVector) Vector ===
+func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, func(v float64) float64 {
+		if v < 0 {
+			return -1
+		} else if v > 0 {
+			return 1
+		}
+		return v
+	})
+}
+
 // === timestamp(Vector parser.ValueTypeVector) Vector ===
 func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	vec := vals[0].(Vector)

@@ -893,6 +931,7 @@ var FunctionCalls = map[string]FunctionCall{
 	"avg_over_time":   funcAvgOverTime,
 	"ceil":            funcCeil,
 	"changes":         funcChanges,
+	"clamp":           funcClamp,
 	"clamp_max":       funcClampMax,
 	"clamp_min":       funcClampMin,
 	"count_over_time": funcCountOverTime,

@@ -914,6 +953,7 @@ var FunctionCalls = map[string]FunctionCall{
 	"ln":              funcLn,
 	"log10":           funcLog10,
 	"log2":            funcLog2,
+	"last_over_time":  funcLastOverTime,
 	"max_over_time":   funcMaxOverTime,
 	"min_over_time":   funcMinOverTime,
 	"minute":          funcMinute,

@@ -924,6 +964,7 @@ var FunctionCalls = map[string]FunctionCall{
 	"resets":          funcResets,
 	"round":           funcRound,
 	"scalar":          funcScalar,
+	"sgn":             funcSgn,
 	"sort":            funcSort,
 	"sort_desc":       funcSortDesc,
 	"sqrt":            funcSqrt,
@@ -54,6 +54,11 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"clamp": {
+		Name:       "clamp",
+		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar, ValueTypeScalar},
+		ReturnType: ValueTypeVector,
+	},
 	"clamp_max": {
 		Name:       "clamp_max",
 		ArgTypes:   []ValueType{ValueTypeVector, ValueTypeScalar},

@@ -149,6 +154,11 @@ var Functions = map[string]*Function{
 		Variadic:   -1,
 		ReturnType: ValueTypeVector,
 	},
+	"last_over_time": {
+		Name:       "last_over_time",
+		ArgTypes:   []ValueType{ValueTypeMatrix},
+		ReturnType: ValueTypeVector,
+	},
 	"ln": {
 		Name:       "ln",
 		ArgTypes:   []ValueType{ValueTypeVector},

@@ -217,6 +227,11 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeVector},
 		ReturnType: ValueTypeScalar,
 	},
+	"sgn": {
+		Name:       "sgn",
+		ArgTypes:   []ValueType{ValueTypeVector},
+		ReturnType: ValueTypeVector,
+	},
 	"sort": {
 		Name:       "sort",
 		ArgTypes:   []ValueType{ValueTypeVector},
@@ -583,8 +583,12 @@ func lexEscape(l *Lexer) stateFn {
 			return lexString
 		}
 		x = x*base + d
-		ch = l.next()
 		n--
+
+		// Don't seek after last rune.
+		if n > 0 {
+			ch = l.next()
+		}
 	}

 	if x > max || 0xD800 <= x && x < 0xE000 {
@@ -3287,6 +3287,16 @@ var testSeries = []struct {
 		input:          `my_metric{a="b"} 1 2 3 `,
 		expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"),
 		expectedValues: newSeq(1, 2, 3),
+	}, {
+		// Handle escaped unicode characters as whole label values.
+		input:          `my_metric{a="\u70ac"} 1 2 3`,
+		expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", `炬`),
+		expectedValues: newSeq(1, 2, 3),
+	}, {
+		// Handle escaped unicode characters as partial label values.
+		input:          `my_metric{a="\u70ac = torch"} 1 2 3`,
+		expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", `炬 = torch`),
+		expectedValues: newSeq(1, 2, 3),
 	}, {
 		input: `my_metric{a="b"} -3-3 -3`,
 		fail:  true,
@@ -372,7 +372,7 @@ eval instant at 60m vector(time())
   {} 3600


-# Tests for clamp_max and clamp_min().
+# Tests for clamp_max, clamp_min(), and clamp().
 load 5m
 	test_clamp{src="clamp-a"}	-50
 	test_clamp{src="clamp-b"}	0

@@ -388,6 +388,11 @@ eval instant at 0m clamp_min(test_clamp, -25)
 	{src="clamp-b"}	0
 	{src="clamp-c"}	100

+eval instant at 0m clamp(test_clamp, -25, 75)
+	{src="clamp-a"}	-25
+	{src="clamp-b"}	0
+	{src="clamp-c"}	75
+
 eval instant at 0m clamp_max(clamp_min(test_clamp, -20), 70)
 	{src="clamp-a"}	-20
 	{src="clamp-b"}	0

@@ -398,6 +403,36 @@ eval instant at 0m clamp_max((clamp_min(test_clamp, (-20))), (70))
 	{src="clamp-b"}	0
 	{src="clamp-c"}	70

+eval instant at 0m clamp(test_clamp, 0, NaN)
+	{src="clamp-a"}	NaN
+	{src="clamp-b"}	NaN
+	{src="clamp-c"}	NaN
+
+eval instant at 0m clamp(test_clamp, NaN, 0)
+	{src="clamp-a"}	NaN
+	{src="clamp-b"}	NaN
+	{src="clamp-c"}	NaN
+
+eval instant at 0m clamp(test_clamp, 5, -5)
+
+# Test cases for sgn.
+clear
+load 5m
+	test_sgn{src="sgn-a"}	-Inf
+	test_sgn{src="sgn-b"}	Inf
+	test_sgn{src="sgn-c"}	NaN
+	test_sgn{src="sgn-d"}	-50
+	test_sgn{src="sgn-e"}	0
+	test_sgn{src="sgn-f"}	100
+
+eval instant at 0m sgn(test_sgn)
+	{src="sgn-a"}	-1
+	{src="sgn-b"}	1
+	{src="sgn-c"}	NaN
+	{src="sgn-d"}	-1
+	{src="sgn-e"}	0
+	{src="sgn-f"}	1
+

 # Tests for sort/sort_desc.
 clear

@@ -745,6 +780,13 @@ eval instant at 1m max_over_time(data[1m])
 	{type="some_nan3"}	1
 	{type="only_nan"}	NaN

+eval instant at 1m last_over_time(data[1m])
+	data{type="numbers"}	3
+	data{type="some_nan"}	NaN
+	data{type="some_nan2"}	1
+	data{type="some_nan3"}	1
+	data{type="only_nan"}	NaN
+
 clear

 # Test for absent()
@@ -30,6 +30,11 @@ source_dir="$(pwd)"
 tmp_dir="$(mktemp -d)"
 trap 'rm -rf "${tmp_dir}"' EXIT

+get_default_branch(){
+	local url="https://api.github.com/repos/${1}"
+	curl --retry 5 --silent -u "${git_user}:${GITHUB_TOKEN}" "${url}" 2>/dev/null | jq -r .default_branch
+}
+
 fetch_repos() {
 	local url="https://api.github.com/users/${1}/repos?per_page=100"
 	curl --retry 5 --silent -u "${git_user}:${GITHUB_TOKEN}" "${url}" 2>/dev/null |

@@ -47,9 +52,9 @@ push_branch() {
 		--set-upstream "${branch}" 1>/dev/null 2>&1
 }

-post_template='{"title":"%s","base":"master","head":"%s","body":"%s"}'
-post_json="$(printf "${post_template}" "${pr_title}" "${branch}" "${pr_msg}")"
 post_pull_request() {
+	post_template='{"title":"%s","base":"%s","head":"%s","body":"%s"}'
+	post_json="$(printf "${post_template}" "${pr_title}" "${2}" "${branch}" "${pr_msg}")"
 	curl --show-error --silent --fail \
 		-u "${git_user}:${GITHUB_TOKEN}" \
 		-d "${post_json}" \

@@ -65,11 +70,17 @@ process_repo() {
 	local org_repo="$1"
 	echo -e "\e[32mAnalyzing '${org_repo}'\e[0m"

+	default_branch="$(get_default_branch ${1})"
+	if [[ -z "${default_branch}" ]]; then
+		echo "Can't get the default branch."
+		return
+	fi
+
 	local needs_update=()
 	for source_file in ${SYNC_FILES}; do
 		source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)"

-		target_file="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/master/${source_file}")"
+		target_file="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${source_file}")"
 		if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
 			echo "LICENSE in ${org_repo} is not apache, skipping."
 			continue

@@ -113,7 +124,7 @@ process_repo() {
 	git add .
 	git commit -s -m "${commit_msg}"
 	if push_branch "${org_repo}"; then
-		post_pull_request "${org_repo}"
+		post_pull_request "${org_repo}" "${default_branch}"
 	fi
 	fi
 }
@@ -1,5 +1,3 @@
-## master / unreleased
-
 ## 0.10.0

 - [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
@@ -1115,7 +1115,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
 		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
+		app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
 	}

 	require.NoError(t, app.Commit())
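The test change above follows the Appender interface rename from `Add` to `Append`, which now takes and returns a series reference so repeated appends to the same series can skip the label lookup. A hedged sketch of the intended call pattern (the exact reference type may differ between Prometheus versions, and the variable names are illustrative):

```go
// The first call passes ref = 0; later calls may reuse the returned reference.
var ref uint64
lset := labels.Labels{{Name: "a", Value: "b"}}
for i := int64(0); i < numSamples; i++ {
	var err error
	ref, err = app.Append(ref, lset, i, rand.Float64())
	require.NoError(t, err)
}
```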
|
|
@ -19,10 +19,8 @@ describe('ExpressionInput', () => {
|
|||
const metricNames = ['instance:node_cpu_utilisation:rate1m', 'node_cpu_guest_seconds_total', 'node_cpu_seconds_total'];
|
||||
const expressionInputProps = {
|
||||
value: 'node_cpu',
|
||||
autocompleteSections: {
|
||||
'Query History': [],
|
||||
'Metric Names': metricNames,
|
||||
},
|
||||
queryHistory: [],
|
||||
metricNames,
|
||||
executeQuery: (): void => {
|
||||
// Do nothing.
|
||||
},
|
||||
|
@ -133,14 +131,16 @@ describe('ExpressionInput', () => {
  describe('handleKeyPress', () => {
    it('should call executeQuery on Enter key pressed', () => {
      const spyExecuteQuery = jest.fn();
      const input = mount(<ExpressionInput executeQuery={spyExecuteQuery} {...({} as any)} />);
      const props = { ...expressionInputProps, executeQuery: spyExecuteQuery };
      const input = mount(<ExpressionInput {...props} />);
      const instance: any = input.instance();
      instance.handleKeyPress({ preventDefault: jest.fn, key: 'Enter' });
      expect(spyExecuteQuery).toHaveBeenCalled();
    });
    it('should NOT call executeQuery on Enter + Shift', () => {
      const spyExecuteQuery = jest.fn();
      const input = mount(<ExpressionInput executeQuery={spyExecuteQuery} {...({} as any)} />);
      const props = { ...expressionInputProps, executeQuery: spyExecuteQuery };
      const input = mount(<ExpressionInput {...props} />);
      const instance: any = input.instance();
      instance.handleKeyPress({ preventDefault: jest.fn, key: 'Enter', shiftKey: true });
      expect(spyExecuteQuery).not.toHaveBeenCalled();
@ -159,8 +159,13 @@ describe('ExpressionInput', () => {
  });

  describe('createAutocompleteSection', () => {
    const props = {
      ...expressionInputProps,
      metricNames: ['foo', 'bar', 'baz'],
    };

    it('should close menu if no matches found', () => {
      const input = mount(<ExpressionInput autocompleteSections={{ title: ['foo', 'bar', 'baz'] }} {...({} as any)} />);
      const input = mount(<ExpressionInput {...props} />);
      const instance: any = input.instance();
      const spyCloseMenu = jest.fn();
      instance.createAutocompleteSection({ inputValue: 'qqqqqq', closeMenu: spyCloseMenu });
@ -168,34 +173,22 @@ describe('ExpressionInput', () => {
      expect(spyCloseMenu).toHaveBeenCalled();
    });
  });
    it('should not render lsit if inputValue not exist', () => {
      const input = mount(<ExpressionInput autocompleteSections={{ title: ['foo', 'bar', 'baz'] }} {...({} as any)} />);
    it('should not render list if inputValue not exist', () => {
      const input = mount(<ExpressionInput {...props} />);
      const instance: any = input.instance();
      const spyCloseMenu = jest.fn();
      instance.createAutocompleteSection({ closeMenu: spyCloseMenu });
      setTimeout(() => expect(spyCloseMenu).toHaveBeenCalled());
    });
    it('should not render list if enableAutocomplete is false', () => {
      const input = mount(
        <ExpressionInput
          autocompleteSections={{ title: ['foo', 'bar', 'baz'] }}
          {...({} as any)}
          enableAutocomplete={false}
        />
      );
      const input = mount(<ExpressionInput {...props} enableAutocomplete={false} />);
      const instance: any = input.instance();
      const spyCloseMenu = jest.fn();
      instance.createAutocompleteSection({ closeMenu: spyCloseMenu });
      setTimeout(() => expect(spyCloseMenu).toHaveBeenCalled());
    });
    it('should render autosuggest-dropdown', () => {
      const input = mount(
        <ExpressionInput
          autocompleteSections={{ title: ['foo', 'bar', 'baz'] }}
          {...({} as any)}
          enableAutocomplete={true}
        />
      );
      const input = mount(<ExpressionInput {...props} enableAutocomplete={true} />);
      const instance: any = input.instance();
      const spyGetMenuProps = jest.fn();
      const sections = instance.createAutocompleteSection({
@ -264,8 +257,10 @@ describe('ExpressionInput', () => {

  it('renders an execute Button', () => {
    const addon = expressionInput.find(InputGroupAddon).filterWhere(addon => addon.prop('addonType') === 'append');
    const button = addon.find(Button);
    expect(button.prop('className')).toEqual('execute-btn');
    const button = addon
      .find(Button)
      .find('.execute-btn')
      .first();
    expect(button.prop('color')).toEqual('primary');
    expect(button.text()).toEqual('Execute');
  });
@ -6,12 +6,14 @@ import fuzzy from 'fuzzy';
import sanitizeHTML from 'sanitize-html';

import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faSearch, faSpinner } from '@fortawesome/free-solid-svg-icons';
import { faSearch, faSpinner, faGlobeEurope } from '@fortawesome/free-solid-svg-icons';
import MetricsExplorer from './MetricsExplorer';

interface ExpressionInputProps {
  value: string;
  onExpressionChange: (expr: string) => void;
  autocompleteSections: { [key: string]: string[] };
  queryHistory: string[];
  metricNames: string[];
  executeQuery: () => void;
  loading: boolean;
  enableAutocomplete: boolean;
@ -19,6 +21,7 @@ interface ExpressionInputProps {

interface ExpressionInputState {
  height: number | string;
  showMetricsExplorer: boolean;
}

class ExpressionInput extends Component<ExpressionInputProps, ExpressionInputState> {
@ -28,6 +31,7 @@ class ExpressionInput extends Component<ExpressionInputProps, ExpressionInputSta
    super(props);
    this.state = {
      height: 'auto',
      showMetricsExplorer: false,
    };
  }
@ -75,7 +79,10 @@ class ExpressionInput extends Component<ExpressionInputProps, ExpressionInputSta

  createAutocompleteSection = (downshift: ControllerStateAndHelpers<any>) => {
    const { inputValue = '', closeMenu, highlightedIndex } = downshift;
    const { autocompleteSections } = this.props;
    const autocompleteSections = {
      'Query History': this.props.queryHistory,
      'Metric Names': this.props.metricNames,
    };
    let index = 0;
    const sections =
      inputValue!.length && this.props.enableAutocomplete
@ -125,67 +132,111 @@ class ExpressionInput extends Component<ExpressionInputProps, ExpressionInputSta
    );
  };

  openMetricsExplorer = () => {
    this.setState({
      showMetricsExplorer: true,
    });
  };

  updateShowMetricsExplorer = (show: boolean) => {
    this.setState({
      showMetricsExplorer: show,
    });
  };

  insertAtCursor = (value: string) => {
    if (!this.exprInputRef.current) return;

    const startPosition = this.exprInputRef.current.selectionStart;
    const endPosition = this.exprInputRef.current.selectionEnd;

    const previousValue = this.exprInputRef.current.value;
    let newValue: string;
    if (startPosition && endPosition) {
      newValue =
        previousValue.substring(0, startPosition) + value + previousValue.substring(endPosition, previousValue.length);
    } else {
      newValue = previousValue + value;
    }

    this.setValue(newValue);
  };
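For reference, the string handling in `insertAtCursor` above can be read as a small pure function. The sketch below is illustrative only (the name `spliceAtCursor` is not part of the commit) and mirrors the truthiness check above, so a cursor at position 0 falls back to appending:

```ts
// Illustrative helper mirroring insertAtCursor's splice logic (not part of the commit):
// splice `value` into `previous` between the selection bounds, or append when absent.
const spliceAtCursor = (previous: string, value: string, start: number | null, end: number | null): string =>
  start && end
    ? previous.substring(0, start) + value + previous.substring(end, previous.length)
    : previous + value;

// Example: with the cursor inside the parentheses of 'rate()', the metric lands between them:
// spliceAtCursor('rate()', 'node_cpu_seconds_total', 5, 5) === 'rate(node_cpu_seconds_total)'
```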
render() {
const { executeQuery, value } = this.props;
const { height } = this.state;
return (
<Downshift onSelect={this.setValue}>
{downshift => (
<div>
<InputGroup className="expression-input">
<InputGroupAddon addonType="prepend">
<InputGroupText>
{this.props.loading ? <FontAwesomeIcon icon={faSpinner} spin /> : <FontAwesomeIcon icon={faSearch} />}
</InputGroupText>
</InputGroupAddon>
<Input
onInput={this.handleInput}
style={{ height }}
autoFocus
type="textarea"
rows="1"
onKeyPress={this.handleKeyPress}
placeholder="Expression (press Shift+Enter for newlines)"
innerRef={this.exprInputRef}
{...downshift.getInputProps({
onKeyDown: (event: React.KeyboardEvent): void => {
switch (event.key) {
case 'Home':
case 'End':
// We want to be able to jump to the beginning/end of the input field.
// By default, Downshift otherwise jumps to the first/last suggestion item instead.
(event.nativeEvent as any).preventDownshiftDefault = true;
break;
case 'ArrowUp':
case 'ArrowDown':
if (!downshift.isOpen) {
<>
<Downshift onSelect={this.setValue}>
{downshift => (
<div>
<InputGroup className="expression-input">
<InputGroupAddon addonType="prepend">
<InputGroupText>
{this.props.loading ? <FontAwesomeIcon icon={faSpinner} spin /> : <FontAwesomeIcon icon={faSearch} />}
</InputGroupText>
</InputGroupAddon>
<Input
onInput={this.handleInput}
style={{ height }}
autoFocus
type="textarea"
rows="1"
onKeyPress={this.handleKeyPress}
placeholder="Expression (press Shift+Enter for newlines)"
innerRef={this.exprInputRef}
{...downshift.getInputProps({
onKeyDown: (event: React.KeyboardEvent): void => {
switch (event.key) {
case 'Home':
case 'End':
// We want to be able to jump to the beginning/end of the input field.
// By default, Downshift otherwise jumps to the first/last suggestion item instead.
(event.nativeEvent as any).preventDownshiftDefault = true;
}
break;
case 'Enter':
downshift.closeMenu();
break;
case 'Escape':
if (!downshift.isOpen) {
this.exprInputRef.current!.blur();
}
break;
default:
}
},
} as any)}
value={value}
/>
<InputGroupAddon addonType="append">
<Button className="execute-btn" color="primary" onClick={executeQuery}>
Execute
</Button>
</InputGroupAddon>
</InputGroup>
{downshift.isOpen && this.createAutocompleteSection(downshift)}
</div>
)}
</Downshift>
break;
case 'ArrowUp':
case 'ArrowDown':
if (!downshift.isOpen) {
(event.nativeEvent as any).preventDownshiftDefault = true;
}
break;
case 'Enter':
downshift.closeMenu();
break;
case 'Escape':
if (!downshift.isOpen) {
this.exprInputRef.current!.blur();
}
break;
default:
}
},
} as any)}
value={value}
/>
<InputGroupAddon addonType="append">
<Button className="btn-light border" title="Open metrics explorer" onClick={this.openMetricsExplorer}>
<FontAwesomeIcon icon={faGlobeEurope} />
</Button>
</InputGroupAddon>
<InputGroupAddon addonType="append">
<Button className="execute-btn" color="primary" onClick={executeQuery}>
Execute
</Button>
</InputGroupAddon>
</InputGroup>
{downshift.isOpen && this.createAutocompleteSection(downshift)}
</div>
)}
</Downshift>

<MetricsExplorer
show={this.state.showMetricsExplorer}
updateShow={this.updateShowMetricsExplorer}
metrics={this.props.metricNames}
insertAtCursor={this.insertAtCursor}
/>
</>
);
}
}
@ -0,0 +1,14 @@
.metrics-explorer.modal-dialog {
  max-width: 750px;
  overflow-wrap: break-word;
}

.metrics-explorer .metric {
  cursor: pointer;
  margin: 0;
  padding: 5px;
}

.metrics-explorer .metric:hover {
  background: #efefef;
}
@ -0,0 +1,38 @@
import React, { Component } from 'react';
import { Modal, ModalBody, ModalHeader } from 'reactstrap';
import './MetricsExplorer.css';

interface Props {
  show: boolean;
  updateShow(show: boolean): void;
  metrics: string[];
  insertAtCursor(value: string): void;
}

class MetricsExplorer extends Component<Props, {}> {
  handleMetricClick = (query: string) => {
    this.props.insertAtCursor(query);
    this.props.updateShow(false);
  };

  toggle = () => {
    this.props.updateShow(!this.props.show);
  };

  render() {
    return (
      <Modal isOpen={this.props.show} toggle={this.toggle} className="metrics-explorer">
        <ModalHeader toggle={this.toggle}>Metrics Explorer</ModalHeader>
        <ModalBody>
          {this.props.metrics.map(metric => (
            <p className="metric" key="metric" onClick={this.handleMetricClick.bind(this, metric)}>
              {metric}
            </p>
          ))}
        </ModalBody>
      </Modal>
    );
  }
}

export default MetricsExplorer;
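A possible smoke test for the new component, written in the style of the existing enzyme tests; it is not part of this commit, and it assumes reactstrap's Modal content is reachable through enzyme's portal traversal under jsdom:

```tsx
import React from 'react';
import { mount } from 'enzyme';
import MetricsExplorer from './MetricsExplorer';

// Hypothetical smoke test (not in this commit): clicking a metric should insert it
// at the cursor and hide the explorer again.
describe('MetricsExplorer', () => {
  it('inserts the clicked metric and closes', () => {
    const insertAtCursor = jest.fn();
    const updateShow = jest.fn();
    const explorer = mount(
      <MetricsExplorer show={true} updateShow={updateShow} metrics={['up']} insertAtCursor={insertAtCursor} />
    );
    explorer
      .find('.metric')
      .first()
      .simulate('click');
    expect(insertAtCursor).toHaveBeenCalledWith('up');
    expect(updateShow).toHaveBeenCalledWith(false);
  });
});
```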
@ -41,14 +41,12 @@ describe('Panel', () => {
  it('renders an ExpressionInput', () => {
    const input = panel.find(ExpressionInput);
    expect(input.prop('value')).toEqual('prometheus_engine');
    expect(input.prop('autocompleteSections')).toEqual({
      'Metric Names': [
        'prometheus_engine_queries',
        'prometheus_engine_queries_concurrent_max',
        'prometheus_engine_query_duration_seconds',
      ],
      'Query History': [],
    });
    expect(input.prop('metricNames')).toEqual([
      'prometheus_engine_queries',
      'prometheus_engine_queries_concurrent_max',
      'prometheus_engine_query_duration_seconds',
    ]);
    expect(input.prop('queryHistory')).toEqual([]);
  });

  it('renders NavLinks', () => {
@ -238,10 +238,8 @@ class Panel extends Component<PanelProps, PanelState> {
              executeQuery={this.executeQuery}
              loading={this.state.loading}
              enableAutocomplete={this.props.enableAutocomplete}
              autocompleteSections={{
                'Query History': pastQueries,
                'Metric Names': metricNames,
              }}
              queryHistory={pastQueries}
              metricNames={metricNames}
            />
          </Col>
        </Row>