mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2024-12-22 12:30:07 +00:00)
f673923629
With the CI occasionally slowing down, we're starting to see again some spurious failures despite the long 1-second timeouts. These failures are disturbing false positives, and the 1-second delay doesn't provide as much value as it could, yet it is already long enough that waiting for the tests to complete becomes a pain for developers. This commit adds support for a new environment variable, HAPROXY_TEST_TIMEOUT, which lets anyone adjust the connect, client and server timeouts. The default was set to 5 seconds, which should be plenty for quite some time in the CI. All relevant values that were 200ms or above were replaced by this one; a few larger values were left alone because they are special. One test for the set-timeout action that used to rely on a fixed 1-second value was extended to a fixed 5 seconds: the timeout is normally not reached, but its value needs to be known so that the old and new values can be compared.
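For context, the mechanism relies on environment-variable expansion with a default value: "${HAPROXY_TEST_TIMEOUT-5s}" resolves to the value of HAPROXY_TEST_TIMEOUT when the variable is set, and to the literal 5s otherwise. A minimal sketch of how a developer could shorten the timeouts for a local run is shown below; the "make reg-tests" entry point is an assumption about how the suite is launched, so adjust it to your own runner.

    # Assumption: the regression tests are launched through the usual make target.
    # Shrink the connect/client/server timeouts to 500ms on a fast local machine.
    HAPROXY_TEST_TIMEOUT=500ms make reg-tests

    # Corresponding expansion as used in the test configuration below:
    # the environment value when set, otherwise the 5s default.
    timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"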
113 lines
4.0 KiB
Plaintext
varnishtest "prometheus exporter test"

#REQUIRE_VERSION=2.4
#REQUIRE_SERVICES=prometheus-exporter

feature ignore_unknown_macro

server s1 {
    rxreq
    txresp
} -repeat 2 -start

server s2 {
    rxreq
    txresp
} -repeat 2 -start

haproxy h1 -conf {
    defaults
        mode http
        timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
        option socket-stats

    listen stats
        bind "fd@${stats}"
        http-request use-service prometheus-exporter if { path /metrics }

    frontend fe
        bind "fd@${fe}"
        default_backend be

    backend be
        stick-table type ip size 1m expire 10s store http_req_rate(10s)
        server s1 ${s1_addr}:${s1_port}
        server s2 ${s2_addr}:${s2_port} check maxqueue 10 maxconn 12 pool-max-conn 42
} -start

client c1 -connect ${h1_stats_sock} {
    txreq -url "/metrics"
    rxresp
    # test general metrics
    expect resp.status == 200
    expect resp.body ~ ".*haproxy_process.*"
    expect resp.body ~ ".*haproxy_frontend.*"
    expect resp.body ~ ".*haproxy_listener.*"
    expect resp.body ~ ".*haproxy_backend.*"
    expect resp.body ~ ".*haproxy_server.*"
    expect resp.body ~ ".*haproxy_sticktable.*"

    # test expected NaN values
    expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s2\"} 0.*"
    expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s2\"} 0.*"

    expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s2\"} 10.*"

    expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s2\"} 12.*"

    expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"stats\"} NaN.*"
    expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"be\"} 0.*"
    expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s2\"} 0.*"

    expect resp.body ~ ".*haproxy_server_current_throttle{proxy=\"be\",server=\"s1\"} NaN.*"

    expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s1\"} NaN.*"
    expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s2\"} 42.*"

    # test well known labels presence
    expect resp.body ~ ".*haproxy_process_build_info{version=\".*\"} 1.*"
    expect resp.body ~ ".*haproxy_frontend_http_responses_total{proxy=\"stats\",code=\"4xx\"} 0.*"
    expect resp.body ~ ".*haproxy_frontend_status{proxy=\"fe\",state=\"UP\"} 1.*"
    expect resp.body ~ ".*haproxy_listener_status{proxy=\"stats\",listener=\"sock-1\",state=\"WAITING\"} 0.*"
    expect resp.body ~ ".*haproxy_backend_status{proxy=\"be\",state=\"UP\"} 1.*"
    expect resp.body ~ ".*haproxy_server_status{proxy=\"be\",server=\"s1\",state=\"DOWN\"} 0.*"
    expect resp.body ~ ".*haproxy_server_check_status{proxy=\"be\",server=\"s2\",state=\"HANA\"} 0.*"

    # test scope
    txreq -url "/metrics?scope="
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 0

    txreq -url "/metrics?scope=server"
    rxresp
    expect resp.status == 200
    expect resp.body !~ ".*haproxy_process.*"
    expect resp.body !~ ".*haproxy_frontend.*"
    expect resp.body !~ ".*haproxy_listener.*"
    expect resp.body !~ ".*haproxy_backend.*"
    expect resp.body ~ ".*haproxy_server.*"
    expect resp.body !~ ".*haproxy_sticktable.*"

    txreq -url "/metrics?scope=frontend&scope=backend"
    rxresp
    expect resp.status == 200
    expect resp.body !~ ".*haproxy_process.*"
    expect resp.body ~ ".*haproxy_frontend.*"
    expect resp.body !~ ".*haproxy_listener.*"
    expect resp.body ~ ".*haproxy_backend.*"
    expect resp.body !~ ".*haproxy_server.*"
    expect resp.body !~ ".*haproxy_sticktable.*"

    txreq -url "/metrics?scope"
    rxresp
    expect resp.status == 400
} -run