mirror of
http://git.haproxy.org/git/haproxy.git/
synced 2025-01-07 04:39:35 +00:00
e1465c1e46
Some regtests involve multiple requests from multiple clients, which can be dispatched as multiple requests to a server. It turns out that the idle connection sharing works so well that very quickly few connections are used, and regularly some of the remaining idle server connections time out at the moment they were going to be reused, causing those random "HTTP header incomplete" traces in the logs that make them fail often. In the end this is only an artefact of the test environment. And indeed, some tests like normalize-uri which perform a lot of reuse fail very often, about 20-30% of the times in the CI, and 100% of the time in local when running 1000 tests in a row. Others like ubase64, sample_fetches or vary_* fail less often but still a lot in tests. This patch addresses this by adding "tune.idle-pool.shared off" to all tests which have at least twice as many requests as clients. It proves very effective as no single error happens on normalize-uri anymore after 10000 tests. Also 100 full runs of all tests yield no error anymore. One test is tricky, http_abortonclose, it used to fail ~10 times per 1000 runs and with this workaround still fails once every 1000 runs. But the test is complex and there's a warning in it mentioning a possible issue when run in parallel due to a port reuse.
89 lines
1.9 KiB
Plaintext
varnishtest "Webgui stats page check filtering with scope and changing server state"

#REQUIRE_VERSION=1.6

feature ignore_unknown_macro

# Dummy backend server; never receives traffic, only gives the proxy
# a valid address to point its server lines at.
server s1 {
} -start

haproxy h1 -conf {
    global
        # WT: limit false-positives causing "HTTP header incomplete" due to
        # idle server connections being randomly used and randomly expiring
        # under us.
        tune.idle-pool.shared off

        stats socket /tmp/haproxy.socket level admin

    defaults
        mode http
        ${no-htx} option http-use-htx

    # Stats frontend with admin enabled so POSTs can change server state.
    frontend fe1
        bind "fd@${fe1}"
        stats enable
        stats refresh 5s
        stats uri /
        stats admin if TRUE

    backend b1
        server srv1 ${s1_addr}:${s1_port}
        server srv2 ${s1_addr}:${s1_port}
        server srv3 ${s1_addr}:${s1_port}

    backend b2
        server srv1 ${s1_addr}:${s1_port}
        server srv2 ${s1_addr}:${s1_port}
} -start

# Plain CSV stats dump must succeed.
client c1 -connect ${h1_fe1_sock} {
    txreq -url "/;csv;"
    rxresp
    expect resp.status == 200
} -run

# CSV stats dump filtered on scope b1 must succeed.
client c2 -connect ${h1_fe1_sock} {
    txreq -url "/?;csv;scope=b1"
    rxresp
    expect resp.status == 200
} -run

haproxy h1 -cli {
    send "show stat"
    expect ~ .*
}

client c3 -connect ${h1_fe1_sock} {
    txreq -url "/"
    rxresp
    expect resp.status == 200

    # Scope filtering: b1 rows present, b2 rows absent.
    txreq -url "/?;csv;scope=b1"
    rxresp
    expect resp.status == 200
    expect resp.body ~ ".*\nb1,BACKEND.*"
    expect resp.body !~ ".*\nb2,BACKEND.*"

    # State changes via admin POST; the stats page answers 303 (redirect).
    # "b=%233" / "b=%234" are URL-encoded "#3" / "#4" backend identifiers.
    txreq -req "POST" -url "/?scope=b2" -body "s=srv1&s=srv2&s=srv3&action=maint&b=%233"
    rxresp
    expect resp.status == 303

    txreq -req "POST" -url "/" -body "s=srv2&action=drain&b=%233"
    rxresp
    expect resp.status == 303

    txreq -req "POST" -url "/" -body "s=srv1&action=maint&b=%234"
    rxresp
    expect resp.status == 303

    txreq -url "/?;csv;scope=fe1"
    rxresp
    expect resp.status == 200
} -run

# Verify via the CLI that the POSTed state changes actually took effect.
haproxy h1 -cli {
    send "show stat"
    expect ~ "\nb1,srv1.*MAINT.*\nb1,srv2.*DRAIN.*\nb1,srv3.*MAINT.*\nb1,BACKEND.*DOWN.*\nb2,srv1.*MAINT.*\nb2,srv2.*no check.*\nb2,BACKEND.*UP"
} -wait