Commit e1465c1e46:

Some regtests involve multiple requests from multiple clients, which can be dispatched as multiple requests to a server. It turns out that idle connection sharing works so well that very quickly only a few connections are used, and regularly some of the remaining idle server connections time out at the very moment they were about to be reused, causing those random "HTTP header incomplete" traces in the logs that make the tests fail often. In the end this is only an artefact of the test environment.

And indeed, some tests like normalize-uri, which perform a lot of reuse, fail very often: about 20-30% of the time in the CI, and 100% of the time locally when running 1000 tests in a row. Others like ubase64, sample_fetches or vary_* fail less often but still a lot.

This patch addresses the problem by adding "tune.idle-pool.shared off" to all tests which have at least twice as many requests as clients. It proves very effective: not a single error happens on normalize-uri anymore after 10000 tests, and 100 full runs of all tests yield no error anymore.

One test remains tricky: http_abortonclose used to fail ~10 times per 1000 runs and with this workaround still fails about once every 1000 runs. But that test is complex and it carries a warning about a possible issue when run in parallel due to port reuse.
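In practice the workaround amounts to adding the directive below to the global section of each affected test's embedded HAProxy configuration; the same directive, together with its explanatory comment, appears in the configuration further down in this file:

    global
        # limit false-positives causing "HTTP header incomplete" due to idle
        # server connections being randomly used and randomly expiring under us
        tune.idle-pool.shared off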
varnishtest "Basic cache test"
|
|
|
|
#REQUIRE_VERSION=1.9
|
|
|
|
feature ignore_unknown_macro
|
|
|
|
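# s1, s2 and s3 reply with chunked bodies of 3 x 15, 3 x 16 and 3 x 17 bytes
# (45, 48 and 51 bytes in total), which the client checks via resp.bodylen.
# s3 handles two requests because responses to /nocache are never cached.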
server s1 {
    rxreq
    txresp -nolen -hdr "Transfer-Encoding: chunked" \
        -hdr "Cache-Control: max-age=5"
    chunkedlen 15
    chunkedlen 15
    chunkedlen 15
    chunkedlen 0
} -start

server s2 {
    rxreq
    txresp -nolen -hdr "Transfer-Encoding: chunked" \
        -hdr "Cache-Control: max-age=5"
    chunkedlen 16
    chunkedlen 16
    chunkedlen 16
    chunkedlen 0
} -start

server s3 {
    rxreq
    txresp -nolen -hdr "Transfer-Encoding: chunked" \
        -hdr "Cache-Control: max-age=5"
    chunkedlen 17
    chunkedlen 17
    chunkedlen 17
    chunkedlen 0

    rxreq
    txresp -nolen -hdr "Transfer-Encoding: chunked" \
        -hdr "Cache-Control: max-age=5"
    chunkedlen 17
    chunkedlen 17
    chunkedlen 17
    chunkedlen 0
} -start

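# The /first and /second (default) paths go through backends that use
# first_cache and second_cache respectively; /nocache goes through a backend
# with no cache. Every backend copies the cache_hit and cache_name sample
# fetches into the X-Cache-Hit and X-Cache-Name response headers so the
# client can verify where each response came from.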
haproxy h1 -conf {
    global
        # WT: limit false-positives causing "HTTP header incomplete" due to
        # idle server connections being randomly used and randomly expiring
        # under us.
        tune.idle-pool.shared off

    defaults
        mode http
        ${no-htx} option http-use-htx
        timeout connect 1s
        timeout client 1s
        timeout server 1s

    frontend fe
        bind "fd@${fe}"
        use_backend first_be if { path_beg /first }
        use_backend nocache_be if { path_beg /nocache }
        default_backend second_be

    backend first_be
        http-request cache-use first_cache
        server www ${s1_addr}:${s1_port}
        http-response cache-store first_cache
        http-response set-header X-Cache-Hit %[res.cache_hit]
        http-response set-header X-Cache-Name %[res.cache_name]

    backend second_be
        http-request cache-use second_cache
        server www ${s2_addr}:${s2_port}
        http-response cache-store second_cache
        http-response set-header X-Cache-Hit %[res.cache_hit]
        http-response set-header X-Cache-Name %[res.cache_name]

    backend nocache_be
        server www ${s3_addr}:${s3_port}
        http-response set-header X-Cache-Hit %[res.cache_hit]
        http-response set-header X-Cache-Name %[res.cache_name]

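    # In a cache section, total-max-size is expressed in megabytes,
    # max-object-size in bytes and max-age in seconds.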
    cache first_cache
        total-max-size 3
        max-age 40
        max-object-size 3000

    cache second_cache
        total-max-size 3
        max-age 20
        max-object-size 3072
} -start

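# First pass: /first, /second and /nocache are all cache misses (X-Cache-Hit
# is 0 and no cache name is reported). Second pass: /first and /second are
# served from first_cache and second_cache, while /nocache still goes to s3
# since its backend has no cache attached.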
client c1 -connect ${h1_fe_sock} {
    txreq -url "/first"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 45
    expect resp.http.X-Cache-Hit == 0
    expect resp.http.X-Cache-Name == ""

    txreq -url "/second"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 48
    expect resp.http.X-Cache-Hit == 0
    expect resp.http.X-Cache-Name == ""

    txreq -url "/nocache"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 51
    expect resp.http.X-Cache-Hit == 0
    expect resp.http.X-Cache-Name == ""

    # This second request to /nocache must still not be served from the cache
    # (nocache_be has no cache attached); the /first and /second requests that
    # follow should now come from the cache.
    txreq -url "/nocache"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 51
    expect resp.http.X-Cache-Hit == 0
    expect resp.http.X-Cache-Name == ""

    txreq -url "/first"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 45
    expect resp.http.X-Cache-Hit == 1
    expect resp.http.X-Cache-Name == "first_cache"

    txreq -url "/second"
    rxresp
    expect resp.status == 200
    expect resp.bodylen == 48
    expect resp.http.X-Cache-Hit == 1
    expect resp.http.X-Cache-Name == "second_cache"
} -run