mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2024-12-18 09:24:31 +00:00)
f673923629
With the CI occasionally slowing down, we're starting to see spurious failures again despite the long 1-second timeouts. This produces disturbing false positives and provides less value than it could, yet at this delay it is already a pain for developers to wait for the tests to complete.

This commit adds support for a new environment variable, HAPROXY_TEST_TIMEOUT, that allows anyone to modify the connect, client and server timeouts. It defaults to 5 seconds, which should be plenty for quite some time in the CI. All relevant values that were 200ms or above were replaced by this one; a few larger values were left alone as they are special. One test for the set-timeout action that used to rely on a fixed 1-second value was extended to a fixed 5 seconds: the timeout is normally not reached, but it needs to be known in order to compare the old and new values.
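For reference, the pattern introduced by the commit is the one visible in the defaults section of the test below: each timeout reads HAPROXY_TEST_TIMEOUT from the environment and falls back to 5s when the variable is unset. A minimal sketch (the 2s override mentioned in the comment is only an illustration of how a developer might tighten the tests, not something the commit itself sets):

    defaults
        # unset -> 5s; exporting e.g. HAPROXY_TEST_TIMEOUT=2s before running
        # the reg-tests overrides all three timeouts at once
        timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"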
202 lines
8.1 KiB
Plaintext
varnishtest "Health-check test"
feature ignore_unknown_macro

#REQUIRE_VERSION=2.4
#EXCLUDE_TARGETS=freebsd
#REGTEST_TYPE=slow

# This script tests health-checks for four backends, with one server per backend.
# A syslog server is attached to each backend to check that the syslog messages
# arrive in the right order.

# First, we check that a health-check has passed for all the servers, thanks to
# the syslog messages. Then each server is disabled and the health-check statuses
# are checked. Then each server is re-enabled. Finally, health-check status
# verifications for each server terminate the execution of this script.

# Note that the CLI is synchronized with the syslog servers so as to be sure to
# receive the "passed" health-check status messages before disabling the servers.
# The same is done when checking that the servers are down, before re-enabling
# them.

# Cyclic barrier to synchronize the CLI with the syslog servers
barrier b1 cond 5 -cyclic
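# ("cond 5": five parties must call "barrier b1 sync" before any of them is
# released, i.e. the four syslog servers S1..S4 plus the CLI of the h1 process.)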
# These servers are there only for the health-check test.
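# (Their empty bodies are enough: the default health check is a plain layer-4
# TCP connect, which is what the "Layer4 check passed" messages below refer to.)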
server s1 {
} -start

server s2 {
} -start

server s3 {
} -start

server s4 {
} -start

syslog S1 -level notice {
recv
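# The messages matched below are emitted because of "option log-health-checks"
# in backend be1; this first one reports the initial successful layer-4 check.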
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
barrier b1 sync
recv alert
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be1/srv1 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
recv emerg
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be1 has no server available!"
barrier b1 sync
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be1/srv1 is UP/READY \\(leaving forced maintenance\\).|Health check for server be1/srv1 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
barrier b1 sync
} -start

syslog S2 -level notice {
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv2 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
barrier b1 sync
recv alert
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be2/srv2 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
recv emerg
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be2 has no server available!"
barrier b1 sync
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be2/srv2 is UP/READY \\(leaving forced maintenance\\).|Health check for server be2/srv2 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
barrier b1 sync
} -start

syslog S3 -level notice {
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv3 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
barrier b1 sync
recv alert
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be3/srv3 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
recv emerg
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be3 has no server available!"
barrier b1 sync
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be3/srv3 is UP/READY \\(leaving forced maintenance\\).|Health check for server be3/srv3 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
barrier b1 sync
} -start

syslog S4 -level notice {
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv4 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
barrier b1 sync
recv alert
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be4/srv4 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
recv emerg
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be4 has no server available!"
barrier b1 sync
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be4/srv4 is UP/READY \\(leaving forced maintenance\\).|Health check for server be4/srv4 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
barrier b1 sync
} -start

haproxy h1 -conf {
defaults
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
default-server check inter 200ms downinter 100s rise 1 fall 1
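# Note on the check settings above: "inter 200ms" is the interval between checks
# while the server is up, "downinter 100s" the interval once it is down, and
# "rise 1"/"fall 1" make a single check flip the state. Presumably the short
# inter lets the initial checks pass quickly while the huge downinter keeps
# checks from re-firing once a server is down, leaving state changes to the CLI.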
frontend fe1
bind "fd@${fe1}"
use_backend be1

frontend fe2
bind "fd@${fe2}"
use_backend be2

frontend fe3
bind "fd@${fe3}"
use_backend be3

frontend fe4
bind "fd@${fe4}"
use_backend be4

backend be1
option log-health-checks
log ${S1_addr}:${S1_port} daemon
server srv1 ${s1_addr}:${s1_port}

backend be2
option log-health-checks
log ${S2_addr}:${S2_port} daemon
server srv2 ${s2_addr}:${s2_port}

backend be3
option log-health-checks
log ${S3_addr}:${S3_port} daemon
server srv3 ${s3_addr}:${s3_port}

backend be4
option log-health-checks
log ${S4_addr}:${S4_port} daemon
server srv4 ${s4_addr}:${s4_port}
} -start

haproxy h1 -cli {
barrier b1 sync
send "show servers state"
expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s4_port} - 0 0 - - 0"
}

haproxy h1 -cli {
send "disable server be1/srv1"
expect ~ .*
}

haproxy h1 -cli {
send "disable server be2/srv2"
expect ~ .*
}

haproxy h1 -cli {
send "disable server be3/srv3"
expect ~ .*
}

haproxy h1 -cli {
send "disable server be4/srv4"
expect ~ .*
}

haproxy h1 -cli {
barrier b1 sync
send "show servers state"
expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s4_port} - 0 0 - - 0"
}

haproxy h1 -cli {
send "enable server be1/srv1"
expect ~ .*
}

haproxy h1 -cli {
send "enable server be2/srv2"
expect ~ .*
}

haproxy h1 -cli {
send "enable server be3/srv3"
expect ~ .*
}

haproxy h1 -cli {
send "enable server be4/srv4"
expect ~ .*
}

haproxy h1 -cli {
barrier b1 sync
send "show servers state"
expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s4_port} - 0 0 - - 0"
}

syslog S1 -wait
syslog S2 -wait
syslog S3 -wait
syslog S4 -wait