MEDIUM: connections: Add a way to control the number of idling connections.

By default we add all keepalive connections to the idle pool. In the
pathological case where clients don't do keepalive but the server does, and
haproxy is configured to only reuse "safe" connections, we can soon find
ourselves with lots of idling connections that are unusable for new sessions,
while no file descriptors are left to create new connections.

To fix this, add 2 new global settings, "tune.pool-low-fd-ratio" and
"tune.pool-high-fd-ratio". tune.pool-low-fd-ratio is the percentage of fds
we're allowed to use (against the maximum number of fds available to haproxy)
before we stop adding connections to the idle pool, and destroy them instead.
The default is 20. tune.pool-high-fd-ratio is the percentage of fds we're
allowed to use before we start killing idle connections in the event we have
to create a new outgoing connection and no reuse is possible. The default is 25.
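
As a quick illustration, the new knobs sit in the global section like the
other tune.* settings; the sketch below merely spells out the defaults
(values are illustrative, not a recommendation):

    global
        maxconn 10000
        # stop parking keep-alive connections in the idle pool once 20%
        # of the usable FDs are in use
        tune.pool-low-fd-ratio  20
        # once 25% are in use, start killing idle connections whenever a
        # new outgoing connection is needed and none can be reused
        tune.pool-high-fd-ratio 25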
Author:    Olivier Houchard, 2019-04-16 19:07:22 +02:00
Committer: Willy Tarreau
Parent:    7c49d2e213
Commit:    88698d966d

6 changed files with 95 additions and 1 deletion

@@ -1673,6 +1673,21 @@ tune.pipesize <number>
   performed. This has an impact on the kernel's memory footprint, so this must
   not be changed if impacts are not understood.
 
+tune.pool-low-fd-ratio <number>
+  This setting sets the maximum percentage of file descriptors used by haproxy
+  globally, against the maximum number of file descriptors haproxy can use,
+  before we stop putting connections into the idle pool for reuse. The default
+  is 20.
+
+tune.pool-high-fd-ratio <number>
+  This setting sets the maximum percentage of file descriptors used by haproxy
+  globally, against the maximum number of file descriptors haproxy can use,
+  before we start killing idle connections when we can't reuse a connection
+  and we have to create a new one. The default is 25 (one quarter of the file
+  descriptors roughly means that half of the maximum front connections can
+  keep an idle connection behind; anything beyond this probably doesn't make
+  much sense in the general case when targeting connection reuse).
+
 tune.rcvbuf.client <number>
 tune.rcvbuf.server <number>
   Forces the kernel socket receive buffer size on the client or the server side
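
To make the parenthetical above concrete, a rough worked example (assuming
maxsock ends up close to twice maxconn, i.e. one potential server-side FD per
front connection; the real value also counts listeners, checks and internal
FDs, so it is somewhat higher):

    maxconn  10000         -> up to 10000 front connections
    maxsock ~ 20000        -> roughly 2 FDs per front connection
    pool_high_count = 25% of 20000 = 5000
                    ~ half of the 10000 front connections may keep
                      an idle server connection behind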

@@ -251,7 +251,8 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *conn)
 	    (srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
 	    !(conn->flags & CO_FL_PRIVATE) &&
 	    ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
-	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
+	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn) &&
+	    ha_used_fds < global.tune.pool_low_count) {
 		int retadd;
 
 		retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
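
Restated outside haproxy's data structures, the new gating rule is a single
extra test; a minimal sketch with flattened, hypothetical parameters (not
haproxy's real API):

    #include <stdbool.h>

    /* May a finished keep-alive connection be parked in the idle pool?
     * All the pre-existing reuse conditions must hold, and now the
     * process-wide FD usage must also be below the low watermark. */
    static bool may_park_in_idle_pool(bool is_private, bool reuse_allowed,
                                      int used_streams, int avail_streams,
                                      int used_fds, int pool_low_count)
    {
        return !is_private && reuse_allowed &&
               used_streams == 0 && avail_streams > 0 &&
               used_fds < pool_low_count;   /* the condition added above */
    }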

@@ -161,6 +161,10 @@ struct global {
 		int pattern_cache; /* max number of entries in the pattern cache. */
 		int sslcachesize;  /* SSL cache size in session, defaults to 20000 */
 		int comp_maxlevel; /* max HTTP compression level */
+		int pool_low_ratio;  /* max ratio of FDs used before we stop using new idle connections */
+		int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
+		int pool_low_count;  /* max number of opened fd before we stop using new idle connections */
+		int pool_high_count; /* max number of opened fd before we start killing idle connections when creating new connections */
 		unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
 	} tune;
 	struct {

@@ -1338,6 +1338,39 @@ int connect_server(struct stream *s)
 			reuse = 0;
 		}
 	}
+	if ((!reuse || (srv_conn && !(srv_conn->flags & CO_FL_CONNECTED)))
+	    && ha_used_fds > global.tune.pool_high_count) {
+		struct connection *tokill_conn;
+
+		/* We can't reuse a connection, and we have more FDs than deemed
+		 * acceptable, attempt to kill an idling connection
+		 */
+		/* First, try from our own idle list */
+		tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[tid],
+		                              struct connection *, list);
+		if (tokill_conn)
+			tokill_conn->mux->destroy(tokill_conn->ctx);
+		/* If not, iterate over the other threads' idle lists, and try to grab one */
+		else {
+			int i;
+
+			for (i = 0; i < global.nbthread; i++) {
+				if (i == tid)
+					continue;
+				tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i],
+				                              struct connection *, list);
+				if (tokill_conn) {
+					/* We got one, put it into the concerned thread's
+					 * to-kill list, and wake its kill task */
+					LIST_ADDQ_LOCKED(&toremove_connections[i],
+					                 &tokill_conn->list);
+					task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+					break;
+				}
+			}
+		}
+	}
+
 	/* If we're really reusing the connection, remove it from the orphan
 	 * list and add it back to the idle list.
 	 */
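
The shape of that fallback, condensed into a self-contained sketch
(hypothetical single-threaded data model, not haproxy's lock-free lists):

    #include <stddef.h>

    #define NBTHREAD 4

    struct conn { struct conn *next; };

    /* one idle list per thread, as with srv->idle_orphan_conns[] */
    static struct conn *idle_list[NBTHREAD];

    static struct conn *pop_idle(int thr)
    {
        struct conn *c = idle_list[thr];
        if (c)
            idle_list[thr] = c->next;
        return c;
    }

    /* Over the high watermark and nothing to reuse: free one FD by
     * reclaiming an idle connection, ours first, then a stolen one. */
    static struct conn *pick_idle_victim(int tid)
    {
        struct conn *victim = pop_idle(tid);

        if (victim)
            return victim;    /* haproxy destroys it directly here */

        for (int i = 0; i < NBTHREAD; i++) {
            if (i != tid && (victim = pop_idle(i)) != NULL)
                break;        /* haproxy instead queues it on thread i's
                               * to-remove list and wakes i's kill task */
        }
        return victim;        /* may be NULL: nothing idling anywhere */
    }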

@@ -164,6 +164,8 @@ struct global global = {
 	.chksize = (BUFSIZE + 2*sizeof(void *) - 1) & -(2*sizeof(void *)),
 	.reserved_bufs = RESERVED_BUFS,
 	.pattern_cache = DEFAULT_PAT_LRU_SIZE,
+	.pool_low_ratio  = 20,
+	.pool_high_ratio = 25,
 #ifdef USE_OPENSSL
 	.sslcachesize = SSLCACHESIZE,
 #endif
@@ -1937,6 +1939,10 @@ static void init(int argc, char **argv)
 		global.maxsock += global.maxconn * sides * global.ssl_used_async_engines;
 	}
 
+	/* update connection pool thresholds */
+	global.tune.pool_low_count  = ((long long)global.maxsock * global.tune.pool_low_ratio  + 99) / 100;
+	global.tune.pool_high_count = ((long long)global.maxsock * global.tune.pool_high_ratio + 99) / 100;
+
 	proxy_adjust_all_maxconn();
 
 	if (global.tune.maxpollevents <= 0)
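
A standalone sketch of the threshold computation above: the "+ 99" turns the
integer division into a ceiling, so any non-zero ratio yields a non-zero
count (the maxsock value below is made up):

    #include <stdio.h>

    /* count = ceil(maxsock * ratio / 100), as in init() above */
    static int fd_ratio_to_count(long long maxsock, int ratio)
    {
        return (maxsock * ratio + 99) / 100;
    }

    int main(void)
    {
        long long maxsock = 20043;   /* illustrative */

        printf("low:  %d\n", fd_ratio_to_count(maxsock, 20)); /* 4009 */
        printf("high: %d\n", fd_ratio_to_count(maxsock, 25)); /* 5011 */
        return 0;
    }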

@@ -5390,6 +5390,41 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsigned short state)
 	return task;
 }
 
+/* config parser for global "tune.pool-{low,high}-fd-ratio" */
+static int cfg_parse_pool_fd_ratio(char **args, int section_type, struct proxy *curpx,
+                                   struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	int arg = -1;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (*(args[1]) != 0)
+		arg = atoi(args[1]);
+	if (arg < 0 || arg > 100) {
+		memprintf(err, "'%s' expects an integer argument between 0 and 100.", args[0]);
+		return -1;
+	}
+
+	/* args[0][10] is 'h' in "tune.pool-high-fd-ratio" and 'l' in
+	 * "tune.pool-low-fd-ratio", which is enough to tell them apart */
+	if (args[0][10] == 'h')
+		global.tune.pool_high_ratio = arg;
+	else
+		global.tune.pool_low_ratio = arg;
+	return 0;
+}
+
+/* config keyword parsers */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.pool-high-fd-ratio", cfg_parse_pool_fd_ratio },
+	{ CFG_GLOBAL, "tune.pool-low-fd-ratio",  cfg_parse_pool_fd_ratio },
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
 /*
  * Local variables:
  * c-indent-level: 8