MEDIUM: servers: Split the connections into idle, safe, and available.

Revamp the server connection lists. We now have 3 lists:
- idle_conns, which contains idling connections
- safe_conns, which contains idling connections that are safe to use even
for the first request
- available_conns, which contains connections that are not idling, but can
still accept new streams (those are HTTP/2 or FastCGI connections, and are
always considered safe).
A simplified sketch of the resulting per-thread layout follows the commit
details below.
Author: Olivier Houchard, 2020-02-13 19:12:07 +01:00 (committed by Olivier Houchard)
Parent: 2444aa5b66
Commit: dc2f2753e9
10 changed files with 75 additions and 53 deletions
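For orientation, here is a minimal, hedged sketch of the per-server layout this commit converges on. The type and field names (list_head, server_conn_lists) are simplified stand-ins, not HAProxy's struct server; in the real code idle_conns and safe_conns become struct mt_list heads while available_conns stays a plain struct list, all allocated as one head per thread:

/* Simplified stand-in for the relevant part of struct server after this
 * commit; each pointer is an array with one list head per thread (tid). */
struct list_head { struct list_head *next, *prev; };

struct server_conn_lists {
    struct list_head *idle_conns;      /* idle; reused only when the request is
                                        * known to be retryable (TX_NOT_FIRST)
                                        * or "http-reuse always" is set */
    struct list_head *safe_conns;      /* idle and safe to reuse even for a
                                        * client's first request */
    struct list_head *available_conns; /* not idle: still carrying streams but
                                        * with free slots (HTTP/2 or FastCGI),
                                        * always considered safe */
};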

@ -245,7 +245,7 @@ static inline enum srv_initaddr srv_get_next_initaddr(unsigned int *list)
/* This adds an idle connection to the server's list if the connection is
* reusable, not held by any owner anymore, but still has available streams.
*/
static inline int srv_add_to_idle_list(struct server *srv, struct connection *conn)
static inline int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe)
{
if (srv && srv->pool_purge_delay > 0 &&
(srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
@ -262,7 +262,8 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
return 0;
}
LIST_DEL_INIT(&conn->list);
MT_LIST_ADDQ(&srv->idle_orphan_conns[tid], (struct mt_list *)&conn->list);
MT_LIST_ADDQ(is_safe ? &srv->safe_conns[tid] : &srv->idle_conns[tid],
(struct mt_list *)&conn->list);
srv->curr_idle_thr[tid]++;
conn->idle_time = now_ms;
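To make the new is_safe parameter concrete, here is a hedged, self-contained sketch of the release-side decision. The names (struct conn, struct idle_lists, release_idle_conn) are illustrative, a plain singly linked list stands in for HAProxy's mt_list, and the capacity and ownership checks that srv_add_to_idle_list performs are omitted:

#include <stdbool.h>

/* Toy connection with an intrusive link; HAProxy links via struct mt_list. */
struct conn {
    struct conn *next;
};

struct idle_lists {
    struct conn *idle;  /* reusable only for retryable (non-first) requests */
    struct conn *safe;  /* reusable even for a client's first request */
};

/* Mirrors the choice added above: the same idle connection now lands on one
 * of two lists depending on is_safe, instead of a single orphan list. */
static bool release_idle_conn(struct idle_lists *l, struct conn *c, bool is_safe)
{
    struct conn **head = is_safe ? &l->safe : &l->idle;

    c->next = *head;
    *head = c;
    return true;  /* the real function can refuse and return 0 instead */
}

As the mux hunks further down show, H2 and FastCGI pass is_safe=1 unconditionally, while the H1 mux passes its H1S_F_NOT_FIRST flag, so an H1 connection only reaches the safe list once it has already served a request.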

@ -222,9 +222,9 @@ struct server {
struct eb_root pendconns; /* pending connections */
struct list actconns; /* active connections */
struct list *idle_conns; /* sharable idle connections attached or not to a stream interface */
struct list *safe_conns; /* safe idle connections attached to stream interfaces, shared */
struct mt_list *idle_orphan_conns; /* Orphan connections idling */
struct mt_list *idle_conns; /* sharable idle connections */
struct mt_list *safe_conns; /* safe idle connections */
struct list *available_conns; /* Connections in use, but with new streams still available */
unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
unsigned int max_idle_conns; /* Max number of connections allowed in the orphan connections list */
unsigned int curr_idle_conns; /* Current number of orphan idling connections */

@ -1129,7 +1129,8 @@ int connect_server(struct stream *s)
if (srv && !reuse) {
srv_conn = NULL;
/* Below we pick connections from the safe or idle lists based
/* Below we pick connections from the safe, idle or
* available (which are safe too) lists based
* on the strategy, the fact that this is a first or second
* (retryable) request, with the indicated priority (1 or 2) :
*
@ -1146,37 +1147,33 @@ int connect_server(struct stream *s)
* Idle conns are necessarily looked up on the same thread so
that there are no concurrency issues.
*/
if (srv->idle_conns && !LIST_ISEMPTY(&srv->idle_conns[tid]) &&
if (srv->available_conns && !LIST_ISEMPTY(&srv->available_conns[tid]) &&
((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR))
srv_conn = LIST_ELEM(srv->available_conns[tid].n, struct connection *, list);
if (srv->idle_conns && !MT_LIST_ISEMPTY(&srv->idle_conns[tid]) &&
((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR &&
s->txn && (s->txn->flags & TX_NOT_FIRST))) {
srv_conn = LIST_ELEM(srv->idle_conns[tid].n, struct connection *, list);
srv_conn = MT_LIST_POP(&srv->idle_conns[tid], struct connection *, list);
}
else if (srv->safe_conns && !LIST_ISEMPTY(&srv->safe_conns[tid]) &&
else if (srv->safe_conns && !MT_LIST_ISEMPTY(&srv->safe_conns[tid]) &&
((s->txn && (s->txn->flags & TX_NOT_FIRST)) ||
(s->be->options & PR_O_REUSE_MASK) >= PR_O_REUSE_AGGR)) {
srv_conn = LIST_ELEM(srv->safe_conns[tid].n, struct connection *, list);
srv_conn = MT_LIST_POP(&srv->safe_conns[tid], struct connection *, list);
}
else if (srv->idle_conns && !LIST_ISEMPTY(&srv->idle_conns[tid]) &&
else if (srv->idle_conns && !MT_LIST_ISEMPTY(&srv->idle_conns[tid]) &&
(s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) {
srv_conn = LIST_ELEM(srv->idle_conns[tid].n, struct connection *, list);
} else if (srv->idle_orphan_conns && !MT_LIST_ISEMPTY(&srv->idle_orphan_conns[tid]) &&
(((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) ||
(((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
s->txn && (s->txn->flags & TX_NOT_FIRST)))) {
srv_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
struct connection *, list);
if (srv_conn)
reuse_orphan = 1;
srv_conn = MT_LIST_POP(&srv->idle_conns[tid], struct connection *, list);
}
/* If we've picked a connection from the pool, we now have to
* detach it. We may have to get rid of the previous idle
* connection we had, so for this we try to swap it with the
* other owner's. That way it may remain alive for others to
* pick.
*/
if (srv_conn)
if (srv_conn) {
reuse_orphan = 1;
reuse = 1;
}
}
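The branch structure above is dense, so here is a hedged, simplified rendering of the selection order it implements. The enum, struct and function names (reuse_policy, conn_pools, pick_conn, pop) are illustrative, not HAProxy types; the priority comment elided from the hunk and details such as the reuse/reuse_orphan flags and stream attachment are left out:

#include <stdbool.h>
#include <stddef.h>

enum reuse_policy { REUSE_NEVER, REUSE_SAFE, REUSE_AGGRESSIVE, REUSE_ALWAYS };

struct conn { struct conn *next; };

/* Per-thread candidate lists, named after the new server fields. */
struct conn_pools {
    struct conn *available; /* in use with spare streams; always safe */
    struct conn *safe;      /* idle, proven safe for a first request */
    struct conn *idle;      /* idle, only for retryable requests */
};

static struct conn *pop(struct conn **head)
{
    struct conn *c = *head;

    if (c)
        *head = c->next;
    return c;
}

/* Approximate selection order of connect_server() after this commit. */
static struct conn *pick_conn(struct conn_pools *p, enum reuse_policy policy,
                              bool not_first_request)
{
    struct conn *c = NULL;

    if (policy == REUSE_NEVER)
        return NULL;
    /* in-use connections with free stream slots are the first choice; the
     * real code only peeks at this list since those stay attached */
    c = p->available;
    /* a retryable request may take a plain idle connection */
    if (!c && not_first_request)
        c = pop(&p->idle);
    /* safe connections also serve first requests when reuse is aggressive+ */
    if (!c && (not_first_request || policy >= REUSE_AGGRESSIVE))
        c = pop(&p->safe);
    /* with "http-reuse always", even a first request may take an idle one */
    if (!c && policy == REUSE_ALWAYS)
        c = pop(&p->idle);
    return c;
}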
@ -1200,14 +1197,14 @@ int connect_server(struct stream *s)
}
if (((!reuse || (srv_conn && (srv_conn->flags & CO_FL_WAIT_XPRT)))
&& ha_used_fds > global.tune.pool_high_count) && srv && srv->idle_orphan_conns) {
&& ha_used_fds > global.tune.pool_high_count) && srv && srv->idle_conns) {
struct connection *tokill_conn;
/* We can't reuse a connection, and we have more FDs than deemed
* acceptable, attempt to kill an idling connection
*/
/* First, try from our own idle list */
tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
tokill_conn = MT_LIST_POP(&srv->idle_conns[tid],
struct connection *, list);
if (tokill_conn)
tokill_conn->mux->destroy(tokill_conn->ctx);
@ -1226,8 +1223,11 @@ int connect_server(struct stream *s)
ALREADY_CHECKED(i);
HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[i],
tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
struct connection *, list);
if (!tokill_conn)
tokill_conn = MT_LIST_POP(&srv->safe_conns[i],
struct connection *, list);
if (tokill_conn) {
/* We got one, put it into the concerned thread's to-kill list, and wake its kill task */
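Putting the two hunks above together: when nothing could be reused and the process already holds more file descriptors than global.tune.pool_high_count allows, one idle connection is reclaimed, first from the current thread's idle list, then from other threads' idle and, new with this commit, safe lists. Below is a hedged, lock-free sketch of that scan with illustrative names (thread_pool, find_conn_to_kill, pop); the real code takes OTHER_LOCK, hands the victim to the owning thread's removal list and wakes its kill task:

struct conn { struct conn *next; };

struct thread_pool {
    struct conn *idle;
    struct conn *safe;
};

static struct conn *pop(struct conn **head)
{
    struct conn *c = *head;

    if (c)
        *head = c->next;
    return c;
}

/* Pick one idle connection to close under FD pressure: our own idle list
 * first, then other threads' idle and safe lists. Returns NULL when every
 * pool is empty. */
static struct conn *find_conn_to_kill(struct thread_pool *pools, int nbthread,
                                      int cur_tid)
{
    struct conn *victim = pop(&pools[cur_tid].idle);
    int i;

    for (i = 0; !victim && i < nbthread; i++) {
        if (i == cur_tid)
            continue;
        victim = pop(&pools[i].idle);
        if (!victim)
            victim = pop(&pools[i].safe);
    }
    return victim;
}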
@ -1251,7 +1251,7 @@ int connect_server(struct stream *s)
_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
__ha_barrier_atomic_store();
srv->curr_idle_thr[tid]--;
LIST_ADDQ(&srv->idle_conns[tid], &srv_conn->list);
LIST_ADDQ(&srv->available_conns[tid], &srv_conn->list);
}
else {
if (srv_conn->flags & CO_FL_SESS_IDLE) {
@ -1404,7 +1404,7 @@ int connect_server(struct stream *s)
*/
if (srv && ((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
srv_conn->mux->avail_streams(srv_conn) > 0)
LIST_ADD(&srv->idle_conns[tid], &srv_conn->list);
LIST_ADD(&srv->available_conns[tid], &srv_conn->list);
}
/* The CO_FL_SEND_PROXY flag may have been set by the connect method,
* if so, add our handshake pseudo-XPRT now.

@ -3613,30 +3613,27 @@ out_uri_auth_compat:
for (newsrv = curproxy->srv; newsrv; newsrv = newsrv->next) {
int i;
newsrv->idle_conns = calloc(global.nbthread, sizeof(*newsrv->idle_conns));
newsrv->safe_conns = calloc(global.nbthread, sizeof(*newsrv->safe_conns));
newsrv->available_conns = calloc((unsigned)global.nbthread, sizeof(*newsrv->available_conns));
if (!newsrv->idle_conns || !newsrv->safe_conns) {
free(newsrv->safe_conns); newsrv->safe_conns = NULL;
free(newsrv->idle_conns); newsrv->idle_conns = NULL;
if (!newsrv->available_conns) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++) {
LIST_INIT(&newsrv->idle_conns[i]);
LIST_INIT(&newsrv->safe_conns[i]);
}
for (i = 0; i < global.nbthread; i++)
LIST_INIT(&newsrv->available_conns[i]);
if (newsrv->max_idle_conns != 0) {
if (idle_conn_task == NULL) {
idle_conn_task = task_new(MAX_THREADS_MASK);
if (!idle_conn_task)
goto err;
idle_conn_task->process = srv_cleanup_idle_connections;
idle_conn_task->context = NULL;
for (i = 0; i < global.nbthread; i++) {
idle_conn_cleanup[i] = task_new(1UL << i);
if (!idle_conn_cleanup[i])
@ -3646,12 +3643,30 @@ out_uri_auth_compat:
MT_LIST_INIT(&toremove_connections[i]);
}
}
newsrv->idle_orphan_conns = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_orphan_conns));
if (!newsrv->idle_orphan_conns)
goto err;
newsrv->idle_conns = calloc((unsigned)global.nbthread, sizeof(*newsrv->idle_conns));
if (!newsrv->idle_conns) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++)
MT_LIST_INIT(&newsrv->idle_orphan_conns[i]);
newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
MT_LIST_INIT(&newsrv->idle_conns[i]);
newsrv->safe_conns = calloc((unsigned)global.nbthread, sizeof(*newsrv->safe_conns));
if (!newsrv->safe_conns) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++)
MT_LIST_INIT(&newsrv->safe_conns[i]);
newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(int));
if (!newsrv->curr_idle_thr)
goto err;
continue;
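The allocation block above grew organically in the diff; here is a hedged consolidation of what it does at startup. The mini types and helpers (srv_lists, list_init, alloc_srv_lists) are illustrative only; the real code uses MT_LIST_INIT for idle_conns/safe_conns, reports ha_alert on failure and counts a configuration error instead of returning:

#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *l) { l->next = l->prev = l; }

struct srv_lists {
    struct list_head *idle_conns;
    struct list_head *safe_conns;
    struct list_head *available_conns;
    unsigned int *curr_idle_thr;   /* per-thread idle-connection counters */
};

/* One calloc per list, one list head per thread, as in the hunk above. */
static int alloc_srv_lists(struct srv_lists *s, int nbthread)
{
    int i;

    s->idle_conns      = calloc(nbthread, sizeof(*s->idle_conns));
    s->safe_conns      = calloc(nbthread, sizeof(*s->safe_conns));
    s->available_conns = calloc(nbthread, sizeof(*s->available_conns));
    s->curr_idle_thr   = calloc(nbthread, sizeof(*s->curr_idle_thr));
    if (!s->idle_conns || !s->safe_conns || !s->available_conns || !s->curr_idle_thr)
        return -1;
    for (i = 0; i < nbthread; i++) {
        list_init(&s->idle_conns[i]);
        list_init(&s->safe_conns[i]);
        list_init(&s->available_conns[i]);
    }
    return 0;
}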

@ -55,7 +55,7 @@ int conn_create_mux(struct connection *conn)
srv = objt_server(conn->target);
if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
conn->mux->avail_streams(conn) > 0)
LIST_ADD(&srv->idle_conns[tid], &conn->list);
LIST_ADD(&srv->available_conns[tid], &conn->list);
return 0;
fail:
/* let the upper layer know the connection failed */

@ -2663,7 +2663,7 @@ void deinit(void)
free((char*)s->conf.file);
free(s->idle_conns);
free(s->safe_conns);
free(s->idle_orphan_conns);
free(s->available_conns);
free(s->curr_idle_thr);
if (s->use_ssl || s->check.use_ssl) {

@ -3498,7 +3498,7 @@ static void fcgi_detach(struct conn_stream *cs)
return;
}
if (!(fconn->conn->flags & CO_FL_PRIVATE)) {
if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn)) {
if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1)) {
/* The server doesn't want it, let's kill the connection right away */
fconn->conn->mux->destroy(fconn);
TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
@ -3507,7 +3507,11 @@ static void fcgi_detach(struct conn_stream *cs)
TRACE_DEVEL("reusable idle connection", FCGI_EV_STRM_END, fconn->conn);
return;
}
}
} else if (LIST_ISEMPTY(&fconn->conn->list) &&
fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target)) {
LIST_ADD(&__objt_server(fconn->conn->target)->available_conns[tid], &fconn->conn->list);
}
}
/* We don't want to close right now unless we're removing the last
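The FCGI and H2 detach hunks follow the same pattern, sketched below in hedged form. All helpers and flags here (offer_to_idle_list, destroy_conn, add_to_available, fully_idle, queued) are placeholders, not HAProxy APIs; only the overall flow mirrors the diff:

#include <stdbool.h>

struct conn {
    struct conn *next;
    bool is_private;      /* CO_FL_PRIVATE: bound to one session, never shared */
    bool fully_idle;      /* no stream left attached to the mux */
    bool queued;          /* already on one of the server lists */
    int  avail_streams;   /* free stream slots left on the mux */
};

static struct conn *available_head;   /* stand-in for srv->available_conns[tid] */

/* Placeholder helpers, not HAProxy functions. */
static bool offer_to_idle_list(struct conn *c, bool is_safe)
{
    (void)c; (void)is_safe;
    return true;   /* the real srv_add_to_idle_list() may refuse */
}

static void destroy_conn(struct conn *c) { (void)c; }

static void add_to_available(struct conn *c)
{
    c->next = available_head;
    available_head = c;
    c->queued = true;
}

/* Shape of the detach path after this commit: a fully idle, shareable
 * connection is offered to the idle/safe lists (H2 and FCGI pass is_safe=1),
 * a refused one is destroyed right away, and a busy connection with spare
 * stream slots that is not queued anywhere is parked on available_conns. */
static void on_stream_detach(struct conn *c)
{
    if (c->is_private)
        return;
    if (c->fully_idle) {
        if (!offer_to_idle_list(c, true))
            destroy_conn(c);
    } else if (!c->queued && c->avail_streams > 0) {
        add_to_available(c);
    }
}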

@ -2360,9 +2360,7 @@ static void h1_detach(struct conn_stream *cs)
struct h1s *h1s = cs->ctx;
struct h1c *h1c;
struct session *sess;
#if 0
int is_not_first;
#endif
TRACE_ENTER(H1_EV_STRM_END, h1s ? h1s->h1c->conn : NULL, h1s);
@ -2376,9 +2374,7 @@ static void h1_detach(struct conn_stream *cs)
h1c = h1s->h1c;
h1s->cs = NULL;
#if 0
is_not_first = h1s->flags & H1S_F_NOT_FIRST;
#endif
h1s_destroy(h1s);
if (conn_is_back(h1c->conn) && (h1c->flags & H1C_F_CS_IDLE)) {
@ -2415,7 +2411,7 @@ static void h1_detach(struct conn_stream *cs)
if (!(h1c->conn->flags & CO_FL_PRIVATE)) {
if (h1c->conn->owner == sess)
h1c->conn->owner = NULL;
if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn)) {
if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn, is_not_first)) {
/* The server doesn't want it, let's kill the connection right away */
h1c->conn->mux->destroy(h1c);
TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);

@ -3896,7 +3896,7 @@ static void h2_detach(struct conn_stream *cs)
return;
}
if (!(h2c->conn->flags & CO_FL_PRIVATE)) {
if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn)) {
if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
/* The server doesn't want it, let's kill the connection right away */
h2c->conn->mux->destroy(h2c);
TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
@ -3906,6 +3906,9 @@ static void h2_detach(struct conn_stream *cs)
return;
}
} else if (LIST_ISEMPTY(&h2c->conn->list) &&
h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target)) {
LIST_ADD(&__objt_server(h2c->conn->target)->available_conns[tid], &h2c->conn->list);
}
}
}

@ -5617,7 +5617,10 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
for (j = 0; j < max_conn; j++) {
struct connection *conn = MT_LIST_POP(&srv->idle_orphan_conns[i], struct connection *, list);
struct connection *conn = MT_LIST_POP(&srv->idle_conns[i], struct connection *, list);
if (!conn)
conn = MT_LIST_POP(&srv->safe_conns[i],
struct connection *, list);
if (!conn)
break;
did_remove = 1;
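Finally, a hedged sketch of the purge change in this last hunk: the periodic cleanup now drains up to max_conn connections per thread from the idle list and falls back to the safe list when it runs dry. The names (thread_pool, purge_thread_pool, pop) are illustrative and locking is omitted; the real task moves the victims to a per-thread removal list and wakes that thread's cleanup task rather than collecting them into an array:

struct conn { struct conn *next; };

struct thread_pool {
    struct conn *idle;
    struct conn *safe;
};

static struct conn *pop(struct conn **head)
{
    struct conn *c = *head;

    if (c)
        *head = c->next;
    return c;
}

/* Collect at most max_conn stale connections from one thread's pools,
 * idle first, then safe, mirroring srv_cleanup_idle_connections().
 * Returns how many connections were taken. */
static int purge_thread_pool(struct thread_pool *p, int max_conn,
                             struct conn **out, int out_sz)
{
    int n = 0;

    while (n < max_conn && n < out_sz) {
        struct conn *c = pop(&p->idle);
        if (!c)
            c = pop(&p->safe);
        if (!c)
            break;
        out[n++] = c;
    }
    return n;
}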