MEDIUM: connection: protect idle conn lists with locks

This is preparatory work for connection reuse with SNI / PROXY
protocol / specific src-dst addresses.

Protect every access to the idle conn lists with a lock. Strictly
speaking, this is not needed today because all accesses to the lists
are made with atomic operations. However, to be able to reuse
connections with specific parameters, the list storage will be
converted to eb-trees. As this structure has no atomic operations, it
is mandatory to protect it with a lock.

For this, the takeover lock is reused. Its role was to protect
connections during takeover. As it is now extended to general idle
conns usage, it is renamed to idle_conns_lock. A new lock label named
IDLE_CONNS_LOCK is also added to isolate its impact on performance.
Author: Amaury Denoyelle
Date:   2021-01-11 09:21:52 +01:00
Parent: a3bf62ec54
Commit: 5c7086f6b0
10 changed files with 69 additions and 44 deletions
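To make the pattern concrete before the diff, here is a minimal standalone sketch of what the patch does on every idle-list access. It is not HAProxy code: pthread spinlocks and a hand-rolled intrusive list stand in for HA_SPINLOCK_T and mt_list, and every name in it is invented for illustration.

/* Sketch of the pattern this commit introduces: every access to a
 * per-thread idle-connection store goes through one spinlock. A plain
 * circular list stands in for mt_list; once the storage becomes an
 * eb-tree, which has no atomic operations, the lock is mandatory.
 */
#include <pthread.h>
#include <stddef.h>

struct conn_node {
    struct conn_node *prev, *next;     /* intrusive list linkage */
};

struct idle_conns_slot {
    struct conn_node head;             /* per-thread idle list (sentinel) */
    pthread_spinlock_t idle_conns_lock;
} __attribute__((aligned(64)));        /* one cache line per thread, as in
                                        * the patch's THREAD_ALIGNED(64) */

static void slot_init(struct idle_conns_slot *s)
{
    s->head.prev = s->head.next = &s->head;
    pthread_spin_init(&s->idle_conns_lock, PTHREAD_PROCESS_PRIVATE);
}

/* Append a connection to the idle list, under the lock. */
static void slot_add_idle(struct idle_conns_slot *s, struct conn_node *c)
{
    pthread_spin_lock(&s->idle_conns_lock);
    c->next = &s->head;
    c->prev = s->head.prev;
    s->head.prev->next = c;
    s->head.prev = c;
    pthread_spin_unlock(&s->idle_conns_lock);
}

/* Pop the oldest idle connection, or NULL, under the same lock. */
static struct conn_node *slot_pop_idle(struct idle_conns_slot *s)
{
    struct conn_node *c = NULL;

    pthread_spin_lock(&s->idle_conns_lock);
    if (s->head.next != &s->head) {
        c = s->head.next;
        c->prev->next = c->next;
        c->next->prev = c->prev;
        c->prev = c->next = c;         /* detached, like MT_LIST_DEL */
    }
    pthread_spin_unlock(&s->idle_conns_lock);
    return c;
}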

include/haproxy/connection-t.h

@@ -617,7 +617,7 @@ struct tlv_ssl {
struct idle_conns {
struct mt_list toremove_conns;
struct task *cleanup_task;
__decl_thread(HA_SPINLOCK_T takeover_lock);
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
} THREAD_ALIGNED(64);
#endif /* _HAPROXY_CONNECTION_T_H */

include/haproxy/server.h

@@ -36,6 +36,7 @@
__decl_thread(extern HA_SPINLOCK_T idle_conn_srv_lock);
extern struct idle_conns idle_conns[MAX_THREADS];
extern struct eb_root idle_conn_srv;
extern struct task *idle_conn_task;
extern struct dict server_key_dict;
@@ -271,7 +272,9 @@ static inline void srv_del_conn_from_list(struct server *srv, struct connection
}
/* Remove the connection from any list (safe, idle or available) */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
/* This adds an idle connection to the server's list if the connection is
@@ -304,7 +307,10 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
return 0;
}
_HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL(&conn->list);
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
@@ -314,6 +320,7 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
__ha_barrier_full();

include/haproxy/thread.h

@@ -400,6 +400,7 @@ enum lock_label {
SNI_LOCK,
SSL_SERVER_LOCK,
SFT_LOCK, /* sink forward target */
IDLE_CONNS_LOCK,
OTHER_LOCK,
LOCK_LABELS
};
@@ -444,6 +445,7 @@ static inline const char *lock_label(enum lock_label label)
case SNI_LOCK: return "SNI";
case SSL_SERVER_LOCK: return "SSL_SERVER";
case SFT_LOCK: return "SFT";
case IDLE_CONNS_LOCK: return "IDLE_CONNS";
case OTHER_LOCK: return "OTHER";
case LOCK_LABELS: break; /* keep compiler happy */
};

src/backend.c

@@ -1113,7 +1113,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
* to end up with two threads using the same connection.
*/
i = tid;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);
/* If we failed to pick a connection from the idle list, let's try again with
@@ -1126,7 +1126,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
mt_list = srv->safe_conns;
}
}
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* If we found a connection in our own list, and we don't have to
* steal one from another thread, then we're done.
@@ -1161,7 +1161,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
if (!srv->curr_idle_thr[i] || i == tid)
continue;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
MT_LIST_DEL_SAFE(elt1);
@@ -1185,7 +1185,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
}
}
}
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
} while (!found && (i = (i + 1 == global.nbthread) ? 0 : i + 1) != stop);
if (!found)
@@ -1335,12 +1335,15 @@ int connect_server(struct stream *s)
* acceptable, attempt to kill an idling connection
*/
/* First, try from our own idle list */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tokill_conn = MT_LIST_POP(&srv->idle_conns[tid],
struct connection *, list);
if (tokill_conn)
tokill_conn->mux->destroy(tokill_conn->ctx);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* If not, iterate over other thread's idling pool, and try to grab one */
else {
if (!tokill_conn) {
int i;
for (i = tid; (i = ((i + 1 == global.nbthread) ? 0 : i + 1)) != tid;) {
@@ -1350,12 +1353,13 @@ int connect_server(struct stream *s)
// see it possibly larger.
ALREADY_CHECKED(i);
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
struct connection *, list);
if (!tokill_conn)
tokill_conn = MT_LIST_POP(&srv->safe_conns[i],
struct connection *, list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (tokill_conn) {
/* We got one, put it into the concerned thread's to kill list, and wake its kill task */
@@ -1363,10 +1367,8 @@ int connect_server(struct stream *s)
MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
(struct mt_list *)&tokill_conn->list);
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
break;
}
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].takeover_lock);
}
}

src/cfgparse.c

@@ -3575,7 +3575,7 @@ out_uri_auth_compat:
goto err;
idle_conns[i].cleanup_task->process = srv_cleanup_toremove_connections;
idle_conns[i].cleanup_task->context = NULL;
HA_SPIN_INIT(&idle_conns[i].takeover_lock);
HA_SPIN_INIT(&idle_conns[i].idle_conns_lock);
MT_LIST_INIT(&idle_conns[i].toremove_conns);
}
}

src/mux_fcgi.c

@@ -2984,13 +2984,13 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
int ret = 0;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
@@ -3004,7 +3004,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
if (conn_in_list)
MT_LIST_DEL(&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(fconn->wait_event.events & SUB_RETRY_SEND))
ret = fcgi_send(fconn);
@@ -3021,10 +3021,12 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}
@@ -3153,19 +3155,19 @@ struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short sta
TRACE_ENTER(FCGI_EV_FCONN_WAKE, (fconn ? fconn->conn : NULL));
if (fconn) {
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
fconn = NULL;
goto do_leave;
}
if (!expired) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", FCGI_EV_FCONN_WAKE, fconn->conn);
return t;
}
@@ -3176,7 +3178,7 @@ struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short sta
if (fconn->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&fconn->conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:

src/mux_h1.c

@@ -2808,13 +2808,13 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
int ret = 0;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
}
@@ -2830,7 +2830,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
if (conn_in_list)
MT_LIST_DEL(&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(h1c->wait_event.events & SUB_RETRY_SEND))
ret = h1_send(h1c);
@@ -2846,10 +2846,12 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}
@@ -2889,19 +2891,19 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
if (h1c) {
/* Make sure nobody stole the connection from us */
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
h1c = NULL;
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
goto do_leave;
}
if (!expired) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
return t;
}
@@ -2910,7 +2912,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
* stream's timeout
*/
if (h1c->flags & H1C_F_ST_READY) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
t->expire = TICK_ETERNITY;
TRACE_DEVEL("leaving (CS still attached)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
return t;
@@ -2924,7 +2926,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
h1_send(h1c);
if (b_data(&h1c->obuf) || (h1c->flags & H1C_F_ERR_PENDING)) {
h1_refresh_timeout(h1c);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
return t;
}
}
@@ -2935,7 +2937,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
h1c->h1s->cs->flags |= (CS_FL_EOS|CS_FL_ERROR);
h1_alert(h1c->h1s);
h1_refresh_timeout(h1c);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("waiting to release the CS before releasing the connection", H1_EV_H1C_WAKE);
return t;
}
@@ -2946,7 +2948,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
if (h1c->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&h1c->conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:

src/mux_h2.c

@@ -3782,13 +3782,13 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
int ret = 0;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (t->context == NULL) {
/* The connection has been taken over by another thread,
* we're no longer responsible for it, so just free the
* tasklet, and do nothing.
*/
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
goto leave;
}
@@ -3805,7 +3805,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
if (conn_in_list)
MT_LIST_DEL(&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (!(h2c->wait_event.events & SUB_RETRY_SEND))
ret = h2_send(h2c);
@@ -3822,10 +3822,12 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
leave:
@@ -3902,15 +3904,15 @@ static int h2_process(struct h2c *h2c)
}
/* connections in error must be removed from the idle lists */
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
else if (h2c->st0 == H2_CS_ERROR) {
/* connections in error must be removed from the idle lists */
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
if (!b_data(&h2c->dbuf))
@@ -3966,20 +3968,20 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state
if (h2c) {
/* Make sure nobody stole the connection from us */
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* Somebody already stole the connection from us, so we should not
* free it, we just have to free the task.
*/
if (!t->context) {
h2c = NULL;
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
goto do_leave;
}
if (!expired) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
return t;
}
@@ -3988,7 +3990,7 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state
/* we do still have streams but all of them are idle, waiting
* for the data layer, so we must not enforce the timeout here.
*/
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
t->expire = TICK_ETERNITY;
return t;
}
@@ -3999,7 +4001,7 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state
if (h2c->conn->flags & CO_FL_LIST_MASK)
MT_LIST_DEL(&h2c->conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
do_leave:
@@ -4047,9 +4049,9 @@ do_leave:
}
/* in any case this connection must not be considered idle anymore */
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
MT_LIST_DEL((struct mt_list *)&h2c->conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* either we can release everything now or it will be done later once
* the last stream closes.

src/server.c

@@ -5277,6 +5277,8 @@ struct task *srv_cleanup_toremove_connections(struct task *task, void *context,
/* Move toremove_nb connections from idle_list to toremove_list, -1 means
* moving them all.
* Returns the number of connections moved.
*
* Must be called with idle_conns_lock held.
*/
static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list *toremove_list, int toremove_nb)
{
@@ -5308,10 +5310,12 @@ static void srv_cleanup_connections(struct server *srv)
/* check all threads starting with ours */
for (i = tid;;) {
did_remove = 0;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
if (srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
@@ -5381,12 +5385,14 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
max_conn = (exceed_conns * srv->curr_idle_thr[i]) /
curr_idle + 1;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
j = srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, max_conn);
if (j > 0)
did_remove = 1;
if (max_conn - j > 0 &&
srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);

src/ssl_sock.c

@@ -5808,9 +5808,9 @@ struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)
int conn_in_list;
int ret = 0;
HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) {
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl);
return NULL;
}
@@ -5818,7 +5818,7 @@ struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
MT_LIST_DEL(&conn->list);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* First if we're doing an handshake, try that */
if (ctx->conn->flags & CO_FL_SSL_WAIT_HS)
ssl_sock_handshake(ctx->conn, CO_FL_SSL_WAIT_HS);
@@ -5866,10 +5866,12 @@ leave:
if (!ret && conn_in_list) {
struct server *srv = objt_server(conn->target);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
}
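The io_cb and timeout_task hunks above all repeat one protocol: take the per-thread idle_conns_lock, check whether the tasklet's context was nulled by a takeover, and only then touch the connection. Here is a condensed standalone sketch of that race for readers unfamiliar with it; pthread primitives stand in for HAProxy's, and all names are invented for illustration.

/* A thread stealing an idle connection NULLs the tasklet context while
 * holding the owner's idle_conns_lock; the owner's I/O callback takes
 * the same lock before dereferencing the context, so it either sees
 * its connection or a NULL telling it the connection moved elsewhere.
 */
#include <pthread.h>
#include <stdlib.h>

struct tasklet { void *context; };       /* context: owning connection */

struct thread_slot {
    pthread_spinlock_t idle_conns_lock;
};

static void *io_cb(struct thread_slot *slot, struct tasklet *tl)
{
    pthread_spin_lock(&slot->idle_conns_lock);
    if (tl->context == NULL) {
        /* connection was taken over: just dispose of the tasklet */
        pthread_spin_unlock(&slot->idle_conns_lock);
        free(tl);
        return NULL;
    }
    void *conn = tl->context;            /* still ours, safe to use */
    pthread_spin_unlock(&slot->idle_conns_lock);
    return conn;
}

/* Called by the stealing thread with slot->idle_conns_lock held. */
static void takeover(struct tasklet *tl)
{
    tl->context = NULL;                  /* signal the previous owner */
}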