MINOR: connection: allocate dynamically hash node for backend conns

Remove the ebmb_node entry from struct connection and create a dedicated
struct conn_hash_node. struct connection now contains only a pointer to
a conn_hash_node, allocated only for connections whose target is of type
OBJ_TYPE_SERVER. This will reduce the memory footprint of every
connection that does not need http-reuse, such as frontend connections.
This commit is contained in:
Amaury Denoyelle 2021-02-19 15:29:16 +01:00
parent 5567f41d0a
commit 8990b010a0
11 changed files with 121 additions and 64 deletions

View File

@ -482,7 +482,7 @@ enum conn_hash_params_t {
#define CONN_HASH_PARAMS_TYPE_COUNT 6
#define CONN_HASH_PAYLOAD_LEN \
(((sizeof(((struct connection *)0)->hash)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
(((sizeof(((struct conn_hash_node *)0)->hash)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
#define CONN_HASH_GET_PAYLOAD(hash) \
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
@ -536,8 +536,19 @@ struct connection {
struct ist proxy_unique_id; /* Value of the unique ID TLV received via PROXYv2 */
struct quic_conn *qc; /* Only present if this connection is a QUIC one */
struct ebmb_node hash_node;
int64_t hash;
/* used to identify a backend connection for http-reuse,
* thus only present if conn.target is of type OBJ_TYPE_SERVER
*/
struct conn_hash_node *hash_node;
};
/* Node indexing a backend connection in the idle/safe/available trees for
 * http-reuse. A connection is identified by a hash generated from its
 * specific parameters, stored as the ebmb key. Allocated only for
 * connections whose target is of type OBJ_TYPE_SERVER (see conn_new()).
 */
struct conn_hash_node {
struct ebmb_node node; /* ebmb tree node, inserted in a server idle/safe/available tree */
int64_t hash; /* key for ebmb tree */
struct connection *conn; /* connection owner of the node */
};
struct mux_proto_list {

View File

@ -38,6 +38,7 @@
extern struct pool_head *pool_head_connection;
extern struct pool_head *pool_head_connstream;
extern struct pool_head *pool_head_conn_hash_node;
extern struct pool_head *pool_head_sockaddr;
extern struct pool_head *pool_head_authority;
extern struct xprt_ops *registered_xprt[XPRT_ENTRIES];
@ -356,8 +357,22 @@ static inline void conn_init(struct connection *conn, void *target)
conn->dst = NULL;
conn->proxy_authority = NULL;
conn->proxy_unique_id = IST_NULL;
memset(&conn->hash_node, 0, sizeof(conn->hash_node));
conn->hash = 0;
conn->hash_node = NULL;
}
/* Allocate a conn_hash_node from its dedicated pool and attach it to <conn>.
 * The embedded ebmb node is zeroed so the node is not mistaken for a member
 * of any tree before being explicitly inserted, and the hash key starts at 0.
 *
 * Returns the new node, or NULL on allocation failure.
 */
static inline struct conn_hash_node *conn_alloc_hash_node(struct connection *conn)
{
	/* no need for a separate NULL initializer: assign from the pool directly */
	struct conn_hash_node *hash_node = pool_alloc(pool_head_conn_hash_node);

	if (unlikely(!hash_node))
		return NULL;

	memset(&hash_node->node, 0, sizeof(hash_node->node));
	hash_node->hash = 0;
	hash_node->conn = conn;

	return hash_node;
}
/* sets <owner> as the connection's owner */
@ -421,13 +436,26 @@ static inline void sockaddr_free(struct sockaddr_storage **sap)
static inline struct connection *conn_new(void *target)
{
struct connection *conn;
struct conn_hash_node *hash_node;
conn = pool_alloc(pool_head_connection);
if (likely(conn != NULL)) {
conn_init(conn, target);
if (obj_type(target) == OBJ_TYPE_SERVER)
srv_use_conn(__objt_server(target), conn);
if (unlikely(!conn))
return NULL;
conn_init(conn, target);
if (obj_type(target) == OBJ_TYPE_SERVER) {
srv_use_conn(__objt_server(target), conn);
hash_node = conn_alloc_hash_node(conn);
if (unlikely(!hash_node)) {
pool_free(pool_head_connection, conn);
return NULL;
}
conn->hash_node = hash_node;
}
return conn;
}
@ -523,6 +551,10 @@ static inline void conn_free(struct connection *conn)
pool_free(pool_head_uniqueid, conn->proxy_unique_id.ptr);
conn->proxy_unique_id = IST_NULL;
}
if (conn->hash_node) {
pool_free(pool_head_conn_hash_node, conn->hash_node);
conn->hash_node = NULL;
}
/* By convention we always place a NULL where the ctx points to if the
* mux is null. It may have been used to store the connection as a

View File

@ -280,7 +280,7 @@ static inline void srv_release_conn(struct server *srv, struct connection *conn)
/* Remove the connection from any tree (safe, idle or available) */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -316,15 +316,15 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
_HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
} else {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@ -358,10 +358,13 @@ static inline struct connection *srv_lookup_conn(struct eb_root *tree, uint64_t
{
struct ebmb_node *node = NULL;
struct connection *conn = NULL;
struct conn_hash_node *hash_node = NULL;
node = ebmb_lookup(tree, &hash, sizeof(conn->hash));
if (node)
conn = ebmb_entry(node, struct connection, hash_node);
node = ebmb_lookup(tree, &hash, sizeof(hash_node->hash));
if (node) {
hash_node = ebmb_entry(node, struct conn_hash_node, node);
conn = hash_node->conn;
}
return conn;
}
@ -371,12 +374,15 @@ static inline struct connection *srv_lookup_conn(struct eb_root *tree, uint64_t
*/
static inline struct connection *srv_lookup_conn_next(struct connection *conn)
{
struct ebmb_node *next_node = NULL;
struct ebmb_node *node = NULL;
struct connection *next_conn = NULL;
struct conn_hash_node *hash_node = NULL;
next_node = ebmb_next_dup(&conn->hash_node);
if (next_node)
next_conn = ebmb_entry(next_node, struct connection, hash_node);
node = ebmb_next_dup(&conn->hash_node->node);
if (node) {
hash_node = ebmb_entry(node, struct conn_hash_node, node);
next_conn = hash_node->conn;
}
return next_conn;
}

View File

@ -208,7 +208,7 @@ static inline struct connection *session_get_conn(struct session *sess, void *ta
list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
if (srv_list->target == target) {
list_for_each_entry(srv_conn, &srv_list->conn_list, session_list) {
if (srv_conn->hash == hash &&
if ((srv_conn->hash_node && srv_conn->hash_node->hash == hash) &&
srv_conn->mux &&
(srv_conn->mux->avail_streams(srv_conn) > 0) &&
!(srv_conn->flags & CO_FL_WAIT_XPRT)) {

View File

@ -1135,7 +1135,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn = srv_lookup_conn(&tree[tid], hash);
if (conn)
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
/* If we failed to pick a connection from the idle list, let's try again with
* the safe list.
@ -1143,7 +1143,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
if (!conn && !is_safe && srv->curr_safe_nb > 0) {
conn = srv_lookup_conn(&srv->safe_conns_tree[tid], hash);
if (conn) {
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
is_safe = 1;
tree = srv->safe_conns_tree;
}
@ -1185,7 +1185,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
conn = srv_lookup_conn(&tree[i], hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
found = 1;
break;
@ -1198,7 +1198,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
conn = srv_lookup_conn(&srv->safe_conns_tree[i], hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
found = 1;
is_safe = 1;
@ -1234,7 +1234,9 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
session_add_conn(s->sess, conn, conn->target);
}
else {
ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->available_conns_tree[tid],
&conn->hash_node->node,
sizeof(conn->hash_node->hash));
}
}
return conn;
@ -1408,6 +1410,7 @@ int connect_server(struct stream *s)
if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns_tree) {
struct connection *tokill_conn = NULL;
struct conn_hash_node *conn_node = NULL;
struct ebmb_node *node = NULL;
/* We can't reuse a connection, and we have more FDs than deemed
@ -1417,7 +1420,8 @@ int connect_server(struct stream *s)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
node = ebmb_first(&srv->idle_conns_tree[tid]);
if (node) {
tokill_conn = ebmb_entry(node, struct connection, hash_node);
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
ebmb_delete(node);
tokill_conn->mux->destroy(tokill_conn->ctx);
}
@ -1437,14 +1441,16 @@ int connect_server(struct stream *s)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
node = ebmb_first(&srv->idle_conns_tree[i]);
if (node) {
tokill_conn = ebmb_entry(node, struct connection, hash_node);
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
ebmb_delete(node);
}
if (!tokill_conn) {
node = ebmb_first(&srv->safe_conns_tree[i]);
if (node) {
tokill_conn = ebmb_entry(node, struct connection, hash_node);
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
ebmb_delete(node);
}
}
@ -1469,7 +1475,7 @@ int connect_server(struct stream *s)
if (avail <= 1) {
/* No more streams available, remove it from the list */
conn_delete_from_tree(&srv_conn->hash_node);
conn_delete_from_tree(&srv_conn->hash_node->node);
}
if (avail >= 1) {
@ -1633,7 +1639,7 @@ skip_reuse:
if (srv && reuse_mode == PR_O_REUSE_ALWS &&
!(srv_conn->flags & CO_FL_PRIVATE) &&
srv_conn->mux->avail_streams(srv_conn) > 0) {
ebmb_insert(&srv->available_conns_tree[tid], &srv_conn->hash_node, sizeof(srv_conn->hash));
ebmb_insert(&srv->available_conns_tree[tid], &srv_conn->hash_node->node, sizeof(srv_conn->hash_node->hash));
}
else if (srv_conn->flags & CO_FL_PRIVATE ||
(reuse_mode == PR_O_REUSE_SAFE &&
@ -1716,7 +1722,8 @@ skip_reuse:
}
}
srv_conn->hash = hash;
if (srv)
srv_conn->hash_node->hash = hash;
return SF_ERR_NONE; /* connection is OK */
}

View File

@ -27,10 +27,11 @@
#include <haproxy/stream_interface.h>
DECLARE_POOL(pool_head_connection, "connection", sizeof(struct connection));
DECLARE_POOL(pool_head_connstream, "conn_stream", sizeof(struct conn_stream));
DECLARE_POOL(pool_head_sockaddr, "sockaddr", sizeof(struct sockaddr_storage));
DECLARE_POOL(pool_head_authority, "authority", PP2_AUTHORITY_MAX);
DECLARE_POOL(pool_head_connection, "connection", sizeof(struct connection));
DECLARE_POOL(pool_head_connstream, "conn_stream", sizeof(struct conn_stream));
DECLARE_POOL(pool_head_conn_hash_node, "conn_hash_node", sizeof(struct conn_hash_node));
DECLARE_POOL(pool_head_sockaddr, "sockaddr", sizeof(struct sockaddr_storage));
DECLARE_POOL(pool_head_authority, "authority", PP2_AUTHORITY_MAX);
struct idle_conns idle_conns[MAX_THREADS] = { };
struct xprt_ops *registered_xprt[XPRT_ENTRIES] = { NULL, };
@ -69,7 +70,7 @@ int conn_create_mux(struct connection *conn)
*/
if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
!(conn->flags & CO_FL_PRIVATE) && conn->mux->avail_streams(conn) > 0)
ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
else if (conn->flags & CO_FL_PRIVATE) {
/* If it fail now, the same will be done in mux->detach() callback */
session_add_conn(sess, conn, conn->target);

View File

@ -3002,7 +3002,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@ -3023,9 +3023,9 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
@ -3176,7 +3176,7 @@ struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short sta
* to steal it from us.
*/
if (fconn->conn->flags & CO_FL_LIST_MASK)
conn_delete_from_tree(&fconn->conn->hash_node);
conn_delete_from_tree(&fconn->conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -3619,12 +3619,12 @@ static void fcgi_detach(struct conn_stream *cs)
TRACE_DEVEL("reusable idle connection", FCGI_EV_STRM_END, fconn->conn);
return;
}
else if (!fconn->conn->hash_node.node.leaf_p &&
else if (!fconn->conn->hash_node->node.node.leaf_p &&
fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target) &&
!LIST_ADDED(&fconn->conn->session_list)) {
ebmb_insert(&__objt_server(fconn->conn->target)->available_conns_tree[tid],
&fconn->conn->hash_node,
sizeof(fconn->conn->hash));
&fconn->conn->hash_node->node,
sizeof(fconn->conn->hash_node->hash));
}
}
}

View File

@ -2828,7 +2828,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
*/
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@ -2848,9 +2848,9 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
@ -2946,7 +2946,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
* to steal it from us.
*/
if (h1c->conn->flags & CO_FL_LIST_MASK)
conn_delete_from_tree(&h1c->conn->hash_node);
conn_delete_from_tree(&h1c->conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}

View File

@ -3803,7 +3803,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
* to use it while we handle the I/O events
*/
if (conn_in_list)
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@ -3824,9 +3824,9 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -3906,7 +3906,7 @@ static int h2_process(struct h2c *h2c)
/* connections in error must be removed from the idle lists */
if (conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
}
@ -3914,7 +3914,7 @@ static int h2_process(struct h2c *h2c)
/* connections in error must be removed from the idle lists */
if (conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
}
@ -4003,7 +4003,7 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state
* to steal it from us.
*/
if (h2c->conn->flags & CO_FL_LIST_MASK)
conn_delete_from_tree(&h2c->conn->hash_node);
conn_delete_from_tree(&h2c->conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -4055,7 +4055,7 @@ do_leave:
/* in any case this connection must not be considered idle anymore */
if (h2c->conn->flags & CO_FL_LIST_MASK) {
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_delete_from_tree(&h2c->conn->hash_node);
conn_delete_from_tree(&h2c->conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -4253,12 +4253,12 @@ static void h2_detach(struct conn_stream *cs)
return;
}
else if (!h2c->conn->hash_node.node.leaf_p &&
else if (!h2c->conn->hash_node->node.node.leaf_p &&
h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
!LIST_ADDED(&h2c->conn->session_list)) {
ebmb_insert(&__objt_server(h2c->conn->target)->available_conns_tree[tid],
&h2c->conn->hash_node,
sizeof(h2c->conn->hash));
&h2c->conn->hash_node->node,
sizeof(h2c->conn->hash_node->hash));
}
}
}

View File

@ -5428,7 +5428,7 @@ struct task *srv_cleanup_toremove_connections(struct task *task, void *context,
static int srv_migrate_conns_to_remove(struct eb_root *idle_tree, struct mt_list *toremove_list, int toremove_nb)
{
struct eb_node *node, *next;
struct connection *conn;
struct conn_hash_node *hash_node;
int i = 0;
node = eb_first(idle_tree);
@ -5437,9 +5437,9 @@ static int srv_migrate_conns_to_remove(struct eb_root *idle_tree, struct mt_list
if (toremove_nb != -1 && i >= toremove_nb)
break;
conn = ebmb_entry(node, struct connection, hash_node);
hash_node = ebmb_entry(node, struct conn_hash_node, node);
eb_delete(node);
MT_LIST_ADDQ(toremove_list, &conn->toremove_list);
MT_LIST_ADDQ(toremove_list, &hash_node->conn->toremove_list);
i++;
node = next;

View File

@ -5817,7 +5817,7 @@ struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)
conn = ctx->conn;
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
conn_delete_from_tree(&conn->hash_node);
conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
/* First if we're doing an handshake, try that */
if (ctx->conn->flags & CO_FL_SSL_WAIT_HS)
@ -5868,9 +5868,9 @@ leave:
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;