MINOR: server: allocate a per-thread struct for the per-thread connections stuff

There are multiple per-thread lists in the servers, which isn't the
most efficient in terms of cache usage, and doesn't make it easy to
store all the per-thread stuff in one place.

Now we introduce an srv_per_thread structure which the servers will have an
array of, and place the idle/safe/avail conns tree heads into. Overall this
was a fairly mechanical change, and the array is now always initialized for
all servers since we'll put more stuff there. It's worth noting that the Lua
code still has to handle its own deinit despite its sockets being in the
global servers list, because its server structs are not dynamically allocated.
This commit is contained in:
Willy Tarreau 2021-03-04 09:45:32 +01:00
parent 4cdac166e0
commit 430bf4a483
12 changed files with 55 additions and 75 deletions

View File

@ -203,6 +203,13 @@ struct tree_occ {
struct eb32_node node;
};
/* Each server will have one occurrence of this structure per thread, indexed
 * by thread id. It groups the per-thread connection trees together (instead of
 * separate per-thread arrays) for better cache locality, and provides a single
 * place to add future per-thread server fields.
 */
struct srv_per_thread {
struct eb_root idle_conns; /* Shareable idle connections, keyed by connection hash */
struct eb_root safe_conns; /* Safe idle connections (safe for any request reuse) */
struct eb_root avail_conns; /* Connections in use which still have new streams available */
};
struct proxy;
struct server {
enum obj_type obj_type; /* object type == OBJ_TYPE_SERVER */
@ -230,9 +237,7 @@ struct server {
struct eb_root pendconns; /* pending connections */
struct mt_list actconns; /* active connections (used by "shutdown server sessions") */
struct eb_root *idle_conns_tree; /* shareable idle connections*/
struct eb_root *safe_conns_tree; /* safe idle connections */
struct eb_root *available_conns_tree; /* Connection in used, but with still new streams available */
struct srv_per_thread *per_thr; /* array of per-thread stuff such as connections lists, may be null */
unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
unsigned int low_idle_conns; /* min idle connection count to start picking from other threads */
unsigned int max_idle_conns; /* Max number of connection allowed in the orphan connections list */

View File

@ -308,8 +308,8 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
ha_used_fds < global.tune.pool_high_count &&
(srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
((eb_is_empty(&srv->safe_conns_tree[tid]) &&
(is_safe || eb_is_empty(&srv->idle_conns_tree[tid]))) ||
((eb_is_empty(&srv->per_thr[tid].safe_conns) &&
(is_safe || eb_is_empty(&srv->per_thr[tid].idle_conns))) ||
(ha_used_fds < global.tune.pool_low_count &&
(srv->curr_used_conns + srv->curr_idle_conns <=
MAX(srv->curr_used_conns, srv->est_need_conns) + srv->low_idle_conns))) &&
@ -328,11 +328,11 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
} else {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -1123,7 +1123,6 @@ static int alloc_bind_address(struct sockaddr_storage **ss,
*/
static struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe, int64_t hash)
{
struct eb_root *tree = is_safe ? srv->safe_conns_tree : srv->idle_conns_tree;
struct connection *conn = NULL;
int i; // thread number
int found = 0;
@ -1135,7 +1134,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
*/
i = tid;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn = srv_lookup_conn(&tree[tid], hash);
conn = srv_lookup_conn(is_safe ? &srv->per_thr[tid].safe_conns : &srv->per_thr[tid].idle_conns, hash);
if (conn)
conn_delete_from_tree(&conn->hash_node->node);
@ -1143,11 +1142,10 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
* the safe list.
*/
if (!conn && !is_safe && srv->curr_safe_nb > 0) {
conn = srv_lookup_conn(&srv->safe_conns_tree[tid], hash);
conn = srv_lookup_conn(&srv->per_thr[tid].safe_conns, hash);
if (conn) {
conn_delete_from_tree(&conn->hash_node->node);
is_safe = 1;
tree = srv->safe_conns_tree;
}
}
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@ -1185,7 +1183,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
if (HA_SPIN_TRYLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock) != 0)
continue;
conn = srv_lookup_conn(&tree[i], hash);
conn = srv_lookup_conn(is_safe ? &srv->per_thr[i].safe_conns : &srv->per_thr[i].idle_conns, hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node->node);
@ -1198,14 +1196,13 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
}
if (!found && !is_safe && srv->curr_safe_nb > 0) {
conn = srv_lookup_conn(&srv->safe_conns_tree[i], hash);
conn = srv_lookup_conn(&srv->per_thr[i].safe_conns, hash);
while (conn) {
if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
conn_delete_from_tree(&conn->hash_node->node);
_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
found = 1;
is_safe = 1;
tree = srv->safe_conns_tree;
break;
}
@ -1237,7 +1234,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
session_add_conn(s->sess, conn, conn->target);
}
else {
ebmb_insert(&srv->available_conns_tree[tid],
ebmb_insert(&srv->per_thr[tid].avail_conns,
&conn->hash_node->node,
sizeof(conn->hash_node->hash));
}
@ -1342,8 +1339,8 @@ int connect_server(struct stream *s)
*/
si_release_endpoint(&s->si[1]);
/* do not reuse if mode is not http or if avail list is not allocated */
if ((s->be->mode != PR_MODE_HTTP) || (srv && !srv->available_conns_tree))
/* do not reuse if mode is not http */
if (s->be->mode != PR_MODE_HTTP)
goto skip_reuse;
/* first, search for a matching connection in the session's idle conns */
@ -1370,8 +1367,8 @@ int connect_server(struct stream *s)
* Idle conns are necessarily looked up on the same thread so
* that there is no concurrency issues.
*/
if (!eb_is_empty(&srv->available_conns_tree[tid])) {
srv_conn = srv_lookup_conn(&srv->available_conns_tree[tid], hash);
if (!eb_is_empty(&srv->per_thr[tid].avail_conns)) {
srv_conn = srv_lookup_conn(&srv->per_thr[tid].avail_conns, hash);
if (srv_conn)
reuse = 1;
}
@ -1414,7 +1411,7 @@ int connect_server(struct stream *s)
* is OK.
*/
if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns_tree) {
if (ha_used_fds > global.tune.pool_high_count && srv) {
struct connection *tokill_conn = NULL;
struct conn_hash_node *conn_node = NULL;
struct ebmb_node *node = NULL;
@ -1424,7 +1421,7 @@ int connect_server(struct stream *s)
*/
/* First, try from our own idle list */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
node = ebmb_first(&srv->idle_conns_tree[tid]);
node = ebmb_first(&srv->per_thr[tid].idle_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
@ -1445,7 +1442,7 @@ int connect_server(struct stream *s)
ALREADY_CHECKED(i);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
node = ebmb_first(&srv->idle_conns_tree[i]);
node = ebmb_first(&srv->per_thr[i].idle_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
@ -1453,7 +1450,7 @@ int connect_server(struct stream *s)
}
if (!tokill_conn) {
node = ebmb_first(&srv->safe_conns_tree[i]);
node = ebmb_first(&srv->per_thr[i].safe_conns);
if (node) {
conn_node = ebmb_entry(node, struct conn_hash_node, node);
tokill_conn = conn_node->conn;
@ -1654,7 +1651,7 @@ skip_reuse:
if (srv && reuse_mode == PR_O_REUSE_ALWS &&
!(srv_conn->flags & CO_FL_PRIVATE) &&
srv_conn->mux->avail_streams(srv_conn) > 0) {
ebmb_insert(&srv->available_conns_tree[tid], &srv_conn->hash_node->node, sizeof(srv_conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].avail_conns, &srv_conn->hash_node->node, sizeof(srv_conn->hash_node->hash));
}
else if (srv_conn->flags & CO_FL_PRIVATE ||
(reuse_mode == PR_O_REUSE_SAFE &&

View File

@ -3247,17 +3247,19 @@ out_uri_auth_compat:
/* initialize idle conns lists */
int i;
newsrv->available_conns_tree = calloc(global.nbthread, sizeof(*newsrv->available_conns_tree));
if (!newsrv->available_conns_tree) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
newsrv->per_thr = calloc(global.nbthread, sizeof(*newsrv->per_thr));
if (!newsrv->per_thr) {
ha_alert("parsing [%s:%d] : failed to allocate per-thread lists for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++)
newsrv->available_conns_tree[i] = EB_ROOT;
for (i = 0; i < global.nbthread; i++) {
newsrv->per_thr[i].idle_conns = EB_ROOT;
newsrv->per_thr[i].safe_conns = EB_ROOT;
newsrv->per_thr[i].avail_conns = EB_ROOT;
}
if (newsrv->max_idle_conns != 0) {
if (idle_conn_task == NULL) {
@ -3279,28 +3281,6 @@ out_uri_auth_compat:
}
}
newsrv->idle_conns_tree = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_conns_tree));
if (!newsrv->idle_conns_tree) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++)
newsrv->idle_conns_tree[i] = EB_ROOT;
newsrv->safe_conns_tree = calloc(global.nbthread, sizeof(*newsrv->safe_conns_tree));
if (!newsrv->safe_conns_tree) {
ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
newsrv->conf.file, newsrv->conf.line, newsrv->id);
cfgerr++;
continue;
}
for (i = 0; i < global.nbthread; i++)
newsrv->safe_conns_tree[i] = EB_ROOT;
newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
if (!newsrv->curr_idle_thr)
goto err;

View File

@ -70,7 +70,7 @@ int conn_create_mux(struct connection *conn)
*/
if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
!(conn->flags & CO_FL_PRIVATE) && conn->mux->avail_streams(conn) > 0)
ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].avail_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else if (conn->flags & CO_FL_PRIVATE) {
/* If it fail now, the same will be done in mux->detach() callback */
session_add_conn(sess, conn, conn->target);

View File

@ -2658,9 +2658,7 @@ void deinit(void)
free(s->hostname);
free(s->hostname_dn);
free((char*)s->conf.file);
free(s->idle_conns_tree);
free(s->safe_conns_tree);
free(s->available_conns_tree);
free(s->per_thr);
free(s->curr_idle_thr);
free(s->resolvers_id);
free(s->addr_node.key);

View File

@ -9177,8 +9177,6 @@ void hlua_init(void) {
socket_tcp.obj_type = OBJ_TYPE_SERVER;
MT_LIST_INIT(&socket_tcp.actconns);
socket_tcp.pendconns = EB_ROOT;
socket_tcp.idle_conns_tree = NULL;
socket_tcp.safe_conns_tree = NULL;
LIST_ADD(&servers_list, &socket_tcp.global_list);
socket_tcp.next_state = SRV_ST_RUNNING; /* early server setup */
socket_tcp.last_change = 0;
@ -9225,8 +9223,6 @@ void hlua_init(void) {
socket_ssl.obj_type = OBJ_TYPE_SERVER;
MT_LIST_INIT(&socket_ssl.actconns);
socket_ssl.pendconns = EB_ROOT;
socket_ssl.idle_conns_tree = NULL;
socket_ssl.safe_conns_tree = NULL;
LIST_ADD(&servers_list, &socket_ssl.global_list);
socket_ssl.next_state = SRV_ST_RUNNING; /* early server setup */
socket_ssl.last_change = 0;
@ -9296,8 +9292,12 @@ static void hlua_deinit()
if (hlua_states[thr])
lua_close(hlua_states[thr]);
}
ha_free(&socket_tcp.per_thr);
ha_free((char**)&socket_tcp.conf.file);
#ifdef USE_OPENSSL
ha_free(&socket_ssl.per_thr);
ha_free((char**)&socket_ssl.conf.file);
#endif
}

View File

@ -3029,9 +3029,9 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;
@ -3639,7 +3639,7 @@ static void fcgi_detach(struct conn_stream *cs)
else if (!fconn->conn->hash_node->node.node.leaf_p &&
fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target) &&
!LIST_ADDED(&fconn->conn->session_list)) {
ebmb_insert(&__objt_server(fconn->conn->target)->available_conns_tree[tid],
ebmb_insert(&__objt_server(fconn->conn->target)->per_thr[tid].avail_conns,
&fconn->conn->hash_node->node,
sizeof(fconn->conn->hash_node->hash));
}

View File

@ -2855,9 +2855,9 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;

View File

@ -3831,9 +3831,9 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
@ -4274,7 +4274,7 @@ static void h2_detach(struct conn_stream *cs)
else if (!h2c->conn->hash_node->node.node.leaf_p &&
h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
!LIST_ADDED(&h2c->conn->session_list)) {
ebmb_insert(&__objt_server(h2c->conn->target)->available_conns_tree[tid],
ebmb_insert(&__objt_server(h2c->conn->target)->per_thr[tid].avail_conns,
&h2c->conn->hash_node->node,
sizeof(h2c->conn->hash_node->hash));
}

View File

@ -4622,9 +4622,9 @@ static void srv_cleanup_connections(struct server *srv)
for (i = tid;;) {
did_remove = 0;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
if (srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conns, &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
if (srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
if (srv_migrate_conns_to_remove(&srv->per_thr[i].safe_conns, &idle_conns[i].toremove_conns, -1) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
if (did_remove)
@ -4697,11 +4697,11 @@ struct task *srv_cleanup_idle_conns(struct task *task, void *context, unsigned i
curr_idle + 1;
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
j = srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, max_conn);
j = srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conns, &idle_conns[i].toremove_conns, max_conn);
if (j > 0)
did_remove = 1;
if (max_conn - j > 0 &&
srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
srv_migrate_conns_to_remove(&srv->per_thr[i].safe_conns, &idle_conns[i].toremove_conns, max_conn - j) > 0)
did_remove = 1;
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);

View File

@ -5910,9 +5910,9 @@ leave:
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (conn_in_list == CO_FL_SAFE_LIST)
ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
else
ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node->node, sizeof(conn->hash_node->hash));
ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
return NULL;