BUG/MEDIUM: connection: fix srv idle count on conn takeover

On server connection migration from one thread to another, the wrong
idle thread-specific counter is decremented. This bug has been present
since commit 3d52f0f1f8 due to the
factorization of this code into srv_use_idle_conn. However, this statement is only
executed from conn_backend_get. Extract the decrement from
srv_use_idle_conn in conn_backend_get and use the correct
thread-specific counter.

Rename the function to srv_use_conn to better reflect its purpose as it
is also used with a newly initialized connection not in the idle list.

As a side change, the connection insertion to available list has also
been extracted to conn_backend_get. This will be useful to be able to
specify an alternative list for protocols subject to head-of-line (HOL)
blocking risk, which should not be shared between several clients.

This bug is only present in this release and thus does not need a backport.
This commit is contained in:
Amaury Denoyelle 2020-10-14 18:17:04 +02:00 committed by Christopher Faulet
parent 5f1ded5629
commit 9c13b62b47
3 changed files with 14 additions and 12 deletions

View File

@ -396,7 +396,7 @@ static inline struct connection *conn_new(void *target)
if (likely(conn != NULL)) {
conn_init(conn, target);
if (obj_type(target) == OBJ_TYPE_SERVER)
srv_use_idle_conn(__objt_server(target), conn);
srv_use_conn(__objt_server(target), conn);
}
return conn;
}

View File

@ -239,17 +239,8 @@ static inline enum srv_initaddr srv_get_next_initaddr(unsigned int *list)
return ret;
}
static inline void srv_use_idle_conn(struct server *srv, struct connection *conn)
static inline void srv_use_conn(struct server *srv, struct connection *conn)
{
if (conn->flags & CO_FL_LIST_MASK) {
_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
_HA_ATOMIC_SUB(&srv->curr_idle_thr[tid], 1);
conn->flags &= ~CO_FL_LIST_MASK;
__ha_barrier_atomic_store();
LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list));
}
_HA_ATOMIC_ADD(&srv->curr_used_conns, 1);
/* It's ok not to do that atomically, we don't need an

View File

@ -1167,6 +1167,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
MT_LIST_DEL_SAFE(elt1);
_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
found = 1;
break;
}
}
@ -1179,6 +1180,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
found = 1;
is_safe = 1;
mt_list = srv->safe_conns;
break;
}
}
@ -1191,7 +1193,16 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
done:
if (conn) {
_HA_ATOMIC_STORE(&srv->next_takeover, (i + 1 == global.nbthread) ? 0 : i + 1);
srv_use_idle_conn(srv, conn);
srv_use_conn(srv, conn);
_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
_HA_ATOMIC_SUB(&srv->curr_idle_thr[i], 1);
conn->flags &= ~CO_FL_LIST_MASK;
__ha_barrier_atomic_store();
LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list));
}
return conn;
}