BUG/MEDIUM: quic: properly take shards into account on bind lines

Shards were completely forgotten in commit f5a0c8abf ("MEDIUM: quic:
respect the threads assigned to a bind line"). The thread mask is
taken from the bind_conf, but since shards were introduced in 2.5,
the per-listener mask is held by the receiver and can be smaller
than the bind_conf's mask.

The effect here is that the traffic is not distributed to the
appropriate thread. At first glance it's not dramatic since the chosen
thread remains one of those eligible for the bind_conf, but it still
means that in some contexts such as "shards by-thread", some concurrency
may persist on listeners while they're expected to be alone. One
identified impact is that it requires more rxbufs than necessary, but
there may be other side effects that have not been identified yet.
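
To illustrate, here is a minimal standalone sketch (not part of the
commit; the 4-thread layout and mask values are hypothetical): with
"shards by-thread", the bind_conf's mask is the union of all shards'
receiver masks, so a thread ID that passes a check against the
bind_conf's mask can still miss the mask of the receiver that actually
owns the socket:

#include <stdio.h>

/* Hypothetical 4-thread bind line with "shards by-thread": the
 * bind_conf mask covers threads 0-3 while each shard's receiver
 * keeps a single-thread mask.
 */
int main(void)
{
	unsigned long bind_conf_mask = 0xF;                /* threads 0-3 */
	unsigned long rx_mask[4] = { 0x1, 0x2, 0x4, 0x8 }; /* one per shard */
	unsigned tid = 2;                     /* thread ID derived from a CID */

	for (int shard = 0; shard < 4; shard++) {
		printf("shard %d: tid %u valid for bind_conf=%d, for receiver=%d\n",
		       shard, tid,
		       !!(bind_conf_mask & (1UL << tid)),
		       !!(rx_mask[shard] & (1UL << tid)));
	}
	/* tid 2 is always valid for the bind_conf but only for shard 2's
	 * receiver: validating against the bind_conf mask lets traffic
	 * land on threads a given receiver is not bound to.
	 */
	return 0;
}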

This must be backported to 2.7 and everywhere the commit above is
backported.
commit eed7826529 (parent 15337fd808)
Author: Willy Tarreau
Date:   2022-12-21 09:09:19 +01:00

3 changed files with 8 additions and 8 deletions


@@ -213,7 +213,7 @@ static inline void quic_connection_id_to_frm_cpy(struct quic_frame *dst,
 	to->stateless_reset_token = src->stateless_reset_token;
 }
 
-/* extract a TID from a CID for bind_conf <bc>, from 0 to global.nbthread-1 and
+/* extract a TID from a CID for receiver <rx>, from 0 to global.nbthread-1 and
  * in any case no more than 4095. It takes into account the bind_conf's thread
  * group and the bind_conf's thread mask. The algorithm is the following: most
  * packets contain a valid thread ID for the bind_conf, which means that the
@@ -221,27 +221,27 @@ static inline void quic_connection_id_to_frm_cpy(struct quic_frame *dst,
  * then we have to remap it. The resulting thread ID will then differ but will
  * be correctly encoded and decoded.
  */
-static inline uint quic_get_cid_tid(const unsigned char *cid, const struct bind_conf *bc)
+static inline uint quic_get_cid_tid(const unsigned char *cid, const struct receiver *rx)
 {
 	uint id, grp;
 	uint base, count;
 
 	id = read_n16(cid) & 4095;
-	grp = bc->bind_tgroup;
+	grp = rx->bind_tgroup;
 	base = ha_tgroup_info[grp - 1].base;
 	count = ha_tgroup_info[grp - 1].count;
 
 	if (base <= id && id < base + count &&
-	    bc->bind_thread & ha_thread_info[id].ltid_bit)
+	    rx->bind_thread & ha_thread_info[id].ltid_bit)
 		return id; // part of the group and bound: valid
 
 	/* The thread number isn't valid, it doesn't map to a thread bound on
 	 * this receiver. Let's reduce it to one of the thread(s) valid for
 	 * that receiver.
 	 */
-	count = my_popcountl(bc->bind_thread);
+	count = my_popcountl(rx->bind_thread);
 	id = count - 1 - id % count;
-	id = mask_find_rank_bit(id, bc->bind_thread);
+	id = mask_find_rank_bit(id, rx->bind_thread);
 	id += base;
 	return id;
 }
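
As a side note (not part of the commit), the remapping step can be
exercised in isolation. A minimal sketch, assuming a hypothetical
receiver mask, naive stand-ins for my_popcountl() and
mask_find_rank_bit(), and a thread group base of 0; it shows that the
remapped ID always lands on a thread present in the receiver's mask:

#include <stdio.h>

/* Naive stand-ins: count set bits, and return the position of the
 * r-th set bit of <m> (r counted from 0).
 */
static unsigned popcount_naive(unsigned long m)
{
	unsigned n = 0;
	for (; m; m &= m - 1)
		n++;
	return n;
}

static unsigned rank_bit_naive(unsigned r, unsigned long m)
{
	for (unsigned pos = 0; ; pos++)
		if ((m >> pos) & 1 && r-- == 0)
			return pos;
}

int main(void)
{
	unsigned long rx_mask = 0x05; /* hypothetical: bound to threads 0 and 2 */

	/* Remap a few CID-derived IDs (0..4095 in HAProxy) the way
	 * quic_get_cid_tid() does once the direct match has failed.
	 */
	for (unsigned id = 0; id < 8; id++) {
		unsigned count = popcount_naive(rx_mask);
		unsigned r = count - 1 - id % count;
		printf("id %u -> thread %u\n", id, rank_bit_naive(r, rx_mask));
	}
	/* Every printed thread is 0 or 2, i.e. always within rx_mask;
	 * the real function finally adds the thread group's base
	 * (assumed 0 here).
	 */
	return 0;
}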


@@ -4920,7 +4920,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	/* Set tasklet tid based on the SCID selected by us for this
 	 * connection. The upper layer will also be binded on the same thread.
 	 */
-	qc->tid = quic_get_cid_tid(qc->scid.data, l->bind_conf);
+	qc->tid = quic_get_cid_tid(qc->scid.data, &l->rx);
 	qc->wait_event.tasklet->tid = qc->tid;
 	qc->subs = NULL;


@@ -221,7 +221,7 @@ static int quic_lstnr_dgram_dispatch(unsigned char *buf, size_t len, void *owner
 	if (!dgram)
 		goto err;
 
-	cid_tid = quic_get_cid_tid(dcid, l->bind_conf);
+	cid_tid = quic_get_cid_tid(dcid, &l->rx);
 
 	/* All the members must be initialized! */
 	dgram->owner = owner;