MINOR: quic: Replace the RX unprotected packet list by a thread-safe one.

This list is shared between the I/O dgram handler and the task responsible
for processing the QUIC packets it contains.
Frédéric Lécaille 2021-06-07 14:38:18 +02:00 committed by Amaury Denoyelle
parent c28aba2a8d
commit a11d0e26d4
3 changed files with 13 additions and 11 deletions
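Before the per-file hunks, here is a minimal, hypothetical sketch of the sharing pattern described above: the I/O dgram handler appends received packets to a struct mt_list head that another task later drains. Only MT_LIST macros that appear in this commit are used; the header paths, function names and the standalone list head are illustrative assumptions, not code from the patch.

/* Illustrative sketch only: names prefixed with "example_" are hypothetical. */
#include <haproxy/list.h>         /* struct mt_list, MT_LIST_* macros (assumed location) */
#include <haproxy/xprt_quic-t.h>  /* struct quic_rx_packet (assumed location) */

/* Hypothetical shared head; in the patch the real head is qel->rx.pqpkts. */
static struct mt_list example_pqpkts;

/* Done once at initialization time. */
static void example_init(void)
{
	MT_LIST_INIT(&example_pqpkts);
}

/* Producer side (the I/O dgram handler): queue one RX packet. The MT_LIST
 * append is safe against a concurrent consumer, so no extra lock is needed
 * around the list operation itself. */
static void example_enqueue(struct quic_rx_packet *pkt)
{
	MT_LIST_APPEND(&example_pqpkts, &pkt->list);
}

/* Consumer side (the processing task): cheap emptiness check before doing
 * any work, as the qc_do_hdshk() hunks below do with MT_LIST_ISEMPTY(). */
static int example_has_work(void)
{
	return !MT_LIST_ISEMPTY(&example_pqpkts);
}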


@@ -394,7 +394,7 @@ extern struct quic_transport_params quic_dflt_transport_params;
 #define QUIC_FL_RX_PACKET_ACK_ELICITING (1UL << 0)

 struct quic_rx_packet {
-	struct list list;
+	struct mt_list list;
 	struct mt_list rx_list;
 	struct quic_conn *qc;
 	unsigned char type;
@@ -533,7 +533,7 @@ struct quic_enc_level {
 	/* <pkts> must be protected from concurrent accesses */
 	__decl_thread(HA_RWLOCK_T rwlock);
 	/* Liste of QUIC packets with protected header. */
-	struct list pqpkts;
+	struct mt_list pqpkts;
 	/* Crypto frames */
 	struct {
 		uint64_t offset;


@@ -1100,17 +1100,17 @@ static inline void quic_rx_packet_refdec(struct quic_rx_packet *pkt)
 }

 /* Add <pkt> RX packet to <list>, incrementing its reference counter. */
-static inline void quic_rx_packet_list_addq(struct list *list,
+static inline void quic_rx_packet_list_addq(struct mt_list *list,
                                             struct quic_rx_packet *pkt)
 {
-	LIST_APPEND(list, &pkt->list);
+	MT_LIST_APPEND(list, &pkt->list);
 	quic_rx_packet_refinc(pkt);
 }

 /* Remove <pkt> RX packet from <list>, decrementing its reference counter. */
 static inline void quic_rx_packet_list_del(struct quic_rx_packet *pkt)
 {
-	LIST_DELETE(&pkt->list);
+	MT_LIST_DELETE(&pkt->list);
 	quic_rx_packet_refdec(pkt);
 }
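A short, hypothetical caller for the two helpers above, to make their reference-counting contract explicit: the list takes one reference on the packet when it is queued and releases it when the packet is unlinked. The wrapper function name is illustrative, not from the patch.

/* Hypothetical caller, for illustration only. */
static void example_queue_then_unlink(struct mt_list *list,
                                      struct quic_rx_packet *pkt)
{
	/* Appends &pkt->list to <list> and increments the reference
	 * counter: the list now holds its own reference on <pkt>. */
	quic_rx_packet_list_addq(list, pkt);

	/* ... <pkt> is consumed by another task ... */

	/* Unlinks <pkt> and drops the list's reference via
	 * quic_rx_packet_refdec(). */
	quic_rx_packet_list_del(pkt);
}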


@@ -2369,7 +2369,8 @@ int quic_update_ack_ranges_list(struct quic_arngs *arngs,
 static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx *ctx)
 {
 	struct quic_tls_ctx *tls_ctx;
-	struct quic_rx_packet *pqpkt, *qqpkt;
+	struct quic_rx_packet *pqpkt;
+	struct mt_list *pkttmp1, pkttmp2;
 	struct quic_enc_level *app_qel;

 	TRACE_ENTER(QUIC_EV_CONN_ELRMHP, ctx->conn);
@@ -2382,7 +2383,7 @@ static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx
 		goto out;
 	}
 	tls_ctx = &el->tls_ctx;
-	list_for_each_entry_safe(pqpkt, qqpkt, &el->rx.pqpkts, list) {
+	mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
 		if (!qc_do_rm_hp(pqpkt, tls_ctx, el->pktns->rx.largest_pn,
 		                 pqpkt->data + pqpkt->pn_offset,
 		                 pqpkt->data, pqpkt->data + pqpkt->len, ctx)) {
@@ -2399,7 +2400,8 @@ static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx
 			HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.rwlock);
 			TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, ctx->conn, pqpkt);
 		}
-		quic_rx_packet_list_del(pqpkt);
+		MT_LIST_DELETE_SAFE(pkttmp1);
+		quic_rx_packet_refdec(pqpkt);
 	}

  out:
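To clarify the change just above (quic_rx_packet_list_del() replaced by MT_LIST_DELETE_SAFE() plus quic_rx_packet_refdec()): as I understand HAProxy's MT_LIST API, mt_list_for_each_entry_safe() temporarily detaches the element it is visiting, so deleting it from inside the loop goes through the saved temporary rather than through the element's own list member, and the list's reference is then dropped by hand. A commented restatement of that pattern follows; the wrapper function name is hypothetical.

/* Hypothetical wrapper around the loop above, for illustration only. */
static void example_drain_pqpkts(struct quic_enc_level *el)
{
	struct quic_rx_packet *pqpkt;
	struct mt_list *pkttmp1, pkttmp2;  /* one pointer plus one backup element */

	/* <pkttmp1>/<pkttmp2> let the macro detach and later re-attach the
	 * element currently being visited, so producers may keep appending
	 * to &el->rx.pqpkts while this task iterates. */
	mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
		/* ... header protection removal happens here ... */

		/* Delete the element through the temporary so the macro does
		 * not re-link <pqpkt> at the end of the iteration, then
		 * release the reference the list was holding on the packet. */
		MT_LIST_DELETE_SAFE(pkttmp1);
		quic_rx_packet_refdec(pqpkt);
	}
}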
@@ -2529,7 +2531,7 @@ int qc_do_hdshk(struct ssl_sock_ctx *ctx)
 	/* If the header protection key for this level has been derived,
 	 * remove the packet header protections.
 	 */
-	if (!LIST_ISEMPTY(&qel->rx.pqpkts) &&
+	if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) &&
 	    (tls_ctx->rx.flags & QUIC_FL_TLS_SECRETS_SET))
 		qc_rm_hp_pkts(qel, ctx);
@@ -2546,7 +2548,7 @@ int qc_do_hdshk(struct ssl_sock_ctx *ctx)
 	/* Check if there is something to do for the next level.
 	 */
 	if ((next_qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET) &&
-	    (!LIST_ISEMPTY(&next_qel->rx.pqpkts) || !eb_is_empty(&next_qel->rx.pkts))) {
+	    (!MT_LIST_ISEMPTY(&next_qel->rx.pqpkts) || !eb_is_empty(&next_qel->rx.pkts))) {
 		qel = next_qel;
 		goto next_level;
 	}
@@ -2619,7 +2621,7 @@ static int quic_conn_enc_level_init(struct quic_conn *qc,
 	qel->rx.pkts = EB_ROOT;
 	HA_RWLOCK_INIT(&qel->rx.rwlock);
-	LIST_INIT(&qel->rx.pqpkts);
+	MT_LIST_INIT(&qel->rx.pqpkts);

 	/* Allocate only one buffer. */
 	qel->tx.crypto.bufs = malloc(sizeof *qel->tx.crypto.bufs);