MINOR: quic: always use ncbuf for rx CRYPTO

The previous patch fixed the handling of in-order CRYPTO frames, which required a new buffer for this data since its processing is now delayed to run under TASK_HEAVY.

In fact, since the handling of all CRYPTO frames must now be delayed, it can be unified. This is the purpose of this commit, which removes the buffer that was just introduced. All CRYPTO frames are now buffered inside the ncbuf. Unused elements, such as the crypto_frms member of the encryption level, are also removed.
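
To make the unified model concrete, here is a minimal, self-contained C sketch of the idea, using hypothetical names (crypto_rx_store()/crypto_rx_drain() over a fixed-size window) rather than HAProxy's actual ncbuf API: every received CRYPTO frame, in order or not, is stored at its absolute offset, and a deferred step later forwards only the contiguous prefix to the handshake layer.

#include <stddef.h>
#include <string.h>

#define RXBUF_SZ 4096

struct crypto_rx {
	unsigned char data[RXBUF_SZ];   /* reassembly area */
	unsigned char filled[RXBUF_SZ]; /* filled[i] != 0 if byte base+i was received */
	size_t base;                    /* absolute stream offset of data[0] */
};

/* RX path: buffer one CRYPTO frame at its absolute offset, ordered or not. */
static int crypto_rx_store(struct crypto_rx *rx, size_t off,
                           const unsigned char *d, size_t len)
{
	if (off < rx->base || len > RXBUF_SZ || off - rx->base > RXBUF_SZ - len)
		return 0; /* outside the window; a real implementation would reject or grow */
	memcpy(rx->data + (off - rx->base), d, len);
	memset(rx->filled + (off - rx->base), 1, len);
	return 1;
}

/* Deferred step, run later from a heavy task: hand the contiguous prefix to
 * the handshake layer, then slide the window forward. */
static size_t crypto_rx_drain(struct crypto_rx *rx,
                              void (*provide)(const unsigned char *, size_t))
{
	size_t n = 0;

	while (n < RXBUF_SZ && rx->filled[n])
		n++;
	if (n) {
		provide(rx->data, n);
		memmove(rx->data, rx->data + n, RXBUF_SZ - n);
		memmove(rx->filled, rx->filled + n, RXBUF_SZ - n);
		memset(rx->filled + (RXBUF_SZ - n), 0, n);
		rx->base += n;
	}
	return n;
}

Under such a scheme the in-order case is simply the degenerate case that immediately extends the contiguous prefix, which is why a dedicated in-order buffer is no longer needed: reception calls crypto_rx_store() for every frame, and the heavy task later calls crypto_rx_drain().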

This commit is not a bugfix but is a direct follow-up to the previous one. As such, it can probably be backported along with it to 2.9 to reduce code differences between these versions.
Amaury Denoyelle 2024-03-08 16:47:36 +01:00
parent 81f118cec0
commit 1ee7bf5bd9
4 changed files with 1 addition and 63 deletions


@@ -226,7 +226,6 @@ struct quic_cstream {
 	struct {
 		uint64_t offset;    /* absolute current base offset of ncbuf */
 		struct ncbuf ncbuf; /* receive buffer - can handle out-of-order offset frames */
-		struct buffer buf;  /* receive buffer - handle only in-order data */
 	} rx;
 	struct {
 		uint64_t offset; /* last offset of data ready to be sent */
@@ -256,8 +255,6 @@
 		struct eb_root pkts;
 		/* List of QUIC packets with protected header. */
 		struct list pqpkts;
-		/* List of crypto frames received in order. */
-		struct list crypto_frms;
 	} rx;
 	/* TX part */


@@ -719,36 +719,7 @@ static int qc_handle_crypto_frm(struct quic_conn *qc,
 		crypto_frm->offset = cstream->rx.offset;
 	}
 
-	if (crypto_frm->offset == cstream->rx.offset && ncb_is_empty(ncbuf)) {
-		struct buffer *buf = &qel->cstream->rx.buf;
-		struct qf_crypto *qf_crypto;
-
-		if (!b_alloc(buf)) {
-			TRACE_ERROR("in-order buffer allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
-			goto leave;
-		}
-
-		qf_crypto = pool_alloc(pool_head_qf_crypto);
-		if (!qf_crypto) {
-			TRACE_ERROR("CRYPTO frame allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
-			goto leave;
-		}
-
-		qf_crypto->offset = crypto_frm->offset;
-		qf_crypto->len = crypto_frm->len;
-		qf_crypto->data = (unsigned char *)b_tail(buf);
-		b_putblk(buf, (char *)crypto_frm->data, crypto_frm->len);
-		qf_crypto->qel = qel;
-		LIST_APPEND(&qel->rx.crypto_frms, &qf_crypto->list);
-
-		cstream->rx.offset += crypto_frm->len;
-		HA_ATOMIC_OR(&qc->wait_event.tasklet->state, TASK_HEAVY);
-		TRACE_DEVEL("increment crypto level offset", QUIC_EV_CONN_PHPKTS, qc, qel);
-		goto done;
-	}
-
-	if (!quic_get_ncbuf(ncbuf) ||
-	    ncb_is_null(ncbuf)) {
+	if (!quic_get_ncbuf(ncbuf) || ncb_is_null(ncbuf)) {
 		TRACE_ERROR("CRYPTO ncbuf allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
 		goto leave;
 	}
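
For reference, once the deleted fast path is gone, the part of qc_handle_crypto_frm shown in this hunk reduces to the generic path below (a rough reconstruction from the hunk's context and added lines; the surrounding code, including the actual insertion of the frame data into the ncbuf, is unchanged and elided):

	/* Every CRYPTO frame now takes the same route: allocate the ncbuf if
	 * needed, then buffer the frame data for deferred processing. */
	if (!quic_get_ncbuf(ncbuf) || ncb_is_null(ncbuf)) {
		TRACE_ERROR("CRYPTO ncbuf allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
		goto leave;
	}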


@@ -666,35 +666,14 @@ int qc_ssl_provide_all_quic_data(struct quic_conn *qc, struct ssl_sock_ctx *ctx)
 {
 	int ret = 0;
 	struct quic_enc_level *qel;
-	struct ncbuf ncbuf = NCBUF_NULL;
 
 	TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
 	list_for_each_entry(qel, &qc->qel_list, list) {
-		struct qf_crypto *qf_crypto, *qf_back;
 		struct quic_cstream *cstream = qel->cstream;
 
-		list_for_each_entry_safe(qf_crypto, qf_back, &qel->rx.crypto_frms, list) {
-			const unsigned char *crypto_data = qf_crypto->data;
-			size_t crypto_len = qf_crypto->len;
-
-			/* Free this frame asap */
-			LIST_DELETE(&qf_crypto->list);
-			pool_free(pool_head_qf_crypto, qf_crypto);
-
-			if (!qc_ssl_provide_quic_data(&ncbuf, qel->level, ctx,
-			                              crypto_data, crypto_len))
-				goto leave;
-
-			TRACE_DEVEL("buffered crypto data were provided to TLS stack",
-			            QUIC_EV_CONN_PHPKTS, qc, qel);
-		}
-
 		if (!cstream)
 			continue;
 
-		b_free(&cstream->rx.buf);
-		cstream->rx.buf = BUF_NULL;
-
 		if (!qc_treat_rx_crypto_frms(qc, qel, ctx))
 			goto leave;
 	}
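
After these deletions, the per-encryption-level loop of qc_ssl_provide_all_quic_data reduces to roughly the following (reconstructed from the context lines of the hunk above):

	list_for_each_entry(qel, &qc->qel_list, list) {
		struct quic_cstream *cstream = qel->cstream;

		if (!cstream)
			continue;

		/* All buffered CRYPTO data now lives in cstream->rx.ncbuf and is
		 * handed to the TLS stack from here. */
		if (!qc_treat_rx_crypto_frms(qc, qel, ctx))
			goto leave;
	}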


@@ -124,7 +124,6 @@ void quic_cstream_free(struct quic_cstream *cs)
 	}
 
 	quic_free_ncbuf(&cs->rx.ncbuf);
-	b_free(&cs->rx.buf);
 
 	qc_stream_desc_release(cs->desc, 0);
 	pool_free(pool_head_quic_cstream, cs);
@@ -146,7 +145,6 @@ struct quic_cstream *quic_cstream_new(struct quic_conn *qc)
 	cs->rx.offset = 0;
 	cs->rx.ncbuf = NCBUF_NULL;
-	cs->rx.buf = BUF_NULL;
 	cs->rx.offset = 0;
 
 	cs->tx.offset = 0;
@@ -172,7 +170,6 @@ struct quic_cstream *quic_cstream_new(struct quic_conn *qc)
 void quic_conn_enc_level_uninit(struct quic_conn *qc, struct quic_enc_level *qel)
 {
 	int i;
-	struct qf_crypto *qf_crypto, *qfback;
 
 	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
@@ -183,11 +180,6 @@ void quic_conn_enc_level_uninit(struct quic_conn *qc, struct quic_enc_level *qel
 		}
 	}
 
-	list_for_each_entry_safe(qf_crypto, qfback, &qel->rx.crypto_frms, list) {
-		LIST_DELETE(&qf_crypto->list);
-		pool_free(pool_head_qf_crypto, qf_crypto);
-	}
-
 	ha_free(&qel->tx.crypto.bufs);
 	quic_cstream_free(qel->cstream);
@@ -225,7 +217,6 @@ static int quic_conn_enc_level_init(struct quic_conn *qc,
 	qel->rx.pkts = EB_ROOT;
 	LIST_INIT(&qel->rx.pqpkts);
-	LIST_INIT(&qel->rx.crypto_frms);
 
 	/* Allocate only one buffer. */
 	/* TODO: use a pool */