MINOR: quic: replace usage of ssl_sock_ctx by quic_conn

Big refactoring on xprt-quic. A lot of functions were using the
ssl_sock_ctx as argument to only access the related quic_conn. All these
arguments are replaced by a quic_conn parameter.

As a convention, the quic_conn instance is always the first parameter of
these functions.

This commit is part of the rearchitecture of xprt-quic layers and the
separation between xprt and connection instances.
This commit is contained in:
Amaury Denoyelle 2021-12-22 11:06:34 +01:00
parent 741eacca47
commit e81fed9a54

View File

@ -581,12 +581,10 @@ static void quic_trace(enum trace_level level, uint64_t mask, const struct trace
}
/* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */
static inline int quic_peer_validated_addr(struct ssl_sock_ctx *ctx)
static inline int quic_peer_validated_addr(struct quic_conn *qc)
{
struct quic_conn *qc;
struct quic_pktns *hdshk_pktns, *app_pktns;
qc = ctx->conn->qc;
if (objt_server(qc->conn->target))
return 1;
@ -603,16 +601,14 @@ static inline int quic_peer_validated_addr(struct ssl_sock_ctx *ctx)
/* Set the timer attached to the QUIC connection <qc>, used for both loss
 * detection and PTO, and schedule the task associated with this timer if needed.
 */
static inline void qc_set_timer(struct ssl_sock_ctx *ctx)
static inline void qc_set_timer(struct quic_conn *qc)
{
struct quic_conn *qc;
struct quic_pktns *pktns;
unsigned int pto;
int handshake_complete;
qc = ctx->qc;
TRACE_ENTER(QUIC_EV_CONN_STIMER, qc,
NULL, NULL, &ctx->conn->qc->path->ifae_pkts);
NULL, NULL, &qc->path->ifae_pkts);
pktns = quic_loss_pktns(qc);
if (tick_isset(pktns->tx.loss_time)) {
@ -629,7 +625,7 @@ static inline void qc_set_timer(struct ssl_sock_ctx *ctx)
goto out;
}
if (!qc->path->ifae_pkts && quic_peer_validated_addr(ctx)) {
if (!qc->path->ifae_pkts && quic_peer_validated_addr(qc)) {
TRACE_PROTO("timer cancellation", QUIC_EV_CONN_STIMER, qc);
/* Timer cancellation. */
qc->timer = TICK_ETERNITY;
@ -1225,10 +1221,10 @@ static uint64_t decode_packet_number(uint64_t largest_pn,
* <end> points to one byte past the end of this packet.
* Returns 1 if succeeded, 0 if not.
*/
static int qc_do_rm_hp(struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
static int qc_do_rm_hp(struct quic_conn *qc,
struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
int64_t largest_pn, unsigned char *pn,
unsigned char *byte0, const unsigned char *end,
struct ssl_sock_ctx *ctx)
unsigned char *byte0, const unsigned char *end)
{
int ret, outlen, i, pnlen;
uint64_t packet_number;
@ -1240,13 +1236,13 @@ static int qc_do_rm_hp(struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
/* Check there is enough data in this packet. */
if (end - pn < QUIC_PACKET_PN_MAXLEN + sizeof mask) {
TRACE_DEVEL("too short packet", QUIC_EV_CONN_RMHP, ctx->qc, pkt);
TRACE_DEVEL("too short packet", QUIC_EV_CONN_RMHP, qc, pkt);
return 0;
}
cctx = EVP_CIPHER_CTX_new();
if (!cctx) {
TRACE_DEVEL("memory allocation failed", QUIC_EV_CONN_RMHP, ctx->qc, pkt);
TRACE_DEVEL("memory allocation failed", QUIC_EV_CONN_RMHP, qc, pkt);
return 0;
}
@ -1257,7 +1253,7 @@ static int qc_do_rm_hp(struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
if (!EVP_DecryptInit_ex(cctx, tls_ctx->rx.hp, NULL, hp_key, sample) ||
!EVP_DecryptUpdate(cctx, mask, &outlen, mask, sizeof mask) ||
!EVP_DecryptFinal_ex(cctx, mask, &outlen)) {
TRACE_DEVEL("decryption failed", QUIC_EV_CONN_RMHP, ctx->qc, pkt);
TRACE_DEVEL("decryption failed", QUIC_EV_CONN_RMHP, qc, pkt);
goto out;
}
@ -1408,11 +1404,10 @@ static int qcs_try_to_consume(struct qcs *qcs)
}
/* Treat <frm>, a frame whose carrying packet has just been acknowledged. */
static inline void qc_treat_acked_tx_frm(struct quic_frame *frm,
struct ssl_sock_ctx *ctx)
static inline void qc_treat_acked_tx_frm(struct quic_conn *qc,
struct quic_frame *frm)
{
int stream_acked;
struct quic_conn *qc = ctx->qc;
TRACE_PROTO("Removing frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
stream_acked = 0;
@ -1457,11 +1452,12 @@ static inline void qc_treat_acked_tx_frm(struct quic_frame *frm,
* Returns the last node reached to be used for the next range.
* May be NULL if <largest> node could not be found.
*/
static inline struct eb64_node *qc_ackrng_pkts(struct eb_root *pkts, unsigned int *pkt_flags,
static inline struct eb64_node *qc_ackrng_pkts(struct quic_conn *qc,
struct eb_root *pkts,
unsigned int *pkt_flags,
struct list *newly_acked_pkts,
struct eb64_node *largest_node,
uint64_t largest, uint64_t smallest,
struct ssl_sock_ctx *ctx)
uint64_t largest, uint64_t smallest)
{
struct eb64_node *node;
struct quic_tx_packet *pkt;
@ -1481,9 +1477,9 @@ static inline struct eb64_node *qc_ackrng_pkts(struct eb_root *pkts, unsigned in
pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
*pkt_flags |= pkt->flags;
LIST_INSERT(newly_acked_pkts, &pkt->list);
TRACE_PROTO("Removing packet #", QUIC_EV_CONN_PRSAFRM, ctx->qc, NULL, &pkt->pn_node.key);
TRACE_PROTO("Removing packet #", QUIC_EV_CONN_PRSAFRM, qc, NULL, &pkt->pn_node.key);
list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
qc_treat_acked_tx_frm(frm, ctx);
qc_treat_acked_tx_frm(qc, frm);
node = eb64_prev(node);
eb64_delete(&pkt->pn_node);
}
@ -1494,11 +1490,11 @@ static inline struct eb64_node *qc_ackrng_pkts(struct eb_root *pkts, unsigned in
/* Treat <frm>, a frame whose carrying packet has just been detected as not
 * acknowledged.
 */
static inline void qc_treat_nacked_tx_frm(struct quic_frame *frm,
struct quic_pktns *pktns,
struct ssl_sock_ctx *ctx)
static inline void qc_treat_nacked_tx_frm(struct quic_conn *qc,
struct quic_frame *frm,
struct quic_pktns *pktns)
{
TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, ctx->qc, frm);
TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
LIST_DELETE(&frm->list);
MT_LIST_INSERT(&pktns->tx.frms, &frm->mt_list);
}
@ -1544,10 +1540,9 @@ static inline void qc_cc_loss_event(struct quic_conn *qc,
* <newly_acked_pkts> list and free them.
* Always succeeds.
*/
static inline void qc_treat_newly_acked_pkts(struct ssl_sock_ctx *ctx,
static inline void qc_treat_newly_acked_pkts(struct quic_conn *qc,
struct list *newly_acked_pkts)
{
struct quic_conn *qc = ctx->conn->qc;
struct quic_tx_packet *pkt, *tmp;
struct quic_cc_event ev = { .type = QUIC_CC_EVT_ACK, };
@ -1573,12 +1568,11 @@ static inline void qc_treat_newly_acked_pkts(struct ssl_sock_ctx *ctx,
* Also frees the packet in <pkts> list.
* Never fails.
*/
static inline void qc_release_lost_pkts(struct quic_pktns *pktns,
struct ssl_sock_ctx *ctx,
static inline void qc_release_lost_pkts(struct quic_conn *qc,
struct quic_pktns *pktns,
struct list *pkts,
uint64_t now_us)
{
struct quic_conn *qc = ctx->conn->qc;
struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
struct quic_frame *frm, *frmbak;
uint64_t lost_bytes;
@ -1593,7 +1587,7 @@ static inline void qc_release_lost_pkts(struct quic_pktns *pktns,
qc->path->ifae_pkts--;
/* Treat the frames of this lost packet. */
list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
qc_treat_nacked_tx_frm(frm, pktns, ctx);
qc_treat_nacked_tx_frm(qc, frm, pktns);
LIST_DELETE(&pkt->list);
if (!oldest_lost) {
oldest_lost = newest_lost = pkt;
@ -1607,7 +1601,7 @@ static inline void qc_release_lost_pkts(struct quic_pktns *pktns,
if (lost_bytes) {
/* Sent a packet loss event to the congestion controller. */
qc_cc_loss_event(ctx->conn->qc, lost_bytes, newest_lost->time_sent,
qc_cc_loss_event(qc, lost_bytes, newest_lost->time_sent,
newest_lost->time_sent - oldest_lost->time_sent, now_us);
quic_tx_packet_refdec(oldest_lost);
if (newest_lost != oldest_lost)
@ -1677,7 +1671,8 @@ static void qc_packet_loss_lookup(struct quic_pktns *pktns,
* acked ack-eliciting packet.
 * Return 1 if succeeded, 0 if not.
*/
static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *ctx,
static inline int qc_parse_ack_frm(struct quic_conn *qc,
struct quic_frame *frm,
struct quic_enc_level *qel,
unsigned int *rtt_sample,
const unsigned char **pos, const unsigned char *end)
@ -1692,13 +1687,13 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
if (ack->largest_ack > qel->pktns->tx.next_pn) {
TRACE_DEVEL("ACK for not sent packet", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &ack->largest_ack);
qc, NULL, &ack->largest_ack);
goto err;
}
if (ack->first_ack_range > ack->largest_ack) {
TRACE_DEVEL("too big first ACK range", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &ack->first_ack_range);
qc, NULL, &ack->first_ack_range);
goto err;
}
@ -1713,7 +1708,7 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
largest_node = eb64_lookup(pkts, largest);
if (!largest_node) {
TRACE_DEVEL("Largest acked packet not found",
QUIC_EV_CONN_PRSAFRM, ctx->qc);
QUIC_EV_CONN_PRSAFRM, qc);
}
else {
time_sent = eb64_entry(&largest_node->node,
@ -1722,12 +1717,12 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
}
TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &largest, &smallest);
qc, NULL, &largest, &smallest);
do {
uint64_t gap, ack_range;
qc_ackrng_pkts(pkts, &pkt_flags, &newly_acked_pkts,
largest_node, largest, smallest, ctx);
qc_ackrng_pkts(qc, pkts, &pkt_flags, &newly_acked_pkts,
largest_node, largest, smallest);
if (!ack->ack_range_num--)
break;
@ -1736,7 +1731,7 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
if (smallest < gap + 2) {
TRACE_DEVEL("wrong gap value", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &gap, &smallest);
qc, NULL, &gap, &smallest);
goto err;
}
@ -1746,7 +1741,7 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
if (largest < ack_range) {
TRACE_DEVEL("wrong ack range value", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &largest, &ack_range);
qc, NULL, &largest, &ack_range);
goto err;
}
@ -1756,7 +1751,7 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
smallest = largest - ack_range;
TRACE_PROTO("ack range", QUIC_EV_CONN_PRSAFRM,
ctx->qc, NULL, &largest, &smallest);
qc, NULL, &largest, &smallest);
} while (1);
/* Flag this packet number space as having received an ACK. */
@ -1769,14 +1764,14 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
if (!LIST_ISEMPTY(&newly_acked_pkts)) {
if (!eb_is_empty(&qel->pktns->tx.pkts)) {
qc_packet_loss_lookup(qel->pktns, ctx->conn->qc, &lost_pkts);
qc_packet_loss_lookup(qel->pktns, qc, &lost_pkts);
if (!LIST_ISEMPTY(&lost_pkts))
qc_release_lost_pkts(qel->pktns, ctx, &lost_pkts, now_ms);
qc_release_lost_pkts(qc, qel->pktns, &lost_pkts, now_ms);
}
qc_treat_newly_acked_pkts(ctx, &newly_acked_pkts);
if (quic_peer_validated_addr(ctx))
ctx->conn->qc->path->loss.pto_count = 0;
qc_set_timer(ctx);
qc_treat_newly_acked_pkts(qc, &newly_acked_pkts);
if (quic_peer_validated_addr(qc))
qc->path->loss.pto_count = 0;
qc_set_timer(qc);
}
@ -1784,7 +1779,7 @@ static inline int qc_parse_ack_frm(struct quic_frame *frm, struct ssl_sock_ctx *
err:
free_quic_tx_pkts(&newly_acked_pkts);
TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSAFRM, ctx->qc);
TRACE_DEVEL("leaving in error", QUIC_EV_CONN_PRSAFRM, qc);
return 0;
}
@ -2201,7 +2196,7 @@ static int qc_parse_pkt_frms(struct quic_rx_packet *pkt, struct ssl_sock_ctx *ct
unsigned int rtt_sample;
rtt_sample = 0;
if (!qc_parse_ack_frm(&frm, ctx, qel, &rtt_sample, &pos, end))
if (!qc_parse_ack_frm(qc, &frm, qel, &rtt_sample, &pos, end))
goto err;
if (rtt_sample) {
@ -2330,7 +2325,7 @@ static int qc_parse_pkt_frms(struct quic_rx_packet *pkt, struct ssl_sock_ctx *ct
quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PRSHPKT, ctx->conn);
quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, qc);
qc_set_timer(ctx);
qc_set_timer(ctx->qc);
if (state < QUIC_HS_ST_SERVER_HANDSHAKE)
HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_SERVER_HANDSHAKE);
}
@ -2365,9 +2360,8 @@ static inline void qc_set_dg(struct cbuf *cbuf,
* packet in this datagram.
 * Returns 1 if succeeded, or 0 if something went wrong.
*/
static int qc_prep_pkts(struct qring *qr, struct ssl_sock_ctx *ctx)
static int qc_prep_pkts(struct quic_conn *qc, struct qring *qr)
{
struct quic_conn *qc;
enum quic_tls_enc_level tel, next_tel;
struct quic_enc_level *qel;
struct cbuf *cbuf;
@ -2382,8 +2376,6 @@ static int qc_prep_pkts(struct qring *qr, struct ssl_sock_ctx *ctx)
*/
size_t dg_headlen = sizeof dglen + sizeof first_pkt;
qc = ctx->qc;
TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
if (!quic_get_tls_enc_levels(&tel, &next_tel, HA_ATOMIC_LOAD(&qc->state), 0)) {
@ -2443,7 +2435,7 @@ static int qc_prep_pkts(struct qring *qr, struct ssl_sock_ctx *ctx)
if (!prv_pkt) {
/* Leave room for the datagram header */
pos += dg_headlen;
if (!quic_peer_validated_addr(ctx) && objt_listener(ctx->conn->target)) {
if (!quic_peer_validated_addr(qc) && objt_listener(qc->conn->target)) {
if (qc->tx.prep_bytes >= 3 * qc->rx.bytes)
qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
end = pos + QUIC_MIN(qc->path->mtu, 3 * qc->rx.bytes - qc->tx.prep_bytes);
@ -2497,7 +2489,7 @@ static int qc_prep_pkts(struct qring *qr, struct ssl_sock_ctx *ctx)
quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_INITIAL]);
TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PHPKTS, qc);
quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].pktns, qc);
qc_set_timer(ctx);
qc_set_timer(qc);
HA_ATOMIC_STORE(&qc->state, QUIC_HS_ST_CLIENT_HANDSHAKE);
}
/* If the data for the current encryption level have all been sent,
@ -2630,7 +2622,7 @@ int qc_send_ppkts(struct qring *qr, struct ssl_sock_ctx *ctx)
qc->path->in_flight += pkt->in_flight_len;
pkt->pktns->tx.in_flight += pkt->in_flight_len;
if (pkt->in_flight_len)
qc_set_timer(ctx);
qc_set_timer(qc);
TRACE_PROTO("sent pkt", QUIC_EV_CONN_SPPKTS, qc, pkt);
next_pkt = pkt->next;
eb64_insert(&pkt->pktns->tx.pkts, &pkt->pn_node);
@ -2889,28 +2881,28 @@ int quic_update_ack_ranges_list(struct quic_arngs *arngs,
/* Remove the header protection of packets at <el> encryption level.
* Always succeeds.
*/
static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx *ctx)
static inline void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el)
{
struct quic_tls_ctx *tls_ctx;
struct quic_rx_packet *pqpkt;
struct mt_list *pkttmp1, pkttmp2;
struct quic_enc_level *app_qel;
TRACE_ENTER(QUIC_EV_CONN_ELRMHP, ctx->qc);
app_qel = &ctx->conn->qc->els[QUIC_TLS_ENC_LEVEL_APP];
TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc);
app_qel = &qc->els[QUIC_TLS_ENC_LEVEL_APP];
/* A server must not process incoming 1-RTT packets before the handshake is complete. */
if (el == app_qel && objt_listener(ctx->conn->target) &&
HA_ATOMIC_LOAD(&ctx->conn->qc->state) < QUIC_HS_ST_COMPLETE) {
if (el == app_qel && objt_listener(qc->conn->target) &&
HA_ATOMIC_LOAD(&qc->state) < QUIC_HS_ST_COMPLETE) {
TRACE_PROTO("hp not removed (handshake not completed)",
QUIC_EV_CONN_ELRMHP, ctx->qc);
QUIC_EV_CONN_ELRMHP, qc);
goto out;
}
tls_ctx = &el->tls_ctx;
mt_list_for_each_entry_safe(pqpkt, &el->rx.pqpkts, list, pkttmp1, pkttmp2) {
if (!qc_do_rm_hp(pqpkt, tls_ctx, el->pktns->rx.largest_pn,
if (!qc_do_rm_hp(qc, pqpkt, tls_ctx, el->pktns->rx.largest_pn,
pqpkt->data + pqpkt->pn_offset,
pqpkt->data, pqpkt->data + pqpkt->len, ctx)) {
TRACE_PROTO("hp removing error", QUIC_EV_CONN_ELRMHP, ctx->qc);
pqpkt->data, pqpkt->data + pqpkt->len)) {
TRACE_PROTO("hp removing error", QUIC_EV_CONN_ELRMHP, qc);
/* XXX TO DO XXX */
}
else {
@ -2922,14 +2914,14 @@ static inline void qc_rm_hp_pkts(struct quic_enc_level *el, struct ssl_sock_ctx
eb64_insert(&el->rx.pkts, &pqpkt->pn_node);
quic_rx_packet_refinc(pqpkt);
HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.pkts_rwlock);
TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, ctx->qc, pqpkt);
TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, qc, pqpkt);
}
MT_LIST_DELETE_SAFE(pkttmp1);
quic_rx_packet_refdec(pqpkt);
}
out:
TRACE_LEAVE(QUIC_EV_CONN_ELRMHP, ctx->qc);
TRACE_LEAVE(QUIC_EV_CONN_ELRMHP, qc);
}
/* Process all the CRYPTO frame at <el> encryption level.
@ -3078,7 +3070,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
*/
if (!MT_LIST_ISEMPTY(&qel->rx.pqpkts) &&
(tls_ctx->rx.flags & QUIC_FL_TLS_SECRETS_SET))
qc_rm_hp_pkts(qel, ctx);
qc_rm_hp_pkts(qc, qel);
prev_st = HA_ATOMIC_LOAD(&qc->state);
force_ack = qel == &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL] ||
@ -3100,7 +3092,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
quic_tls_discard_keys(&qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE]);
TRACE_PROTO("discarding Handshake pktns", QUIC_EV_CONN_PHPKTS, qc);
quic_pktns_discard(qc->els[QUIC_TLS_ENC_LEVEL_HANDSHAKE].pktns, qc);
qc_set_timer(ctx);
qc_set_timer(qc);
if (!quic_build_post_handshake_frames(qc))
goto err;
@ -3111,7 +3103,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
if (!qr)
qr = MT_LIST_POP(qc->tx.qring_list, typeof(qr), mt_list);
ret = qc_prep_pkts(qr, ctx);
ret = qc_prep_pkts(qc, qr);
if (ret == -1)
goto err;
else if (ret == 0)
@ -3253,8 +3245,8 @@ static struct task *process_timer(struct task *task, void *ctx, unsigned int sta
qc_packet_loss_lookup(pktns, qc, &lost_pkts);
if (!LIST_ISEMPTY(&lost_pkts))
qc_release_lost_pkts(pktns, ctx, &lost_pkts, now_ms);
qc_set_timer(conn_ctx);
qc_release_lost_pkts(qc, pktns, &lost_pkts, now_ms);
qc_set_timer(qc);
goto out;
}
@ -3476,8 +3468,7 @@ static inline int quic_packet_read_long_header(unsigned char **buf, const unsign
* encryption level matching with the packet type. <*qel> may be null if not found.
* Note that <ctx> may be null (for Initial packets).
*/
static int qc_pkt_may_rm_hp(struct quic_rx_packet *pkt,
struct quic_conn *qc, struct ssl_sock_ctx *ctx,
static int qc_pkt_may_rm_hp(struct quic_conn *qc, struct quic_rx_packet *pkt,
struct quic_enc_level **qel)
{
enum quic_tls_enc_level tel;
@ -3490,13 +3481,13 @@ static int qc_pkt_may_rm_hp(struct quic_rx_packet *pkt,
*qel = &qc->els[tel];
if ((*qel)->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD) {
TRACE_DEVEL("Discarded keys", QUIC_EV_CONN_TRMHP, ctx->qc);
TRACE_DEVEL("Discarded keys", QUIC_EV_CONN_TRMHP, qc);
return 0;
}
if (((*qel)->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_SET) &&
(tel != QUIC_TLS_ENC_LEVEL_APP ||
HA_ATOMIC_LOAD(&ctx->conn->qc->state) >= QUIC_HS_ST_COMPLETE))
HA_ATOMIC_LOAD(&qc->state) >= QUIC_HS_ST_COMPLETE))
return 1;
return 0;
@ -3519,11 +3510,11 @@ static void qc_pkt_insert(struct quic_rx_packet *pkt, struct quic_enc_level *qel
* If succeeded, this function updates <*buf> to point to the next packet in the buffer.
* Returns 1 if succeeded, 0 if not.
*/
static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
static inline int qc_try_rm_hp(struct quic_conn *qc,
struct quic_rx_packet *pkt,
unsigned char *buf, unsigned char *beg,
const unsigned char *end,
struct quic_conn *qc, struct quic_enc_level **el,
struct ssl_sock_ctx *ctx)
struct quic_enc_level **el)
{
unsigned char *pn = NULL; /* Packet number field */
struct quic_enc_level *qel;
@ -3531,20 +3522,20 @@ static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
struct quic_rx_packet *qpkt_trace;
qpkt_trace = NULL;
TRACE_ENTER(QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL);
TRACE_ENTER(QUIC_EV_CONN_TRMHP, qc);
/* The packet number is here. This is also the start minus
* QUIC_PACKET_PN_MAXLEN of the sample used to add/remove the header
* protection.
*/
pn = buf;
if (qc_pkt_may_rm_hp(pkt, qc, ctx, &qel)) {
if (qc_pkt_may_rm_hp(qc, pkt, &qel)) {
/* Note that the following function enables us to unprotect the packet
* number and its length subsequently used to decrypt the entire
* packets.
*/
if (!qc_do_rm_hp(pkt, &qel->tls_ctx,
qel->pktns->rx.largest_pn, pn, beg, end, ctx)) {
TRACE_PROTO("hp error", QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL);
if (!qc_do_rm_hp(qc, pkt, &qel->tls_ctx,
qel->pktns->rx.largest_pn, pn, beg, end)) {
TRACE_PROTO("hp error", QUIC_EV_CONN_TRMHP, qc);
goto err;
}
@ -3557,17 +3548,17 @@ static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
/* If the packet number space has been discarded, this packet
 * will not be parsed.
*/
TRACE_PROTO("Discarded pktns", QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL, pkt);
TRACE_PROTO("Discarded pktns", QUIC_EV_CONN_TRMHP, qc, pkt);
goto out;
}
TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL, pkt);
TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, qc, pkt);
pkt->pn_offset = pn - beg;
MT_LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
quic_rx_packet_refinc(pkt);
}
else {
TRACE_PROTO("Unknown packet type", QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL);
TRACE_PROTO("Unknown packet type", QUIC_EV_CONN_TRMHP, qc);
goto err;
}
@ -3578,11 +3569,11 @@ static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
pkt->data = (unsigned char *)b_tail(&qc->rx.buf);
b_add(&qc->rx.buf, pkt->len);
out:
TRACE_LEAVE(QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL, qpkt_trace);
TRACE_LEAVE(QUIC_EV_CONN_TRMHP, qc, qpkt_trace);
return 1;
err:
TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TRMHP, ctx ? ctx->qc : NULL, qpkt_trace);
TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TRMHP, qc, qpkt_trace);
return 0;
}
@ -3765,7 +3756,7 @@ static ssize_t qc_srv_pkt_rcv(unsigned char **buf, const unsigned char *end,
}
}
if (!qc_try_rm_hp(pkt, *buf, beg, end, qc, &qel, conn_ctx)) {
if (!qc_try_rm_hp(qc, pkt, *buf, beg, end, &qel)) {
HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
TRACE_PROTO("Packet dropped", QUIC_EV_CONN_SPKT, qc);
goto err;
@ -4145,7 +4136,7 @@ static ssize_t qc_lstnr_pkt_rcv(unsigned char *buf, const unsigned char *end,
}
}
if (!qc_try_rm_hp(pkt, payload, beg, end, qc, &qel, conn_ctx)) {
if (!qc_try_rm_hp(qc, pkt, payload, beg, end, &qel)) {
HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc);
goto err;