REORG: quic: Move QUIC path definitions/declarations to quic_cc module

Move the quic_path struct from quic_conn-t.h to quic_cc-t.h and rename it to quic_cc_path.
Update the code accordingly.
Also move to quic_cc.h some inline functions related to the QUIC path.
Frédéric Lécaille 2023-11-27 16:50:17 +01:00
parent f32fc26b62
commit 581549851c
9 changed files with 88 additions and 85 deletions

@@ -30,6 +30,7 @@
 #include <stddef.h> /* size_t */
 #include <haproxy/buf-t.h>
+#include <haproxy/quic_loss-t.h>
 
 #define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
@@ -86,6 +87,30 @@ struct quic_cc {
 	uint32_t priv[16];
 };
 
+struct quic_cc_path {
+	/* Control congestion. */
+	struct quic_cc cc;
+	/* Packet loss detection information. */
+	struct quic_loss loss;
+	/* MTU. */
+	size_t mtu;
+	/* Congestion window. */
+	uint64_t cwnd;
+	/* The current maximum congestion window value reached. */
+	uint64_t mcwnd;
+	/* The maximum congestion window value which can be reached. */
+	uint64_t max_cwnd;
+	/* Minimum congestion window. */
+	uint64_t min_cwnd;
+	/* Prepared data to be sent (in bytes). */
+	uint64_t prep_in_flight;
+	/* Outstanding data (in bytes). */
+	uint64_t in_flight;
+	/* Number of in flight ack-eliciting packets. */
+	uint64_t ifae_pkts;
+};
+
 struct quic_cc_algo {
 	enum quic_cc_algo_type type;
 	int (*init)(struct quic_cc *cc);
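
Note: the congestion algorithms never receive a quic_cc_path directly; they are handed the embedded quic_cc member and climb back to the enclosing path with container_of(), which is what makes this move into the quic_cc module self-contained. A minimal, compilable sketch of that pattern follows; the struct layouts and the on_ack() callback are simplified stand-ins, not the real HAProxy definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real HAProxy types (illustration only). */
struct quic_cc {
	uint32_t priv[16];
};

struct quic_cc_path {
	struct quic_cc cc; /* embedded congestion controller */
	uint64_t cwnd;     /* congestion window (bytes) */
};

/* container_of in its usual form: recover the enclosing structure
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical callback in the style of the quic_cc_* modules: it only
 * sees the quic_cc pointer and climbs back to the path to grow the window. */
static void on_ack(struct quic_cc *cc, uint64_t acked)
{
	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);

	path->cwnd += acked;
}

int main(void)
{
	struct quic_cc_path path = { .cwnd = 12520 };

	on_ack(&path.cc, 1252);
	printf("cwnd=%llu\n", (unsigned long long)path.cwnd);
	return 0;
}

Because the quic_cc is embedded by value, no back-pointer from quic_cc to quic_cc_path is needed.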

@@ -31,6 +31,7 @@
 #include <haproxy/chunk.h>
 #include <haproxy/quic_cc-t.h>
 #include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_loss.h>
 
 void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
 void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
@@ -73,5 +74,39 @@ static inline void *quic_cc_priv(const struct quic_cc *cc)
 	return (void *)cc->priv;
 }
 
+/* Initialize <path> QUIC network path depending on <ipv4> boolean
+ * which is true for an IPv4 path, false for an IPv6 path.
+ */
+static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsigned long max_cwnd,
+                                     struct quic_cc_algo *algo, struct quic_conn *qc)
+{
+	unsigned int max_dgram_sz;
+
+	max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
+	quic_loss_init(&path->loss);
+	path->mtu = max_dgram_sz;
+	path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
+	path->mcwnd = path->cwnd;
+	path->max_cwnd = max_cwnd;
+	path->min_cwnd = max_dgram_sz << 1;
+	path->prep_in_flight = 0;
+	path->in_flight = 0;
+	path->ifae_pkts = 0;
+	quic_cc_init(&path->cc, algo, qc);
+}
+
+/* Return the remaining room available on <path> QUIC path for prepared data
+ * (before being sent). Almost the same as the QUIC path room, except that
+ * here the data which have already been prepared are taken into account.
+ */
+static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
+{
+	if (path->prep_in_flight > path->cwnd)
+		return 0;
+
+	return path->cwnd - path->prep_in_flight;
+}
+
 #endif /* USE_QUIC */
 #endif /* _PROTO_QUIC_CC_H */
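
Note: the initial window set by quic_cc_path_init() is the RFC 9002 §7.2 rule: min(10 * max_datagram_size, max(2 * max_datagram_size, 14720)). A standalone sketch with the arithmetic spelled out; the two MTU values below are assumptions derived from the 1280-byte minimum QUIC datagram size minus IPv4/IPv6 plus UDP header overhead, so the authoritative definitions in the HAProxy headers prevail:

#include <stdint.h>
#include <stdio.h>

/* Assumed values: 1280 - (20 + 8) for IPv4, 1280 - (40 + 8) for IPv6. */
#define QUIC_INITIAL_IPV4_MTU 1252U
#define QUIC_INITIAL_IPV6_MTU 1232U

#define QUIC_MIN(a, b) ((a) < (b) ? (a) : (b))
#define QUIC_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Same expression as in quic_cc_path_init(). */
static uint64_t initial_cwnd(unsigned int max_dgram_sz)
{
	return QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
}

int main(void)
{
	/* IPv4: min(12520, max(2504, 14720)) = 12520 bytes. */
	printf("ipv4 cwnd=%llu\n", (unsigned long long)initial_cwnd(QUIC_INITIAL_IPV4_MTU));
	/* IPv6: min(12320, max(2464, 14720)) = 12320 bytes. */
	printf("ipv6 cwnd=%llu\n", (unsigned long long)initial_cwnd(QUIC_INITIAL_IPV6_MTU));
	return 0;
}

In both cases the 10-datagram term wins, so a fresh path starts with roughly ten full-sized datagrams of window.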

@@ -235,30 +235,6 @@ extern const struct quic_version *preferred_version;
 /* The maximum number of bytes of CRYPTO data in flight during handshakes. */
 #define QUIC_CRYPTO_IN_FLIGHT_MAX 4096
 
-struct quic_path {
-	/* Control congestion. */
-	struct quic_cc cc;
-	/* Packet loss detection information. */
-	struct quic_loss loss;
-	/* MTU. */
-	size_t mtu;
-	/* Congestion window. */
-	uint64_t cwnd;
-	/* The current maximum congestion window value reached. */
-	uint64_t mcwnd;
-	/* The maximum congestion window value which can be reached. */
-	uint64_t max_cwnd;
-	/* Minimum congestion window. */
-	uint64_t min_cwnd;
-	/* Prepared data to be sent (in bytes). */
-	uint64_t prep_in_flight;
-	/* Outstanding data (in bytes). */
-	uint64_t in_flight;
-	/* Number of in flight ack-eliciting packets. */
-	uint64_t ifae_pkts;
-};
-
 /* Status of the connection/mux layer. This defines how to handle app data.
  *
  * During a standard quic_conn lifetime it transitions like this :
@@ -433,8 +409,8 @@ struct quic_conn {
 	} ku;
 	unsigned int max_ack_delay;
 	unsigned int max_idle_timeout;
-	struct quic_path paths[1];
-	struct quic_path *path;
+	struct quic_cc_path paths[1];
+	struct quic_cc_path *path;
 
 	struct mt_list accept_list; /* chaining element used for accept, only valid for frontend connections */

@@ -146,39 +146,6 @@ static inline void quic_connection_id_to_frm_cpy(struct quic_frame *dst,
 	ncid_frm->stateless_reset_token = src->stateless_reset_token;
 }
 
-/* Initialize <p> QUIC network path depending on <ipv4> boolean
- * which is true for an IPv4 path, if not false for an IPv6 path.
- */
-static inline void quic_path_init(struct quic_path *path, int ipv4, unsigned long max_cwnd,
-                                  struct quic_cc_algo *algo, struct quic_conn *qc)
-{
-	unsigned int max_dgram_sz;
-
-	max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
-	quic_loss_init(&path->loss);
-	path->mtu = max_dgram_sz;
-	path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
-	path->mcwnd = path->cwnd;
-	path->max_cwnd = max_cwnd;
-	path->min_cwnd = max_dgram_sz << 1;
-	path->prep_in_flight = 0;
-	path->in_flight = 0;
-	path->ifae_pkts = 0;
-	quic_cc_init(&path->cc, algo, qc);
-}
-
-/* Return the remaining <room> available on <path> QUIC path for prepared data
- * (before being sent). Almost the same that for the QUIC path room, except that
- * here this is the data which have been prepared which are taken into an account.
- */
-static inline size_t quic_path_prep_data(struct quic_path *path)
-{
-	if (path->prep_in_flight > path->cwnd)
-		return 0;
-
-	return path->cwnd - path->prep_in_flight;
-}
-
 /* Return 1 if <pkt> header form is long, 0 if not. */
 static inline int qc_pkt_long(const struct quic_rx_packet *pkt)
 {

@@ -97,7 +97,7 @@ static uint32_t cubic_root(uint64_t val)
 static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
 {
 	struct cubic *c = quic_cc_priv(cc);
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	/* Current cwnd as number of packets */
 	uint32_t t, target, inc, inc_diff;
 	uint64_t delta, diff;
@@ -183,7 +183,7 @@ static void quic_cc_cubic_slow_start(struct quic_cc *cc)
 static void quic_enter_recovery(struct quic_cc *cc)
 {
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	struct cubic *c = quic_cc_priv(cc);
 
 	/* Current cwnd as number of packets */
@@ -207,7 +207,7 @@ static void quic_enter_recovery(struct quic_cc *cc)
 /* Congestion slow-start callback. */
 static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path = container_of(cc, struct quic_path, cc);
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
 	struct cubic *c = quic_cc_priv(cc);
 
 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
@@ -310,10 +310,10 @@ static void quic_cc_cubic_event(struct quic_cc *cc, struct quic_cc_event *ev)
 static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct cubic *c = quic_cc_priv(cc);
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
 	              quic_cc_state_str(c->state),
 	              (unsigned long long)path->cwnd,

@@ -51,10 +51,10 @@ static int quic_cc_nr_init(struct quic_cc *cc)
 /* Re-enter slow start state. */
 static void quic_cc_nr_slow_start(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	path->cwnd = path->min_cwnd;
 	/* Re-entering slow start state. */
 	nr->state = QUIC_CC_ST_SS;
@@ -65,10 +65,10 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc)
 /* Enter a recovery period. */
 static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	nr->recovery_start_time = now_ms;
 	nr->ssthresh = path->cwnd >> 1;
 	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
@@ -78,12 +78,12 @@ static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
 /* Slow start callback. */
 static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 		path->cwnd += ev->ack.acked;
@@ -109,12 +109,12 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 /* Congestion avoidance callback. */
 static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 	{
@@ -147,12 +147,12 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 /* Recovery period callback. */
 static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
 	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
 	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
 		/* RFC 9002 7.3.2. Recovery
@@ -182,10 +182,10 @@ static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 }
 
 static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 	struct nr *nr = quic_cc_priv(cc);
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
 	              quic_cc_state_str(nr->state),
 	              (unsigned long long)path->cwnd,
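
Note: quic_cc_nr_enter_recovery() above implements the usual RFC 9002 reaction to loss: halve the window into ssthresh and clamp the result to the path's minimum window. A standalone sketch of just that arithmetic, on bare values with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define QUIC_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Same computation as quic_cc_nr_enter_recovery(), without the structs. */
static void enter_recovery(uint64_t *cwnd, uint64_t *ssthresh, uint64_t min_cwnd)
{
	*ssthresh = *cwnd >> 1;
	*cwnd = QUIC_MAX(*ssthresh, min_cwnd);
}

int main(void)
{
	uint64_t cwnd = 12520;    /* e.g. the IPv4 initial window */
	uint64_t min_cwnd = 2504; /* two 1252-byte datagrams */
	uint64_t ssthresh;

	enter_recovery(&cwnd, &ssthresh, min_cwnd);
	/* Prints cwnd=6260 ssthresh=6260. */
	printf("cwnd=%llu ssthresh=%llu\n",
	       (unsigned long long)cwnd, (unsigned long long)ssthresh);
	return 0;
}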

@@ -11,9 +11,9 @@
 static int quic_cc_nocc_init(struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	path->cwnd = path->max_cwnd;
 	return 1;
 }
@@ -48,9 +48,9 @@ static void quic_cc_nocc_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 static void quic_cc_nocc_state_trace(struct buffer *buf, const struct quic_cc *cc)
 {
-	struct quic_path *path;
+	struct quic_cc_path *path;
 
-	path = container_of(cc, struct quic_path, cc);
+	path = container_of(cc, struct quic_cc_path, cc);
 	chunk_appendf(buf, " cwnd=%llu", (unsigned long long)path->cwnd);
 }

@@ -1118,8 +1118,8 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	qc->max_ack_delay = 0;
 	/* Only one path at this time (multipath not supported) */
 	qc->path = &qc->paths[0];
-	quic_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
-	               cc_algo ? cc_algo : default_quic_cc_algo, qc);
+	quic_cc_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
+	                  cc_algo ? cc_algo : default_quic_cc_algo, qc);
 
 	qc->stream_buf_count = 0;
 	memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));

@@ -2092,7 +2092,7 @@ static int qc_build_frms(struct list *outlist, struct list *inlist,
 	 * control window.
 	 */
 	if (!qel->pktns->tx.pto_probe) {
-		size_t remain = quic_path_prep_data(qc->path);
+		size_t remain = quic_cc_path_prep_data(qc->path);
 
 		if (headlen > remain)
 			goto leave;
@@ -2443,7 +2443,7 @@ static int qc_do_build_pkt(unsigned char *pos, const unsigned char *end,
 	if (!probe && !LIST_ISEMPTY(frms) && !cc) {
 		size_t path_room;
 
-		path_room = quic_path_prep_data(qc->path);
+		path_room = quic_cc_path_prep_data(qc->path);
 		if (end - beg > path_room)
 			end = beg + path_room;
 	}
@@ -2840,7 +2840,7 @@ int qc_notify_send(struct quic_conn *qc)
 	 *
 	 * Probe packets MUST NOT be blocked by the congestion controller.
 	 */
-	if ((quic_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
+	if ((quic_cc_path_prep_data(qc->path) || pktns->tx.pto_probe) &&
 	    (!qc_test_fd(qc) || !fd_send_active(qc->fd))) {
 		tasklet_wakeup(qc->subs->tasklet);
 		qc->subs->events &= ~SUB_RETRY_SEND;
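
Note: quic_cc_path_prep_data() is how the TX path respects the congestion window before packets are even sent: qc_do_build_pkt() above shrinks its build area to the remaining room. A standalone sketch of that clamp, assuming a reduced structure with only the two fields involved (illustrative numbers, not the real HAProxy types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced quic_cc_path: only the fields the room computation reads. */
struct quic_cc_path {
	uint64_t cwnd;           /* congestion window (bytes) */
	uint64_t prep_in_flight; /* bytes already prepared for sending */
};

/* Same logic as quic_cc_path_prep_data(). */
static size_t prep_room(const struct quic_cc_path *path)
{
	if (path->prep_in_flight > path->cwnd)
		return 0;

	return path->cwnd - path->prep_in_flight;
}

int main(void)
{
	struct quic_cc_path path = { .cwnd = 12520, .prep_in_flight = 11000 };
	size_t want = 4096; /* bytes the packet builder would like to emit */
	size_t room = prep_room(&path);

	/* As in qc_do_build_pkt(): shrink the build area when the window
	 * leaves less room than requested. */
	if (want > room)
		want = room;
	/* Prints "can prepare 1520 bytes (room=1520)". */
	printf("can prepare %zu bytes (room=%zu)\n", want, room);
	return 0;
}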