MINOR: quic: quic_loss modifications to support BBR
qc_packet_loss_lookup()'s aim is to detect packet losses. It is this function which must call the ->on_pkt_lost() BBR-specific callback. It also sets the <bytes_lost> passed parameter to the total number of bytes detected as lost upon an ACK frame receipt, for use by its caller. Modify qc_release_lost_pkts() to call ->congestion_event() with the send time of the newest packet detected as lost. Modify qc_release_lost_pkts() to call the ->slow_start() callback only if it is defined by the congestion control algorithm. This is not the case for BBR.
This commit is contained in:
parent
af75665cb7
commit
d85eb127e9
|
@ -85,7 +85,7 @@ struct quic_pktns *quic_pto_pktns(struct quic_conn *qc,
|
||||||
unsigned int *pto);
|
unsigned int *pto);
|
||||||
|
|
||||||
void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
|
void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
|
||||||
struct list *lost_pkts);
|
struct list *lost_pkts, uint32_t *bytes_lost);
|
||||||
int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
|
int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
|
||||||
struct list *pkts, uint64_t now_us);
|
struct list *pkts, uint64_t now_us);
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
|
@ -931,7 +931,7 @@ struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
|
||||||
if (tick_isset(pktns->tx.loss_time)) {
|
if (tick_isset(pktns->tx.loss_time)) {
|
||||||
struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
|
struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
|
||||||
|
|
||||||
qc_packet_loss_lookup(pktns, qc, &lost_pkts);
|
qc_packet_loss_lookup(pktns, qc, &lost_pkts, NULL);
|
||||||
if (!LIST_ISEMPTY(&lost_pkts))
|
if (!LIST_ISEMPTY(&lost_pkts))
|
||||||
tasklet_wakeup(qc->wait_event.tasklet);
|
tasklet_wakeup(qc->wait_event.tasklet);
|
||||||
if (qc_release_lost_pkts(qc, pktns, &lost_pkts, now_ms))
|
if (qc_release_lost_pkts(qc, pktns, &lost_pkts, now_ms))
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
#include <import/eb64tree.h>
|
#include <import/eb64tree.h>
|
||||||
|
|
||||||
|
#include <haproxy/quic_cc-t.h>
|
||||||
#include <haproxy/quic_conn-t.h>
|
#include <haproxy/quic_conn-t.h>
|
||||||
#include <haproxy/quic_loss.h>
|
#include <haproxy/quic_loss.h>
|
||||||
#include <haproxy/quic_tls.h>
|
#include <haproxy/quic_tls.h>
|
||||||
|
@ -151,7 +152,7 @@ struct quic_pktns *quic_pto_pktns(struct quic_conn *qc,
|
||||||
* Always succeeds.
|
* Always succeeds.
|
||||||
*/
|
*/
|
||||||
void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
|
void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
|
||||||
struct list *lost_pkts)
|
struct list *lost_pkts, uint32_t *bytes_lost)
|
||||||
{
|
{
|
||||||
struct eb_root *pkts;
|
struct eb_root *pkts;
|
||||||
struct eb64_node *node;
|
struct eb64_node *node;
|
||||||
|
@ -213,8 +214,14 @@ void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
|
||||||
ql->nb_reordered_pkt++;
|
ql->nb_reordered_pkt++;
|
||||||
|
|
||||||
if (tick_is_le(loss_time_limit, now_ms) || reordered) {
|
if (tick_is_le(loss_time_limit, now_ms) || reordered) {
|
||||||
|
struct quic_cc *cc = &qc->path->cc;
|
||||||
|
|
||||||
|
if (cc->algo->on_pkt_lost)
|
||||||
|
cc->algo->on_pkt_lost(cc, pkt, pkt->rs.lost);
|
||||||
eb64_delete(&pkt->pn_node);
|
eb64_delete(&pkt->pn_node);
|
||||||
LIST_APPEND(lost_pkts, &pkt->list);
|
LIST_APPEND(lost_pkts, &pkt->list);
|
||||||
|
if (bytes_lost)
|
||||||
|
*bytes_lost += pkt->len;
|
||||||
ql->nb_lost_pkt++;
|
ql->nb_lost_pkt++;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
|
@ -276,6 +283,7 @@ int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
|
||||||
|
|
||||||
if (!close) {
|
if (!close) {
|
||||||
if (newest_lost) {
|
if (newest_lost) {
|
||||||
|
struct quic_cc *cc = &qc->path->cc;
|
||||||
/* Sent a congestion event to the controller */
|
/* Sent a congestion event to the controller */
|
||||||
struct quic_cc_event ev = { };
|
struct quic_cc_event ev = { };
|
||||||
|
|
||||||
|
@ -283,7 +291,9 @@ int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
|
||||||
ev.loss.time_sent = newest_lost->time_sent;
|
ev.loss.time_sent = newest_lost->time_sent;
|
||||||
ev.loss.count = tot_lost;
|
ev.loss.count = tot_lost;
|
||||||
|
|
||||||
quic_cc_event(&qc->path->cc, &ev);
|
quic_cc_event(cc, &ev);
|
||||||
|
if (cc->algo->congestion_event)
|
||||||
|
cc->algo->congestion_event(cc, newest_lost->time_sent);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If an RTT have been already sampled, <rtt_min> has been set.
|
/* If an RTT have been already sampled, <rtt_min> has been set.
|
||||||
|
@ -295,7 +305,8 @@ int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
|
||||||
unsigned int period = newest_lost->time_sent - oldest_lost->time_sent;
|
unsigned int period = newest_lost->time_sent - oldest_lost->time_sent;
|
||||||
|
|
||||||
if (quic_loss_persistent_congestion(&qc->path->loss, period,
|
if (quic_loss_persistent_congestion(&qc->path->loss, period,
|
||||||
now_ms, qc->max_ack_delay))
|
now_ms, qc->max_ack_delay) &&
|
||||||
|
qc->path->cc.algo->slow_start)
|
||||||
qc->path->cc.algo->slow_start(&qc->path->cc);
|
qc->path->cc.algo->slow_start(&qc->path->cc);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue