2021-11-12 15:09:54 +00:00
|
|
|
#include <haproxy/hq_interop.h>
|
|
|
|
|
|
|
|
#include <import/ist.h>
|
|
|
|
#include <haproxy/buf.h>
|
|
|
|
#include <haproxy/connection.h>
|
|
|
|
#include <haproxy/dynbuf.h>
|
|
|
|
#include <haproxy/htx.h>
|
|
|
|
#include <haproxy/http.h>
|
2022-04-04 14:13:44 +00:00
|
|
|
#include <haproxy/mux_quic.h>
|
2023-02-17 08:51:20 +00:00
|
|
|
#include <haproxy/qmux_http.h>
|
2023-12-08 14:52:00 +00:00
|
|
|
#include <haproxy/qmux_trace.h>
|
|
|
|
#include <haproxy/trace.h>
|
2021-11-12 15:09:54 +00:00
|
|
|
|
2023-12-07 16:43:07 +00:00
|
|
|
static ssize_t hq_interop_rcv_buf(struct qcs *qcs, struct buffer *b, int fin)
|
2021-11-12 15:09:54 +00:00
|
|
|
{
|
|
|
|
struct htx *htx;
|
|
|
|
struct htx_sl *sl;
|
|
|
|
struct buffer htx_buf = BUF_NULL;
|
|
|
|
struct ist path;
|
2022-06-03 14:40:34 +00:00
|
|
|
char *ptr = b_head(b);
|
|
|
|
size_t data = b_data(b);
|
2021-11-12 15:09:54 +00:00
|
|
|
|
2023-10-04 13:46:06 +00:00
|
|
|
/* hq-interop parser does not support buffer wrapping. */
|
|
|
|
BUG_ON(b_data(b) != b_contig_data(b, 0));
|
2023-05-11 14:49:28 +00:00
|
|
|
|
2023-10-04 13:46:06 +00:00
|
|
|
/* hq-interop parser is only done once full message is received. */
|
|
|
|
if (!fin)
|
2023-02-17 08:51:20 +00:00
|
|
|
return 0;
|
|
|
|
|
MINOR: dynbuf: pass a criticality argument to b_alloc()
The goal is to indicate how critical the allocation is, between the
least one (growing an existing buffer ring) and the topmost one (boot
time allocation for the life of the process).
The 3 tcp-based muxes (h1, h2, fcgi) use a common allocation function
to try to allocate otherwise subscribe. There's currently no distinction
of direction nor part that tries to allocate, and this should be revisited
to improve this situation, particularly when we consider that mux-h2 can
reduce its Tx allocations if needed.
For now, 4 main levels are planned, to translate how the data travels
inside haproxy from a producer to a consumer:
- MUX_RX: buffer used to receive data from the OS
- SE_RX: buffer used to place a transformation of the RX data for
a mux, or to produce a response for an applet
- CHANNEL: the channel buffer for sync recv
- MUX_TX: buffer used to transfer data from the channel to the outside,
generally a mux but there can be a few specificities (e.g.
http client's response buffer passed to the application,
which also gets a transformation of the channel data).
The other levels are a bit different in that they don't strictly need to
allocate for the first two ones, or they're permanent for the last one
(used by compression).
2024-04-16 06:55:20 +00:00
|
|
|
b_alloc(&htx_buf, DB_MUX_RX);
|
2021-11-12 15:09:54 +00:00
|
|
|
htx = htx_from_buf(&htx_buf);
|
|
|
|
|
|
|
|
/* skip method */
|
2021-12-15 21:38:48 +00:00
|
|
|
while (data && HTTP_IS_TOKEN(*ptr)) {
|
2023-10-04 13:46:06 +00:00
|
|
|
ptr++;
|
2021-12-15 21:38:48 +00:00
|
|
|
data--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!data || !HTTP_IS_SPHT(*ptr)) {
|
|
|
|
fprintf(stderr, "truncated stream\n");
|
2023-10-04 13:46:06 +00:00
|
|
|
return -1;
|
2021-12-15 21:38:48 +00:00
|
|
|
}
|
|
|
|
|
2023-10-04 13:46:06 +00:00
|
|
|
ptr++;
|
2021-12-15 21:38:48 +00:00
|
|
|
if (!--data) {
|
|
|
|
fprintf(stderr, "truncated stream\n");
|
2023-10-04 13:46:06 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (HTTP_IS_LWS(*ptr)) {
|
|
|
|
fprintf(stderr, "malformed stream\n");
|
|
|
|
return -1;
|
2021-12-15 21:38:48 +00:00
|
|
|
}
|
2021-11-12 15:09:54 +00:00
|
|
|
|
|
|
|
/* extract path */
|
|
|
|
path.ptr = ptr;
|
2021-12-15 21:38:48 +00:00
|
|
|
while (data && !HTTP_IS_LWS(*ptr)) {
|
2023-10-04 13:46:06 +00:00
|
|
|
ptr++;
|
2021-12-15 21:38:48 +00:00
|
|
|
data--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!data) {
|
|
|
|
fprintf(stderr, "truncated stream\n");
|
2023-10-04 13:46:06 +00:00
|
|
|
return -1;
|
2021-12-15 21:38:48 +00:00
|
|
|
}
|
|
|
|
|
2021-11-12 15:09:54 +00:00
|
|
|
path.len = ptr - path.ptr;
|
|
|
|
|
|
|
|
sl = htx_add_stline(htx, HTX_BLK_REQ_SL, 0, ist("GET"), path, ist("HTTP/1.0"));
|
2021-11-18 13:40:26 +00:00
|
|
|
if (!sl)
|
2022-06-07 15:30:55 +00:00
|
|
|
return -1;
|
2021-11-18 13:40:26 +00:00
|
|
|
|
2021-11-12 15:09:54 +00:00
|
|
|
sl->flags |= HTX_SL_F_BODYLESS;
|
|
|
|
sl->info.req.meth = find_http_meth("GET", 3);
|
|
|
|
|
|
|
|
htx_add_endof(htx, HTX_BLK_EOH);
|
2023-10-04 13:46:06 +00:00
|
|
|
htx->flags |= HTX_FL_EOM;
|
2021-11-12 15:09:54 +00:00
|
|
|
htx_to_buf(htx, &htx_buf);
|
|
|
|
|
2023-05-30 13:04:46 +00:00
|
|
|
if (!qcs_attach_sc(qcs, &htx_buf, fin))
|
2022-06-07 15:30:55 +00:00
|
|
|
return -1;
|
2021-11-12 15:09:54 +00:00
|
|
|
|
|
|
|
b_free(&htx_buf);
|
|
|
|
|
2022-06-07 15:30:55 +00:00
|
|
|
return b_data(b);
|
2021-11-12 15:09:54 +00:00
|
|
|
}
|
|
|
|
|
2023-10-27 14:41:58 +00:00
|
|
|
/* Convert the HTX message stored in <buf> into raw HTTP/0.9 payload for
 * stream <qcs>, transferring at most <count> bytes into the stream Tx buffer
 * obtained from the qc_stream_desc layer. Only DATA blocks are emitted :
 * start-line, trailers and other block types are dropped since HTTP/0.9 has
 * no representation for them.
 *
 * Returns the number of bytes consumed from the HTX message.
 */
static size_t hq_interop_snd_buf(struct qcs *qcs, struct buffer *buf,
                                 size_t count)
{
	enum htx_blk_type btype;
	struct htx *htx = NULL;
	struct htx_blk *blk;
	int32_t idx;
	uint32_t bsize, fsize; /* block size / size really transferred */
	struct buffer *res = NULL;
	size_t total = 0;
	int err;

	htx = htx_from_buf(buf);

	/* Loop as long as quota remains, blocks are left and flow-control
	 * still allows sending on this stream.
	 */
	while (count && !htx_is_empty(htx) && qcc_stream_can_send(qcs)) {
		/* Not implemented : QUIC on backend side */
		idx = htx_get_head(htx);
		blk = htx_get_blk(htx, idx);
		btype = htx_get_blk_type(blk);
		fsize = bsize = htx_get_blksz(blk);

		/* a request start-line must never be seen on the Tx path here */
		BUG_ON(btype == HTX_BLK_REQ_SL);

		switch (btype) {
		case HTX_BLK_DATA:
			res = qcc_get_stream_txbuf(qcs, &err);
			if (!res) {
				/* <err> signals a fatal allocation condition,
				 * otherwise simply retry later.
				 */
				if (err)
					ABORT_NOW();
				goto end;
			}

			/* Zero-copy fast path : when the single DATA block
			 * fully fits the quota and the Tx buffer is empty,
			 * swap buffer areas instead of copying the payload.
			 */
			if (unlikely(fsize == count &&
			             !b_data(res) &&
			             htx_nbblks(htx) == 1 && btype == HTX_BLK_DATA)) {
				void *old_area = res->area;

				TRACE_DATA("perform zero-copy DATA transfer", QMUX_EV_STRM_SEND,
				           qcs->qcc->conn, qcs);

				/* remap MUX buffer to HTX area */
				*res = b_make(buf->area, buf->size,
				              sizeof(struct htx) + blk->addr, fsize);

				/* assign old MUX area to HTX buffer. */
				buf->area = old_area;
				buf->data = buf->head = 0;
				total += fsize;

				/* reload HTX with empty buffer. */
				*htx = *htx_from_buf(buf);
				goto end;
			}

			/* clamp the transfer to the remaining quota */
			if (fsize > count)
				fsize = count;

			/* b_putblk() below requires contiguous room : clamp
			 * again to the contiguous free space.
			 */
			if (b_contig_space(res) < fsize)
				fsize = b_contig_space(res);

			if (!fsize) {
				/* Release buf and restart parsing if sending still possible. */
				qcc_release_stream_txbuf(qcs);
				continue;
			}

			b_putblk(res, htx_get_blk_ptr(htx, blk), fsize);
			total += fsize;
			count -= fsize;

			/* remove the block if fully transferred, else truncate
			 * it by the amount already copied
			 */
			if (fsize == bsize)
				htx_remove_blk(htx, blk);
			else
				htx_cut_data_blk(htx, blk, fsize);
			break;

		/* only body is transferred on HTTP/0.9 */
		case HTX_BLK_RES_SL:
		case HTX_BLK_TLR:
		case HTX_BLK_EOT:
		default:
			/* dropped blocks still count against the quota */
			htx_remove_blk(htx, blk);
			total += bsize;
			count -= bsize;
			break;
		}
	}

 end:
	htx_to_buf(htx, buf);

	return total;

}
|
|
|
|
|
2023-12-07 09:36:36 +00:00
|
|
|
static size_t hq_interop_nego_ff(struct qcs *qcs, size_t count)
|
|
|
|
{
|
2024-01-17 14:15:55 +00:00
|
|
|
int err, ret = 0;
|
MAJOR: mux-quic: remove intermediary Tx buffer
Previously, QUIC MUX sending was implemented with data transfered along
two different buffer instances per stream.
The first QCS buffer was used for HTX blocks conversion into H3 (or
other application protocol) during snd_buf stream callback. QCS instance
is then registered for sending via qcc_io_cb().
For each sending QCS, data memcpy is performed from the first to a
secondary buffer. A STREAM frame is produced for each QCS based on the
content of their secondary buffer.
This model is useful for QUIC MUX which has a major difference with
other muxes : data must be preserved longer, even after sent to the
lower layer. Data references is shared with quic-conn layer which
implements retransmission and data deletion on ACK reception.
This double buffering stages was the first model implemented and remains
active until today. One of its major drawbacks is that it requires
memcpy invocation for every data transferred between the two buffers.
Another important drawback is that the first buffer was is allocated by
each QCS individually without restriction. On the other hand, secondary
buffers are accounted for the connection. A bottleneck can appear if
secondary buffer pool is exhausted, causing unnecessary haproxy
buffering.
The purpose of this commit is to completely break this model. The first
buffer instance is removed. Now, application protocols will directly
allocate buffer from qc_stream_desc layer. This removes completely the
memcpy invocation.
This commit has a lot of code modifications. The most obvious one is the
removal of <qcs.tx.buf> field. Now, qcc_get_stream_txbuf() returns a
buffer instance from qc_stream_desc layer. qcs_xfer_data() which was
responsible for the memcpy between the two buffers is also completely
removed. Offset fields of QCS and QCC are now incremented directly by
qcc_send_stream(). These values are used as boundary with flow control
real offset to delimit the STREAM frames built.
As this change has a big impact on the code, this commit is only the
first part to fully support single buffer emission. For the moment, some
limitations are reintroduced and will be fixed in the next patches :
* on snd_buf if QCS sent buffer in used has room but not enough for the
application protocol to store its content
* on snd_buf if QCS sent buffer is NULL and allocation cannot succeeds
due to connection pool exhaustion
One final important aspect is that extra care is necessary now in
snd_buf callback. The same buffer instance is referenced by both the
stream and quic-conn layer. As such, some operation such as realign
cannot be done anymore freely.
2024-01-16 15:47:57 +00:00
|
|
|
struct buffer *res;
|
2023-12-07 09:36:36 +00:00
|
|
|
|
2024-01-22 16:03:41 +00:00
|
|
|
start:
|
2024-01-17 14:15:55 +00:00
|
|
|
res = qcc_get_stream_txbuf(qcs, &err);
|
MAJOR: mux-quic: remove intermediary Tx buffer
Previously, QUIC MUX sending was implemented with data transferred along
two different buffer instances per stream.
The first QCS buffer was used for HTX blocks conversion into H3 (or
other application protocol) during snd_buf stream callback. QCS instance
is then registered for sending via qcc_io_cb().
For each sending QCS, data memcpy is performed from the first to a
secondary buffer. A STREAM frame is produced for each QCS based on the
content of their secondary buffer.
This model is useful for QUIC MUX which has a major difference with
other muxes : data must be preserved longer, even after sent to the
lower layer. Data references is shared with quic-conn layer which
implements retransmission and data deletion on ACK reception.
This double buffering stages was the first model implemented and remains
active until today. One of its major drawbacks is that it requires
memcpy invocation for every data transferred between the two buffers.
Another important drawback is that the first buffer was is allocated by
each QCS individually without restriction. On the other hand, secondary
buffers are accounted for the connection. A bottleneck can appear if
secondary buffer pool is exhausted, causing unnecessary haproxy
buffering.
The purpose of this commit is to completely break this model. The first
buffer instance is removed. Now, application protocols will directly
allocate buffer from qc_stream_desc layer. This removes completely the
memcpy invocation.
This commit has a lot of code modifications. The most obvious one is the
removal of <qcs.tx.buf> field. Now, qcc_get_stream_txbuf() returns a
buffer instance from qc_stream_desc layer. qcs_xfer_data() which was
responsible for the memcpy between the two buffers is also completely
removed. Offset fields of QCS and QCC are now incremented directly by
qcc_send_stream(). These values are used as boundary with flow control
real offset to delimit the STREAM frames built.
As this change has a big impact on the code, this commit is only the
first part to fully support single buffer emission. For the moment, some
limitations are reintroduced and will be fixed in the next patches :
* on snd_buf if QCS sent buffer in used has room but not enough for the
application protocol to store its content
* on snd_buf if QCS sent buffer is NULL and allocation cannot succeeds
due to connection pool exhaustion
One final important aspect is that extra care is necessary now in
snd_buf callback. The same buffer instance is referenced by both the
stream and quic-conn layer. As such, some operation such as realign
cannot be done anymore freely.
2024-01-16 15:47:57 +00:00
|
|
|
if (!res) {
|
2024-01-17 14:15:55 +00:00
|
|
|
if (err)
|
|
|
|
ABORT_NOW();
|
2023-12-07 09:36:36 +00:00
|
|
|
qcs->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
|
|
|
|
goto end;
|
MAJOR: mux-quic: remove intermediary Tx buffer
Previously, QUIC MUX sending was implemented with data transfered along
two different buffer instances per stream.
The first QCS buffer was used for HTX blocks conversion into H3 (or
other application protocol) during snd_buf stream callback. QCS instance
is then registered for sending via qcc_io_cb().
For each sending QCS, data memcpy is performed from the first to a
secondary buffer. A STREAM frame is produced for each QCS based on the
content of their secondary buffer.
This model is useful for QUIC MUX which has a major difference with
other muxes : data must be preserved longer, even after sent to the
lower layer. Data references is shared with quic-conn layer which
implements retransmission and data deletion on ACK reception.
This double buffering stages was the first model implemented and remains
active until today. One of its major drawbacks is that it requires
memcpy invocation for every data transferred between the two buffers.
Another important drawback is that the first buffer was is allocated by
each QCS individually without restriction. On the other hand, secondary
buffers are accounted for the connection. A bottleneck can appear if
secondary buffer pool is exhausted, causing unnecessary haproxy
buffering.
The purpose of this commit is to completely break this model. The first
buffer instance is removed. Now, application protocols will directly
allocate buffer from qc_stream_desc layer. This removes completely the
memcpy invocation.
This commit has a lot of code modifications. The most obvious one is the
removal of <qcs.tx.buf> field. Now, qcc_get_stream_txbuf() returns a
buffer instance from qc_stream_desc layer. qcs_xfer_data() which was
responsible for the memcpy between the two buffers is also completely
removed. Offset fields of QCS and QCC are now incremented directly by
qcc_send_stream(). These values are used as boundary with flow control
real offset to delimit the STREAM frames built.
As this change has a big impact on the code, this commit is only the
first part to fully support single buffer emission. For the moment, some
limitations are reintroduced and will be fixed in the next patches :
* on snd_buf if QCS sent buffer in used has room but not enough for the
application protocol to store its content
* on snd_buf if QCS sent buffer is NULL and allocation cannot succeeds
due to connection pool exhaustion
One final important aspect is that extra care is necessary now in
snd_buf callback. The same buffer instance is referenced by both the
stream and quic-conn layer. As such, some operation such as realign
cannot be done anymore freely.
2024-01-16 15:47:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!b_room(res)) {
|
2024-01-22 16:03:41 +00:00
|
|
|
if (qcc_release_stream_txbuf(qcs)) {
|
|
|
|
qcs->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
|
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
|
|
|
goto start;
|
2023-12-07 09:36:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* No header required for HTTP/0.9, no need to reserve an offset. */
|
|
|
|
qcs->sd->iobuf.buf = res;
|
|
|
|
qcs->sd->iobuf.offset = 0;
|
|
|
|
qcs->sd->iobuf.data = 0;
|
|
|
|
|
MAJOR: mux-quic: remove intermediary Tx buffer
Previously, QUIC MUX sending was implemented with data transfered along
two different buffer instances per stream.
The first QCS buffer was used for HTX blocks conversion into H3 (or
other application protocol) during snd_buf stream callback. QCS instance
is then registered for sending via qcc_io_cb().
For each sending QCS, data memcpy is performed from the first to a
secondary buffer. A STREAM frame is produced for each QCS based on the
content of their secondary buffer.
This model is useful for QUIC MUX which has a major difference with
other muxes : data must be preserved longer, even after sent to the
lower layer. Data references is shared with quic-conn layer which
implements retransmission and data deletion on ACK reception.
This double buffering stages was the first model implemented and remains
active until today. One of its major drawbacks is that it requires
memcpy invocation for every data transferred between the two buffers.
Another important drawback is that the first buffer was is allocated by
each QCS individually without restriction. On the other hand, secondary
buffers are accounted for the connection. A bottleneck can appear if
secondary buffer pool is exhausted, causing unnecessary haproxy
buffering.
The purpose of this commit is to completely break this model. The first
buffer instance is removed. Now, application protocols will directly
allocate buffer from qc_stream_desc layer. This removes completely the
memcpy invocation.
This commit has a lot of code modifications. The most obvious one is the
removal of <qcs.tx.buf> field. Now, qcc_get_stream_txbuf() returns a
buffer instance from qc_stream_desc layer. qcs_xfer_data() which was
responsible for the memcpy between the two buffers is also completely
removed. Offset fields of QCS and QCC are now incremented directly by
qcc_send_stream(). These values are used as boundary with flow control
real offset to delimit the STREAM frames built.
As this change has a big impact on the code, this commit is only the
first part to fully support single buffer emission. For the moment, some
limitations are reintroduced and will be fixed in the next patches :
* on snd_buf if QCS sent buffer in used has room but not enough for the
application protocol to store its content
* on snd_buf if QCS sent buffer is NULL and allocation cannot succeeds
due to connection pool exhaustion
One final important aspect is that extra care is necessary now in
snd_buf callback. The same buffer instance is referenced by both the
stream and quic-conn layer. As such, some operation such as realign
cannot be done anymore freely.
2024-01-16 15:47:57 +00:00
|
|
|
ret = MIN(count, b_contig_space(res));
|
2023-12-07 09:36:36 +00:00
|
|
|
end:
|
MAJOR: mux-quic: remove intermediary Tx buffer
Previously, QUIC MUX sending was implemented with data transfered along
two different buffer instances per stream.
The first QCS buffer was used for HTX blocks conversion into H3 (or
other application protocol) during snd_buf stream callback. QCS instance
is then registered for sending via qcc_io_cb().
For each sending QCS, data memcpy is performed from the first to a
secondary buffer. A STREAM frame is produced for each QCS based on the
content of their secondary buffer.
This model is useful for QUIC MUX which has a major difference with
other muxes : data must be preserved longer, even after sent to the
lower layer. Data references is shared with quic-conn layer which
implements retransmission and data deletion on ACK reception.
This double buffering stages was the first model implemented and remains
active until today. One of its major drawbacks is that it requires
memcpy invocation for every data transferred between the two buffers.
Another important drawback is that the first buffer was is allocated by
each QCS individually without restriction. On the other hand, secondary
buffers are accounted for the connection. A bottleneck can appear if
secondary buffer pool is exhausted, causing unnecessary haproxy
buffering.
The purpose of this commit is to completely break this model. The first
buffer instance is removed. Now, application protocols will directly
allocate buffer from qc_stream_desc layer. This removes completely the
memcpy invocation.
This commit has a lot of code modifications. The most obvious one is the
removal of <qcs.tx.buf> field. Now, qcc_get_stream_txbuf() returns a
buffer instance from qc_stream_desc layer. qcs_xfer_data() which was
responsible for the memcpy between the two buffers is also completely
removed. Offset fields of QCS and QCC are now incremented directly by
qcc_send_stream(). These values are used as boundary with flow control
real offset to delimit the STREAM frames built.
As this change has a big impact on the code, this commit is only the
first part to fully support single buffer emission. For the moment, some
limitations are reintroduced and will be fixed in the next patches :
* on snd_buf if QCS sent buffer in used has room but not enough for the
application protocol to store its content
* on snd_buf if QCS sent buffer is NULL and allocation cannot succeeds
due to connection pool exhaustion
One final important aspect is that extra care is necessary now in
snd_buf callback. The same buffer instance is referenced by both the
stream and quic-conn layer. As such, some operation such as realign
cannot be done anymore freely.
2024-01-16 15:47:57 +00:00
|
|
|
return ret;
|
2023-12-07 09:36:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static size_t hq_interop_done_ff(struct qcs *qcs)
|
|
|
|
{
|
|
|
|
const size_t ret = qcs->sd->iobuf.data;
|
|
|
|
|
|
|
|
/* No header required for HTTP/0.9, simply mark ff as done. */
|
|
|
|
qcs->sd->iobuf.buf = NULL;
|
|
|
|
qcs->sd->iobuf.offset = 0;
|
|
|
|
qcs->sd->iobuf.data = 0;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-08-03 09:17:57 +00:00
|
|
|
/* Called when stream <qcs> is attached to the hq-interop application layer.
 * <conn_ctx> is unused here.
 *
 * Marks the stream as waiting for its HTTP request via qcs_wait_http_req().
 *
 * Always returns 0 (success).
 */
static int hq_interop_attach(struct qcs *qcs, void *conn_ctx)
{
	qcs_wait_http_req(qcs);
	return 0;
}
|
|
|
|
|
2021-11-12 15:09:54 +00:00
|
|
|
const struct qcc_app_ops hq_interop_ops = {
|
2023-12-07 16:43:07 +00:00
|
|
|
.rcv_buf = hq_interop_rcv_buf,
|
2021-11-12 15:09:54 +00:00
|
|
|
.snd_buf = hq_interop_snd_buf,
|
2023-12-07 09:36:36 +00:00
|
|
|
.nego_ff = hq_interop_nego_ff,
|
|
|
|
.done_ff = hq_interop_done_ff,
|
2022-08-03 09:17:57 +00:00
|
|
|
.attach = hq_interop_attach,
|
2021-11-12 15:09:54 +00:00
|
|
|
};
|