BUG/MAJOR: connection: fix mismatch between rcv_buf's API and usage
Steve Ruiz reported some reproducible crashes with HTTP health checks
on a certain page returning a huge length. The traces he provided
clearly showed that the recv() call was performed twice for a total
size exceeding the buffer's length.
Cyril Bonté tracked down the problem to be caused by the full buffer
size being passed to rcv_buf() in event_srv_chk_r() instead of passing
just the remaining amount of space. Indeed, this change happened during
the connection rework in 1.5-dev13 with the following commit :
f150317
MAJOR: checks: completely use the connection transport layer
But one of the problems is also that the comments at the top of the
rcv_buf() functions suggest that the caller only has to ensure the
requested size doesn't overflow the buffer's size.
Also, these functions already have to care about the buffer's size to
handle wrapping free space when there are pending data in the buffer.
So let's change the API instead to more closely match what could be
expected from these functions :
- the caller asks for the maximum amount of bytes it wants to read ;
This means that only the caller is responsible for enforcing the
reserve if it wants to (eg: checks don't).
- the rcv_buf() functions fix their computations to always consider
this size as a max, and always perform validity checks based on
the buffer's free space.
As a result, the code is simplified and reduced, and made more robust
for callers which now just have to care about whether they want the
buffer to be filled or not.
Since the bug was introduced in 1.5-dev13, no backport to stable versions
is needed.
parent 4448925930
commit abf08d9365
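The commit message above defines the new contract in prose; as an aside, here is a minimal, self-contained sketch of that contract in C. The types and helpers (sketch_buf, sketch_free_space, sketch_rcv) are hypothetical stand-ins for illustration only, not HAProxy's actual buffer API:

#include <stddef.h>

/* simplified stand-in for struct buffer: <i> input bytes starting at <p>,
 * <o> pending output bytes just before <p>, all within <size> bytes at <data>
 */
struct sketch_buf {
    char   *data;
    char   *p;
    size_t  o;
    size_t  i;
    size_t  size;
};

/* free room left in the buffer */
static size_t sketch_free_space(const struct sketch_buf *b)
{
    return b->size - (b->i + b->o);
}

/* receive function following the new contract: <count> is only the maximum
 * the caller is willing to accept; the validity check against the buffer's
 * free space happens here, not in the caller
 */
static size_t sketch_rcv(struct sketch_buf *b, size_t count)
{
    size_t room = sketch_free_space(b);

    if (count > room)
        count = room;   /* never overflow the buffer */

    /* a real implementation would recv() up to <count> bytes here */
    return count;
}

Under this split, a health check can simply offer the whole buffer (check->bi->size), as the checks.c hunk below does, while a caller that wants to preserve a reserve subtracts it from <count> on its own side.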
--- a/src/checks.c
+++ b/src/checks.c
@@ -2065,7 +2065,7 @@ static void tcpcheck_main(struct connection *conn)
                 goto out_end_tcpcheck;
 
             if ((conn->flags & CO_FL_WAIT_RD) ||
-                conn->xprt->rcv_buf(conn, check->bi, buffer_total_space(check->bi)) <= 0) {
+                conn->xprt->rcv_buf(conn, check->bi, check->bi->size) <= 0) {
                 if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_DATA_RD_SH)) {
                     done = 1;
                     if ((conn->flags & CO_FL_ERROR) && !check->bi->i) {
--- a/src/raw_sock.c
+++ b/src/raw_sock.c
@@ -226,8 +226,7 @@ int raw_sock_from_pipe(struct connection *conn, struct pipe *pipe)
 
 
 /* Receive up to <count> bytes from connection <conn>'s socket and store them
- * into buffer <buf>. The caller must ensure that <count> is always smaller
- * than the buffer's size. Only one call to recv() is performed, unless the
+ * into buffer <buf>. Only one call to recv() is performed, unless the
  * buffer wraps, in which case a second call may be performed. The connection's
  * flags are updated with whatever special event is detected (error, read0,
  * empty). The caller is responsible for taking care of those events and
@@ -239,7 +238,7 @@ int raw_sock_from_pipe(struct connection *conn, struct pipe *pipe)
 static int raw_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
 {
     int ret, done = 0;
-    int try = count;
+    int try;
 
     if (!(conn->flags & CO_FL_CTRL_READY))
         return 0;
@@ -258,24 +257,27 @@ static int raw_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
         }
     }
 
-    /* compute the maximum block size we can read at once. */
-    if (buffer_empty(buf)) {
-        /* let's realign the buffer to optimize I/O */
+    /* let's realign the buffer to optimize I/O */
+    if (buffer_empty(buf))
         buf->p = buf->data;
-    }
-    else if (buf->data + buf->o < buf->p &&
-             buf->p + buf->i < buf->data + buf->size) {
-        /* remaining space wraps at the end, with a moving limit */
-        if (try > buf->data + buf->size - (buf->p + buf->i))
-            try = buf->data + buf->size - (buf->p + buf->i);
-    }
 
     /* read the largest possible block. For this, we perform only one call
      * to recv() unless the buffer wraps and we exactly fill the first hunk,
      * in which case we accept to do it once again. A new attempt is made on
      * EINTR too.
      */
-    while (try) {
+    while (count > 0) {
+        /* first check if we have some room after p+i */
+        try = buf->data + buf->size - (buf->p + buf->i);
+        /* otherwise continue between data and p-o */
+        if (try <= 0) {
+            try = buf->p - (buf->data + buf->o);
+            if (try <= 0)
+                break;
+        }
+        if (try > count)
+            try = count;
+
         ret = recv(conn->t.sock.fd, bi_end(buf), try, 0);
 
         if (ret > 0) {
@@ -291,7 +293,6 @@ static int raw_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
                 break;
             }
             count -= ret;
-            try = count;
         }
         else if (ret == 0) {
             goto read0;
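For readers less familiar with HAProxy's buffer layout, the block-size computation added to the loop above can be illustrated with a generic, self-contained ring-buffer sketch. struct ring and ring_recv below are hypothetical simplifications (offsets instead of struct buffer's pointers), meant only to show why at most two recv() calls are needed when the free area wraps past the end of storage:

#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

struct ring {
    char   *data;   /* storage */
    size_t  size;   /* storage size */
    size_t  head;   /* offset of the first stored byte */
    size_t  used;   /* number of stored bytes */
};

/* Fill <r> from socket <fd> with at most <count> bytes. As in the patched
 * raw_sock_to_buf(), the free-space checks live here, so the caller may
 * simply pass r->size as <count>.
 */
static ssize_t ring_recv(struct ring *r, int fd, size_t count)
{
    ssize_t total = 0;

    while (count > 0) {
        size_t end  = (r->head + r->used) % r->size; /* write offset */
        size_t room = r->size - r->used;             /* total free space */
        size_t try  = r->size - end;                 /* contiguous room up to end of storage */
        ssize_t ret;

        if (room == 0)
            break;              /* buffer is full */
        if (try > room)
            try = room;         /* don't run into stored bytes */
        if (try > count)
            try = count;        /* honour the caller's limit */

        ret = recv(fd, r->data + end, try, 0);
        if (ret <= 0)
            break;              /* error, EAGAIN or shutdown: left for the caller */
        r->used += (size_t)ret;
        count   -= (size_t)ret;
        total   += (size_t)ret;
        if ((size_t)ret < try)
            break;              /* short read: stop, as the original code does */
    }
    return total;
}

The first iteration reads into the contiguous space up to the end of storage; only if that chunk is filled completely does a second iteration continue at the wrapped position, which mirrors the "unless the buffer wraps ... a second call may be performed" wording kept in the updated doc comments.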
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -1325,8 +1325,7 @@ reneg_ok:
 }
 
 /* Receive up to <count> bytes from connection <conn>'s socket and store them
- * into buffer <buf>. The caller must ensure that <count> is always smaller
- * than the buffer's size. Only one call to recv() is performed, unless the
+ * into buffer <buf>. Only one call to recv() is performed, unless the
  * buffer wraps, in which case a second call may be performed. The connection's
  * flags are updated with whatever special event is detected (error, read0,
  * empty). The caller is responsible for taking care of those events and
@@ -1336,7 +1335,7 @@ reneg_ok:
 static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
 {
     int ret, done = 0;
-    int try = count;
+    int try;
 
     if (!conn->xprt_ctx)
         goto out_error;
@@ -1345,17 +1344,9 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
         /* a handshake was requested */
         return 0;
 
-    /* compute the maximum block size we can read at once. */
-    if (buffer_empty(buf)) {
-        /* let's realign the buffer to optimize I/O */
+    /* let's realign the buffer to optimize I/O */
+    if (buffer_empty(buf))
         buf->p = buf->data;
-    }
-    else if (buf->data + buf->o < buf->p &&
-             buf->p + buf->i < buf->data + buf->size) {
-        /* remaining space wraps at the end, with a moving limit */
-        if (try > buf->data + buf->size - (buf->p + buf->i))
-            try = buf->data + buf->size - (buf->p + buf->i);
-    }
 
     /* read the largest possible block. For this, we perform only one call
      * to recv() unless the buffer wraps and we exactly fill the first hunk,
@@ -1363,6 +1354,17 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
      * EINTR too.
      */
-    while (try) {
+    while (count > 0) {
+        /* first check if we have some room after p+i */
+        try = buf->data + buf->size - (buf->p + buf->i);
+        /* otherwise continue between data and p-o */
+        if (try <= 0) {
+            try = buf->p - (buf->data + buf->o);
+            if (try <= 0)
+                break;
+        }
+        if (try > count)
+            try = count;
+
         ret = SSL_read(conn->xprt_ctx, bi_end(buf), try);
         if (conn->flags & CO_FL_ERROR) {
             /* CO_FL_ERROR may be set by ssl_sock_infocbk */
@@ -1374,7 +1376,6 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int count)
             if (ret < try)
                 break;
             count -= ret;
-            try = count;
         }
         else if (ret == 0) {
             ret = SSL_get_error(conn->xprt_ctx, ret);