Mirror of http://git.haproxy.org/git/haproxy.git/, synced 2025-04-01 22:48:25 +00:00.
BUG/MEDIUM: chunks: always reject negative-length chunks
The recent addition of "show env" on the CLI has revealed an interesting design bug. Chunks are supposed to support a negative length to indicate that they carry no data. chunk_printf() sets this size to -1 if the string is too large for the buffer. At a few places in the HTTP engine we may end up with trash.len = -1. But bi_putchk(), chunk_appendf() and a few other chunk consumers don't consider this case as possible and will use such a chunk, possibly restoring an invalid string or trying to copy -1 bytes.

This fix takes care of clarifying the situation in a backportable way wherever such sizes are used, so that a negative length indicating an error remains present until the chunk is reinitialized or overwritten. A cleaner design adjustment still needs to be done so that there is a clear contract on how to use these chunks; at first glance it doesn't seem *that* useful to support negative sizes, so probably this is what should change.

This fix must be backported to 1.6 and 1.5.
commit 320ec2a745
parent 09a9178311
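For illustration, here is a small standalone sketch (not HAProxy code; every name in it is invented) of the failure mode described above and of the guard pattern this patch applies: a formatting helper marks overflow by setting the length to -1, and consumers must reject a negative length before touching the buffer instead of copying -1 bytes.

#include <stdio.h>
#include <string.h>
#include <stdarg.h>

/* Minimal stand-in for a chunk: str points to a buffer of <size> bytes,
 * len is the used length, or -1 to flag "no valid data / previous error". */
struct mini_chunk {
	char *str;
	int size;
	int len;
};

/* Mirrors the chunk_printf() behaviour described in the commit message:
 * on overflow the length is set to -1 instead of a valid size. */
static int mini_printf(struct mini_chunk *chk, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(chk->str, chk->size, fmt, args);
	va_end(args);

	if (ret < 0 || ret >= chk->size)
		chk->len = -1;          /* too large: mark the chunk as errored */
	else
		chk->len = ret;
	return chk->len;
}

/* Consumer guard of the kind added by this patch: refuse to append to a
 * chunk whose length is negative instead of copying -1 bytes. */
static int mini_strcat(struct mini_chunk *chk, const char *str)
{
	size_t len = strlen(str);

	if (chk->len < 0 || chk->len + (int)len >= chk->size)
		return 0;

	memcpy(chk->str + chk->len, str, len + 1);
	chk->len += (int)len;
	return chk->len;
}

int main(void)
{
	char buf[16];
	struct mini_chunk trash = { .str = buf, .size = sizeof(buf), .len = 0 };

	/* This format result does not fit in 16 bytes, so len becomes -1 ... */
	mini_printf(&trash, "%s", "a string that is clearly too long");

	/* ... and the guarded consumer now fails cleanly instead of using it. */
	if (!mini_strcat(&trash, "suffix"))
		printf("append rejected: chunk is in error state (len=%d)\n", trash.len);
	return 0;
}

The hunks below add exactly this kind of "len < 0" check to chunk_initlen(), chunk_strcat(), chunk_newstr(), chunk_dup(), bo_inject(), bi_putblk(), chunk_appendf(), chunk_htmlencode() and chunk_asciiencode().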
--- a/include/common/chunk.h
+++ b/include/common/chunk.h
@@ -67,7 +67,7 @@ static inline void chunk_init(struct chunk *chk, char *str, size_t size)
 static inline int chunk_initlen(struct chunk *chk, char *str, size_t size, int len)
 {
 
-	if (size && len > size)
+	if (len < 0 || (size && len > size))
 		return 0;
 
 	chk->str = str;
@@ -112,7 +112,7 @@ static inline int chunk_strcat(struct chunk *chk, const char *str)
 
 	len = strlen(str);
 
-	if (unlikely(chk->len + len >= chk->size))
+	if (unlikely(chk->len < 0 || chk->len + len >= chk->size))
 		return 0;
 
 	memcpy(chk->str + chk->len, str, len + 1);
@@ -134,7 +134,7 @@ static inline int chunk_strcat(struct chunk *chk, const char *str)
  */
 static inline char *chunk_newstr(struct chunk *chk)
 {
-	if (chk->len + 1 >= chk->size)
+	if (chk->len < 0 || chk->len + 1 >= chk->size)
 		return NULL;
 
 	chk->str[chk->len++] = 0;
@@ -166,7 +166,7 @@ static inline void chunk_destroy(struct chunk *chk)
  */
 static inline char *chunk_dup(struct chunk *dst, const struct chunk *src)
 {
-	if (!dst || !src || !src->str)
+	if (!dst || !src || src->len < 0 || !src->str)
 		return NULL;
 
 	if (dst->size)
--- a/src/channel.c
+++ b/src/channel.c
@@ -77,7 +77,7 @@ int bo_inject(struct channel *chn, const char *msg, int len)
 	if (len == 0)
 		return -1;
 
-	if (len > chn->buf->size) {
+	if (len < 0 || len > chn->buf->size) {
 		/* we can't write this chunk and will never be able to, because
 		 * it is larger than the buffer. This must be reported as an
 		 * error. Then we return -2 so that writers that don't care can
@@ -142,6 +142,9 @@ int bi_putblk(struct channel *chn, const char *blk, int len)
 	if (unlikely(channel_input_closed(chn)))
 		return -2;
 
+	if (len < 0)
+		return -3;
+
 	max = channel_recv_limit(chn);
 	if (unlikely(len > max - buffer_len(chn->buf))) {
 		/* we can't write this chunk right now because the buffer is
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -111,7 +111,7 @@ int chunk_appendf(struct chunk *chk, const char *fmt, ...)
 	va_list argp;
 	int ret;
 
-	if (!chk->str || !chk->size)
+	if (chk->len < 0 || !chk->str || !chk->size)
 		return 0;
 
 	va_start(argp, fmt);
@@ -136,6 +136,9 @@ int chunk_htmlencode(struct chunk *dst, struct chunk *src)
 	int olen, free;
 	char c;
 
+	if (dst->len < 0)
+		return dst->len;
+
 	olen = dst->len;
 
 	for (i = 0; i < src->len; i++) {
@@ -177,6 +180,9 @@ int chunk_asciiencode(struct chunk *dst, struct chunk *src, char qc)
 	int olen, free;
 	char c;
 
+	if (dst->len < 0)
+		return dst->len;
+
 	olen = dst->len;
 
 	for (i = 0; i < src->len; i++) {
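As a usage note, the hunks above make bi_putblk() report a negative length with a new -3 return code, and make chunk_htmlencode()/chunk_asciiencode() return the (negative) dst->len unchanged so the error state propagates to the caller. A hypothetical caller pattern, sketched against a stub rather than the real channel API, might look like this:

#include <stdio.h>

/* Stub standing in for bi_putblk(); it only models the return convention
 * relevant to this patch: -3 flags a negative (invalid) length, other
 * negative values are pre-existing error codes, >= 0 means bytes copied. */
static int bi_putblk_stub(const char *blk, int len)
{
	(void)blk;                      /* the stub does not copy anything */
	if (len < 0)
		return -3;              /* the check added by this patch */
	return len;                     /* pretend the whole block was copied */
}

/* Hypothetical caller pattern: treat any negative return as "not copied",
 * and -3 specifically as a programming error rather than a transient state. */
static void send_block(const char *blk, int len)
{
	int ret = bi_putblk_stub(blk, len);

	if (ret == -3)
		fprintf(stderr, "BUG: negative-length block (%d) was rejected\n", len);
	else if (ret < 0)
		fprintf(stderr, "block not sent (err=%d)\n", ret);
	else
		printf("sent %d bytes\n", ret);
}

int main(void)
{
	send_block("hello", 5);
	send_block("oops", -1);         /* e.g. a chunk left with len == -1 */
	return 0;
}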