2012-08-24 17:22:53 +00:00
|
|
|
/*
|
|
|
|
* include/common/buffer.h
|
|
|
|
* Buffer management definitions, macros and inline functions.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
* exclusively.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _COMMON_BUFFER_H
|
|
|
|
#define _COMMON_BUFFER_H
|
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
2018-07-11 07:39:05 +00:00
|
|
|
#include <common/buf.h>
|
2012-09-28 14:02:48 +00:00
|
|
|
#include <common/chunk.h>
|
2012-08-24 17:22:53 +00:00
|
|
|
#include <common/config.h>
|
2017-09-22 13:02:54 +00:00
|
|
|
#include <common/ist.h>
|
2012-10-12 21:49:43 +00:00
|
|
|
#include <common/memory.h>
|
2012-08-24 17:22:53 +00:00
|
|
|
|
|
|
|
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocated, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independent from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
/* An element of the <buffer_wq> list: represents an entity (historically a
 * stream, but applets or any other object may register too) that failed to
 * get a buffer and is waiting for one to become available before it can
 * resume its processing.
 */
struct buffer_wait {
	void *target;             /* the waiting entity to be woken up */
	int (*wakeup_cb)(void *); /* callback used to wake up <target>; <target> is passed as its argument */
	struct list list;         /* next element in the <buffer_wq> list */
};
|
|
|
|
|
2017-11-24 16:34:44 +00:00
|
|
|
extern struct pool_head *pool_head_buffer;
|
2014-11-24 10:39:34 +00:00
|
|
|
extern struct buffer buf_empty;
|
2014-11-24 10:55:08 +00:00
|
|
|
extern struct buffer buf_wanted;
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
extern struct list buffer_wq;
|
2017-11-26 10:00:37 +00:00
|
|
|
__decl_hathreads(extern HA_SPINLOCK_T buffer_wq_lock);
|
2012-08-24 17:22:53 +00:00
|
|
|
|
2012-10-12 21:49:43 +00:00
|
|
|
int init_buffer();
|
2017-08-29 13:30:11 +00:00
|
|
|
void deinit_buffer();
|
2012-08-27 20:08:00 +00:00
|
|
|
int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len);
|
|
|
|
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
|
2012-08-24 17:22:53 +00:00
|
|
|
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
|
|
|
|
|
|
|
/*****************************************************************/
|
|
|
|
/* These functions are used to compute various buffer area sizes */
|
|
|
|
/*****************************************************************/
|
|
|
|
|
|
|
|
|
2018-06-15 11:59:36 +00:00
|
|
|
|
|
|
|
/***** FIXME: OLD API BELOW *****/
|
2012-08-24 17:22:53 +00:00
|
|
|
|
|
|
|
/* Returns a non-zero value when buffer <buf> holds no data at all (neither
 * input nor output), otherwise zero.
 */
static inline int buffer_empty(const struct buffer *buf)
{
	return b_data(buf) == 0;
}
|
|
|
|
|
2012-08-27 17:51:36 +00:00
|
|
|
/* Returns non-zero if the buffer's INPUT is considered full, which means that
|
|
|
|
* it holds at least as much INPUT data as (size - reserve). This also means
|
|
|
|
* that data that are scheduled for output are considered as potential free
|
|
|
|
* space, and that the reserved space is always considered as not usable. This
|
|
|
|
* information alone cannot be used as a general purpose free space indicator.
|
|
|
|
* However it accurately indicates that too many data were fed in the buffer
|
2015-01-13 19:20:10 +00:00
|
|
|
* for an analyzer for instance. See the channel_may_recv() function for a more
|
2012-08-27 17:51:36 +00:00
|
|
|
* generic function taking everything into account.
|
|
|
|
*/
|
|
|
|
static inline int buffer_full(const struct buffer *b, unsigned int reserve)
|
|
|
|
{
|
2014-11-28 19:54:13 +00:00
|
|
|
if (b == &buf_empty)
|
|
|
|
return 0;
|
|
|
|
|
2012-08-27 17:51:36 +00:00
|
|
|
return (b->i + reserve >= b->size);
|
|
|
|
}
|
|
|
|
|
2012-08-24 17:22:53 +00:00
|
|
|
/* Normalizes a pointer after a subtract */
|
|
|
|
static inline char *buffer_wrap_sub(const struct buffer *buf, char *ptr)
|
|
|
|
{
|
|
|
|
if (ptr < buf->data)
|
|
|
|
ptr += buf->size;
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Normalizes a pointer after an addition */
|
|
|
|
static inline char *buffer_wrap_add(const struct buffer *buf, char *ptr)
|
|
|
|
{
|
|
|
|
if (ptr - buf->size >= buf->data)
|
|
|
|
ptr -= buf->size;
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
2015-03-07 13:38:50 +00:00
|
|
|
/* Returns the amount of bytes that can be written starting from <p> into the
 * input buffer at once, including reserved space which may be overwritten.
 * This is used by Lua to insert data in the input side just before the other
 * data using buffer_replace(). The goal is to transfer these new data in the
 * output buffer.
 */
static inline int bi_space_for_replace(const struct buffer *buf)
{
	const char *end;

	/* If the input side data wraps past the end of the storage area, we
	 * cannot insert data contiguously after it.
	 */
	if (buf->p + buf->i >= buf->data + buf->size)
		return 0;

	/* Locate the last byte used in the buffer: it may be a byte of the
	 * output side if the buffer wraps, or it's the end of the buffer.
	 */
	end = buffer_wrap_sub(buf, buf->p - buf->o);
	if (end <= buf->p)
		end = buf->data + buf->size;

	/* Compute the amount of bytes which can be written between the end of
	 * the input data and that limit.
	 */
	return end - (buf->p + buf->i);
}
|
|
|
|
|
|
|
|
|
2012-08-24 17:22:53 +00:00
|
|
|
/* Normalizes a pointer which is supposed to be relative to the beginning of a
|
|
|
|
* buffer, so that wrapping is correctly handled. The intent is to use this
|
|
|
|
* when increasing a pointer. Note that the wrapping test is only performed
|
|
|
|
* once, so the original pointer must be between ->data-size and ->data+2*size-1,
|
|
|
|
* otherwise an invalid pointer might be returned.
|
|
|
|
*/
|
|
|
|
static inline const char *buffer_pointer(const struct buffer *buf, const char *ptr)
|
|
|
|
{
|
|
|
|
if (ptr < buf->data)
|
|
|
|
ptr += buf->size;
|
|
|
|
else if (ptr - buf->size >= buf->data)
|
|
|
|
ptr -= buf->size;
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns the distance between two pointers, taking into account the ability
|
|
|
|
* to wrap around the buffer's end.
|
|
|
|
*/
|
|
|
|
static inline int buffer_count(const struct buffer *buf, const char *from, const char *to)
|
|
|
|
{
|
|
|
|
int count = to - from;
|
2013-04-01 23:25:57 +00:00
|
|
|
|
|
|
|
count += count < 0 ? buf->size : 0;
|
2012-08-24 17:22:53 +00:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns the amount of pending bytes in buffer <buf>, i.e. the input bytes
 * (->i) that are present but not yet scheduled to be sent (not counted in
 * ->o).
 */
static inline int buffer_pending(const struct buffer *buf)
{
	return buf->i;
}
|
|
|
|
|
|
|
|
/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
|
|
|
|
static inline int buffer_almost_full(const struct buffer *buf)
|
|
|
|
{
|
2014-11-28 19:54:13 +00:00
|
|
|
if (buf == &buf_empty)
|
|
|
|
return 0;
|
|
|
|
|
MINOR: buffer: add a few basic functions for the new API
Here's the list of newly introduced functions :
- b_data(), returning the total amount of data in the buffer (currently i+o)
- b_orig(), returning the origin of the storage area, that is, the place of
position 0.
- b_wrap(), pointer to wrapping point (currently data+size)
- b_size(), returning the size of the buffer
- b_room(), returning the amount of bytes left available
- b_full(), returning true if the buffer is full, otherwise false
- b_stop(), pointer to end of data mark (currently p+i), used to compute
distances or a stop pointer for a loop.
- b_peek(), this one will help make the transition to the new buffer model.
It returns a pointer to a position in the buffer known from an offest
relative to the beginning of the data in the buffer. Thus, we can replace
the following occurrences :
bo_ptr(b) => b_peek(b, 0);
bo_end(b) => b_peek(b, b->o);
bi_ptr(b) => b_peek(b, b->o);
bi_end(b) => b_peek(b, b->i + b->o);
b_ptr(b, ofs) => b_peek(b, b->o + ofs);
- b_head(), pointer to the beginning of data (currently bo_ptr())
- b_tail(), pointer to first free place (currently bi_ptr())
- b_next() / b_next_ofs(), pointer to the next byte, taking wrapping
into account.
- b_dist(), returning the distance between two pointers belonging to a buffer
- b_reset(), which resets the buffer
- b_space_wraps(), indicating if the free space wraps around the buffer
- b_almost_full(), indicating if 3/4 or more of the buffer are used
Some of these are provided with the unchecked variants using the "__"
prefix, or with the "_ofs" suffix indicating they return a relative
position to the buffer's origin instead of a pointer.
Cc: Olivier Houchard <ohouchard@haproxy.com>
2018-06-06 12:30:50 +00:00
|
|
|
return b_almost_full(buf);
|
2012-08-24 17:22:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Cut the first <n> pending bytes in a contiguous buffer. It is illegal to
|
|
|
|
* call this function with remaining data waiting to be sent (o > 0). The
|
|
|
|
* caller must ensure that <n> is smaller than the actual buffer's length.
|
|
|
|
* This is mainly used to remove empty lines at the beginning of a request
|
|
|
|
* or a response.
|
|
|
|
*/
|
|
|
|
static inline void bi_fast_delete(struct buffer *buf, int n)
|
|
|
|
{
|
|
|
|
buf->i -= n;
|
|
|
|
buf->p += n;
|
|
|
|
}
|
|
|
|
|
2012-08-24 20:56:11 +00:00
|
|
|
/* Schedule all remaining buffer data to be sent. ->o is not touched if it
|
|
|
|
* already covers those data. That permits doing a flush even after a forward,
|
|
|
|
* although not recommended.
|
|
|
|
*/
|
|
|
|
static inline void buffer_flush(struct buffer *buf)
|
|
|
|
{
|
|
|
|
buf->p = buffer_wrap_add(buf, buf->p + buf->i);
|
|
|
|
buf->o += buf->i;
|
|
|
|
buf->i = 0;
|
|
|
|
}
|
2012-08-24 17:22:53 +00:00
|
|
|
|
2012-08-27 20:08:00 +00:00
|
|
|
/* Writes the NUL-terminated string <str> at position <pos> which must be in
 * buffer <b>, and moves <end> just after the end of <str>. <b>'s parameters
 * are updated to be valid after the shift. The shift value (positive or
 * negative) is returned. If there's no space left, the move is not done. The
 * function does not adjust ->o because it does not make sense to use it on
 * data scheduled to be sent.
 */
static inline int buffer_replace(struct buffer *b, char *pos, char *end, const char *str)
{
	size_t len = strlen(str);

	return buffer_replace2(b, pos, end, str, len);
}
|
|
|
|
|
2012-09-28 14:02:48 +00:00
|
|
|
/* Tries to write char <c> into the output data at buffer <b>. Supports
 * wrapping. The character is silently dropped if the buffer is already
 * full (i + o == size).
 */
static inline void bo_putchr(struct buffer *b, char c)
{
	if (b_data(b) == b->size)
		return;
	*b->p = c;
	/* advance <p> by one byte with wrapping handled by b_peek(), then
	 * account the new byte as output data
	 */
	b->p = b_peek(b, b->o + 1);
	b->o++;
}
|
|
|
|
|
|
|
|
/* Tries to copy block <blk> of length <len> into the output data at buffer
 * <b>. Supports wrapping. Data are truncated if the buffer is too short. It
 * returns the number of bytes effectively copied (possibly zero).
 */
static inline int bo_putblk(struct buffer *b, const char *blk, int len)
{
	int cur_len = b_data(b);	/* total bytes already present (input + output) */
	int half;

	/* truncate <len> to the free space left in the buffer */
	if (len > b->size - cur_len)
		len = (b->size - cur_len);
	if (!len)
		return 0;

	/* first pass: fill the contiguous free space starting at <p> */
	half = b_contig_space(b);
	if (half > len)
		half = len;

	memcpy(b->p, blk, half);
	b->p = b_peek(b, b->o + half);
	b->o += half;
	if (len > half) {
		/* the block wraps: copy the remainder at the wrapped position */
		memcpy(b->p, blk + half, len - half);
		b->p = b_peek(b, b->o + len - half);
		b->o += len - half;
	}
	return len;
}
|
|
|
|
|
|
|
|
/* Tries to copy the NUL-terminated string <str> into the output data at
 * buffer <b>. Supports wrapping. Data are truncated if the buffer is too
 * short. It returns the number of bytes copied.
 */
static inline int bo_putstr(struct buffer *b, const char *str)
{
	size_t len = strlen(str);

	return bo_putblk(b, str, len);
}
|
|
|
|
|
|
|
|
/* Tries to copy chunk <chk> into output data at buffer <b>. Supports wrapping.
|
2015-02-06 17:40:20 +00:00
|
|
|
* Data are truncated if buffer is too short. It returns the number of bytes
|
|
|
|
* copied.
|
2012-09-28 14:02:48 +00:00
|
|
|
*/
|
2015-02-06 17:40:20 +00:00
|
|
|
static inline int bo_putchk(struct buffer *b, const struct chunk *chk)
|
2012-09-28 14:02:48 +00:00
|
|
|
{
|
|
|
|
return bo_putblk(b, chk->str, chk->len);
|
|
|
|
}
|
|
|
|
|
2017-10-26 13:26:17 +00:00
|
|
|
/* Tries to write char <c> into the input data at buffer <b>. Supports
 * wrapping. The character is silently dropped if the buffer is already
 * full (i + o == size).
 */
static inline void bi_putchr(struct buffer *b, char c)
{
	if (b_data(b) == b->size)
		return;
	/* b_tail() points to the first free byte after the input data */
	*b_tail(b) = c;
	b->i++;
}
|
|
|
|
|
|
|
|
/* Tries to copy block <blk> of length <len> into the input data at buffer
 * <b>. Supports wrapping. Data are truncated if the buffer is too short. It
 * returns the number of bytes effectively copied (possibly zero).
 */
static inline int bi_putblk(struct buffer *b, const char *blk, int len)
{
	int cur_len = b_data(b);	/* total bytes already present (input + output) */
	int half;

	/* truncate <len> to the free space left in the buffer */
	if (len > b->size - cur_len)
		len = (b->size - cur_len);
	if (!len)
		return 0;

	/* first pass: fill the contiguous free space after the tail */
	half = b_contig_space(b);
	if (half > len)
		half = len;

	memcpy(b_tail(b), blk, half);
	if (len > half)
		/* the block wraps: copy the remainder at the wrapped position */
		memcpy(b_peek(b, b->o + b->i + half), blk + half, len - half);
	b->i += len;
	return len;
}
|
|
|
|
|
|
|
|
/* Tries to copy the NUL-terminated string <str> into the input data at
 * buffer <b>. Supports wrapping. Data are truncated if the buffer is too
 * short. It returns the number of bytes copied.
 */
static inline int bi_putstr(struct buffer *b, const char *str)
{
	size_t len = strlen(str);

	return bi_putblk(b, str, len);
}
|
|
|
|
|
|
|
|
/* Tries to copy chunk <chk> into input data at buffer <b>. Supports wrapping.
|
|
|
|
* Data are truncated if buffer is too short. It returns the number of bytes
|
|
|
|
* copied.
|
|
|
|
*/
|
|
|
|
static inline int bi_putchk(struct buffer *b, const struct chunk *chk)
|
|
|
|
{
|
|
|
|
return bi_putblk(b, chk->str, chk->len);
|
|
|
|
}
|
|
|
|
|
2014-11-24 10:55:08 +00:00
|
|
|
/* Allocates a buffer and replaces *buf with this buffer. If no memory is
|
|
|
|
* available, &buf_wanted is used instead. No control is made to check if *buf
|
|
|
|
* already pointed to another buffer. The allocated buffer is returned, or
|
|
|
|
* NULL in case no memory is available.
|
2014-11-24 10:30:16 +00:00
|
|
|
*/
|
|
|
|
static inline struct buffer *b_alloc(struct buffer **buf)
|
|
|
|
{
|
2014-11-24 10:55:08 +00:00
|
|
|
struct buffer *b;
|
|
|
|
|
|
|
|
*buf = &buf_wanted;
|
2017-11-24 16:34:44 +00:00
|
|
|
b = pool_alloc_dirty(pool_head_buffer);
|
2014-11-24 10:55:08 +00:00
|
|
|
if (likely(b)) {
|
2017-11-24 16:34:44 +00:00
|
|
|
b->size = pool_head_buffer->size - sizeof(struct buffer);
|
2014-11-24 10:55:08 +00:00
|
|
|
b_reset(b);
|
|
|
|
*buf = b;
|
2014-11-24 10:30:16 +00:00
|
|
|
}
|
2014-11-24 10:55:08 +00:00
|
|
|
return b;
|
2014-11-24 10:30:16 +00:00
|
|
|
}
|
|
|
|
|
2014-12-08 15:37:26 +00:00
|
|
|
/* Allocates a buffer and replaces *buf with this buffer. If no memory is
|
|
|
|
* available, &buf_wanted is used instead. No control is made to check if *buf
|
|
|
|
* already pointed to another buffer. The allocated buffer is returned, or
|
|
|
|
* NULL in case no memory is available. The difference with b_alloc() is that
|
|
|
|
* this function only picks from the pool and never calls malloc(), so it can
|
|
|
|
* fail even if some memory is available.
|
|
|
|
*/
|
|
|
|
static inline struct buffer *b_alloc_fast(struct buffer **buf)
|
|
|
|
{
|
|
|
|
struct buffer *b;
|
|
|
|
|
|
|
|
*buf = &buf_wanted;
|
2017-11-24 16:34:44 +00:00
|
|
|
b = pool_get_first(pool_head_buffer);
|
2014-12-08 15:37:26 +00:00
|
|
|
if (likely(b)) {
|
2017-11-24 16:34:44 +00:00
|
|
|
b->size = pool_head_buffer->size - sizeof(struct buffer);
|
2014-12-08 15:37:26 +00:00
|
|
|
b_reset(b);
|
|
|
|
*buf = b;
|
|
|
|
}
|
|
|
|
return b;
|
|
|
|
}
|
|
|
|
|
2014-11-24 10:39:34 +00:00
|
|
|
/* Releases buffer *buf back to the buffer pool without any check of
 * emptiness nor of validity: the caller must guarantee that *buf really is
 * a pool-allocated buffer.
 */
static inline void __b_drop(struct buffer **buf)
{
	pool_free(pool_head_buffer, *buf);
}
|
|
|
|
|
2014-11-24 10:39:34 +00:00
|
|
|
/* Releases buffer *buf if allocated. */
|
|
|
|
static inline void b_drop(struct buffer **buf)
|
|
|
|
{
|
|
|
|
if (!(*buf)->size)
|
|
|
|
return;
|
|
|
|
__b_drop(buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Releases buffer *buf if allocated, and replaces it with the static dummy
 * &buf_empty so that the pointer always remains valid afterwards.
 */
static inline void b_free(struct buffer **buf)
{
	b_drop(buf);
	*buf = &buf_empty;
}
|
|
|
|
|
2014-12-02 12:54:01 +00:00
|
|
|
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 *
 * We need to lock the pool here to be sure to have <margin> buffers available
 * after the allocation, regardless of how many threads are doing it at the
 * same time. So, we use internal and lockless memory functions (prefixed
 * with '__').
 */
static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
{
	struct buffer *b;

	/* nothing to do if *buf is already a real (non-dummy) buffer */
	if ((*buf)->size)
		return *buf;

	*buf = &buf_wanted;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path: pick from the pool when more than <margin> entries remain */
	if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
		b = __pool_get_first(pool_head_buffer);
		if (likely(b)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			b->size = pool_head_buffer->size - sizeof(struct buffer);
			b_reset(b);
			*buf = b;
			return b;
		}
	}

	/* slow path, uses malloc() to refill the pool while keeping <margin>
	 * spare entries
	 */
	b = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (b) {
		b->size = pool_head_buffer->size - sizeof(struct buffer);
		b_reset(b);
		*buf = b;
	}
	return b;
}
|
|
|
|
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
|
2018-03-02 09:27:12 +00:00
|
|
|
/* Offer a buffer currently belonging to target <from> to whoever needs one.
|
|
|
|
* Any pointer is valid for <from>, including NULL. Its purpose is to avoid
|
|
|
|
* passing a buffer to oneself in case of failed allocations (e.g. need two
|
|
|
|
* buffers, get one, fail, release it and wake up self again). In case of
|
|
|
|
* normal buffer release where it is expected that the caller is not waiting
|
|
|
|
* for a buffer, NULL is fine.
|
|
|
|
*/
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
void __offer_buffer(void *from, unsigned int threshold);
|
|
|
|
|
|
|
|
static inline void offer_buffers(void *from, unsigned int threshold)
|
|
|
|
{
|
2017-11-07 09:42:54 +00:00
|
|
|
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
|
2017-06-21 13:42:52 +00:00
|
|
|
if (LIST_ISEMPTY(&buffer_wq)) {
|
2017-11-07 09:42:54 +00:00
|
|
|
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
return;
|
2017-06-21 13:42:52 +00:00
|
|
|
}
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
__offer_buffer(from, threshold);
|
2017-11-07 09:42:54 +00:00
|
|
|
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
|
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 16:30:18 +00:00
|
|
|
}
|
|
|
|
|
2017-09-22 13:02:54 +00:00
|
|
|
/*************************************************************************/
|
|
|
|
/* functions used to manipulate strings and blocks with wrapping buffers */
|
|
|
|
/*************************************************************************/
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
/* returns > 0 if the first <n> characters of buffer <b> starting at offset <o>
|
|
|
|
* relative to the buffer's head match <ist>. (empty strings do match). It is
|
2017-09-22 13:02:54 +00:00
|
|
|
* designed to be use with reasonably small strings (ie matches a single byte
|
|
|
|
* per iteration). This function is usable both with input and output data. To
|
|
|
|
* be used like this depending on what to match :
|
2018-06-15 11:45:17 +00:00
|
|
|
* - input contents : b_isteq(b, b->o, b->i, ist);
|
|
|
|
* - output contents : b_isteq(b, 0, b->o, ist);
|
2017-09-22 13:02:54 +00:00
|
|
|
* Return value :
|
|
|
|
* >0 : the number of matching bytes
|
|
|
|
* =0 : not enough bytes (or matching of empty string)
|
|
|
|
* <0 : non-matching byte found
|
|
|
|
*/
|
|
|
|
static inline int b_isteq(const struct buffer *b, unsigned int o, size_t n, const struct ist ist)
|
|
|
|
{
|
|
|
|
struct ist r = ist;
|
|
|
|
const char *p;
|
2018-06-15 11:45:17 +00:00
|
|
|
const char *end = b_wrap(b);
|
2017-09-22 13:02:54 +00:00
|
|
|
|
|
|
|
if (n < r.len)
|
|
|
|
return 0;
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
p = b_peek(b, o);
|
2017-09-22 13:02:54 +00:00
|
|
|
while (r.len--) {
|
|
|
|
if (*p++ != *r.ptr++)
|
|
|
|
return -1;
|
|
|
|
if (unlikely(p == end))
|
|
|
|
p = b->data;
|
|
|
|
}
|
|
|
|
return ist.len;
|
|
|
|
}
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
/* "eats" string <ist> from the head of buffer <b>. Wrapping data is explicitly
|
|
|
|
* supported. It matches a single byte per iteration so strings should remain
|
|
|
|
* reasonably small. Returns :
|
2017-09-22 13:02:54 +00:00
|
|
|
* > 0 : number of bytes matched and eaten
|
|
|
|
* = 0 : not enough bytes (or matching an empty string)
|
|
|
|
* < 0 : non-matching byte found
|
|
|
|
*/
|
2018-06-15 11:45:17 +00:00
|
|
|
static inline int b_eat(struct buffer *b, const struct ist ist)
|
2017-09-22 13:02:54 +00:00
|
|
|
{
|
2018-06-15 11:45:17 +00:00
|
|
|
int ret = b_isteq(b, 0, b_data(b), ist);
|
2017-09-22 13:02:54 +00:00
|
|
|
if (ret > 0)
|
2018-06-15 08:28:05 +00:00
|
|
|
b_del(b, ret);
|
2017-09-22 13:02:54 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
/* injects string <ist> at the tail of input buffer <b> provided that it
|
2017-09-22 13:47:51 +00:00
|
|
|
* fits. Wrapping is supported. It's designed for small strings as it only
|
|
|
|
* writes a single byte per iteration. Returns the number of characters copied
|
|
|
|
* (ist.len), 0 if it temporarily does not fit or -1 if it will never fit. It
|
|
|
|
* will only modify the buffer upon success. In all cases, the contents are
|
|
|
|
* copied prior to reporting an error, so that the destination at least
|
|
|
|
* contains a valid but truncated string.
|
|
|
|
*/
|
|
|
|
static inline int bi_istput(struct buffer *b, const struct ist ist)
|
|
|
|
{
|
2018-06-15 11:45:17 +00:00
|
|
|
const char *end = b_wrap(b);
|
2017-09-22 13:47:51 +00:00
|
|
|
struct ist r = ist;
|
|
|
|
char *p;
|
|
|
|
|
2018-06-15 11:59:36 +00:00
|
|
|
if (r.len > (size_t)b_room(b))
|
2017-09-22 13:47:51 +00:00
|
|
|
return r.len < b->size ? 0 : -1;
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
p = b_tail(b);
|
2017-09-22 13:47:51 +00:00
|
|
|
b->i += r.len;
|
|
|
|
while (r.len--) {
|
|
|
|
*p++ = *r.ptr++;
|
|
|
|
if (unlikely(p == end))
|
|
|
|
p = b->data;
|
|
|
|
}
|
|
|
|
return ist.len;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
/* injects string <ist> at the tail of output buffer <b> provided that it
|
2017-09-22 13:47:51 +00:00
|
|
|
* fits. Input data is assumed not to exist and will silently be overwritten.
|
|
|
|
* Wrapping is supported. It's designed for small strings as it only writes a
|
|
|
|
* single byte per iteration. Returns the number of characters copied (ist.len),
|
|
|
|
* 0 if it temporarily does not fit or -1 if it will never fit. It will only
|
|
|
|
* modify the buffer upon success. In all cases, the contents are copied prior
|
|
|
|
* to reporting an error, so that the destination at least contains a valid
|
|
|
|
* but truncated string.
|
|
|
|
*/
|
|
|
|
static inline int bo_istput(struct buffer *b, const struct ist ist)
|
|
|
|
{
|
2018-06-15 11:45:17 +00:00
|
|
|
const char *end = b_wrap(b);
|
2017-09-22 13:47:51 +00:00
|
|
|
struct ist r = ist;
|
|
|
|
char *p;
|
|
|
|
|
2018-06-15 11:59:36 +00:00
|
|
|
if (r.len > (size_t)b_room(b))
|
2017-09-22 13:47:51 +00:00
|
|
|
return r.len < b->size ? 0 : -1;
|
|
|
|
|
2018-06-15 11:45:17 +00:00
|
|
|
p = b_tail(b);
|
|
|
|
b->p = b_peek(b, b->o + r.len);
|
2017-09-22 13:47:51 +00:00
|
|
|
b->o += r.len;
|
|
|
|
while (r.len--) {
|
|
|
|
*p++ = *r.ptr++;
|
|
|
|
if (unlikely(p == end))
|
|
|
|
p = b->data;
|
|
|
|
}
|
|
|
|
return ist.len;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-24 17:22:53 +00:00
|
|
|
#endif /* _COMMON_BUFFER_H */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local variables:
|
|
|
|
* c-indent-level: 8
|
|
|
|
* c-basic-offset: 8
|
|
|
|
* End:
|
|
|
|
*/
|