
/*
include/proto/buffers.h
Buffer management definitions, macros and inline functions.
Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation, version 2.1
exclusively.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _PROTO_BUFFERS_H
#define _PROTO_BUFFERS_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <common/config.h>
#include <common/memory.h>
#include <common/ticks.h>
#include <common/time.h>
#include <types/buffers.h>
extern struct pool_head *pool2_buffer;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer();
/* Initializes all fields in the buffer. The ->max_len field is initialized last
* so that the compiler can optimize it away if changed immediately after the
* call to this function. By default, it is set to the full size of the buffer.
* This implies that buffer_init() must only be called once ->size is set!
* The BF_OUT_EMPTY flag is set.
*/
static inline void buffer_init(struct buffer *buf)
{
buf->send_max = 0;
buf->to_forward = 0;
buf->l = buf->total = 0;
buf->pipe = NULL;
buf->analysers = 0;
buf->cons = NULL;
buf->flags = BF_OUT_EMPTY;
buf->r = buf->lr = buf->w = buf->data;
buf->max_len = buf->size;
}
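/* Illustrative sketch, not part of the original API: a typical allocation and
 * initialization sequence. pool_alloc2() comes from <common/memory.h>; the
 * <bufsize> parameter and the assumption that pool2_buffer entries carry that
 * much data storage are hypothetical here. The point being shown is the
 * ordering constraint documented above: ->size must be set before
 * buffer_init() so that ->max_len is derived from it.
 */
static inline struct buffer *buffer_example_alloc(int bufsize)
{
	struct buffer *b;

	b = pool_alloc2(pool2_buffer);
	if (!b)
		return NULL;
	b->size = bufsize;      /* must be known first... */
	buffer_init(b);         /* ...so that max_len covers the whole buffer */
	return b;
}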
/* Check buffer timeouts, and set the corresponding flags. The
* likely/unlikely hints are optimized for the fastest normal path.
* The read/write timeouts are not set if there was activity on the buffer.
* That way, we don't have to update the timeout on every I/O. Note that the
* analyser timeout is always checked.
*/
static inline void buffer_check_timeouts(struct buffer *b)
{
if (likely(!(b->flags & (BF_SHUTR|BF_READ_TIMEOUT|BF_READ_ACTIVITY|BF_READ_NOEXP))) &&
unlikely(tick_is_expired(b->rex, now_ms)))
b->flags |= BF_READ_TIMEOUT;
if (likely(!(b->flags & (BF_SHUTW|BF_WRITE_TIMEOUT|BF_WRITE_ACTIVITY))) &&
unlikely(tick_is_expired(b->wex, now_ms)))
b->flags |= BF_WRITE_TIMEOUT;
if (likely(!(b->flags & BF_ANA_TIMEOUT)) &&
unlikely(tick_is_expired(b->analyse_exp, now_ms)))
b->flags |= BF_ANA_TIMEOUT;
}
/* Schedule <bytes> more bytes to be forwarded by the buffer without notifying
* the task. Any pending data in the buffer is scheduled to be sent as well,
* in the limit of the number of bytes to forward. This must be the only method
* to use to schedule bytes to be sent. Directly touching ->to_forward will
* cause lockups when send_max goes down to zero if nobody is ready to push the
* remaining data.
*/
static inline void buffer_forward(struct buffer *buf, unsigned long bytes)
{
unsigned long data_left;
if (!bytes)
return;
data_left = buf->l - buf->send_max;
if (data_left >= bytes) {
buf->send_max += bytes;
buf->flags &= ~BF_OUT_EMPTY;
return;
}
buf->send_max += data_left;
if (buf->send_max)
buf->flags &= ~BF_OUT_EMPTY;
if (buf->to_forward == BUF_INFINITE_FORWARD)
return;
buf->to_forward += bytes - data_left;
if (bytes == BUF_INFINITE_FORWARD)
buf->to_forward = bytes;
}
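/* Illustrative sketch, not part of the original API: scheduling an HTTP
 * response body for forwarding. <content_len> is a hypothetical parameter.
 * With, say, 300 header bytes already scheduled (->send_max = 300), 1000 body
 * bytes already present (->l = 1300) and a 9000-byte content length, the call
 * below leaves ->send_max = 1300 and ->to_forward = 8000: pending data is
 * flushed first, the remainder is forwarded as it arrives.
 */
static inline void buffer_example_forward_body(struct buffer *rep,
                                               unsigned long content_len)
{
	buffer_forward(rep, content_len);
}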
/* Schedule all remaining buffer data to be sent. send_max is not touched if it
* already covers those data. That permits doing a flush even after a forward,
* although not recommended.
*/
static inline void buffer_flush(struct buffer *buf)
{
if (buf->send_max < buf->l)
buf->send_max = buf->l;
if (buf->send_max)
buf->flags &= ~BF_OUT_EMPTY;
}
/* Erase any content from buffer <buf> and adjusts flags accordingly. Note
* that any spliced data is not affected since we may not have any access to
* it.
*/
static inline void buffer_erase(struct buffer *buf)
{
buf->send_max = 0;
buf->to_forward = 0;
buf->r = buf->lr = buf->w = buf->data;
buf->l = 0;
buf->flags &= ~(BF_FULL | BF_OUT_EMPTY);
if (!buf->pipe)
buf->flags |= BF_OUT_EMPTY;
if (!buf->max_len)
buf->flags |= BF_FULL;
}
/* Cut the "tail" of the buffer, which means strip it to the length of unsent
* data only, and kill any remaining unsent data. Any scheduled forwarding is
* stopped. This is mainly to be used to send error messages after existing
* data.
*/
static inline void buffer_cut_tail(struct buffer *buf)
{
if (!buf->send_max)
return buffer_erase(buf);
buf->to_forward = 0;
if (buf->l == buf->send_max)
return;
buf->l = buf->send_max;
buf->r = buf->w + buf->l;
if (buf->r >= buf->data + buf->size)
buf->r -= buf->size;
buf->lr = buf->r;
buf->flags &= ~BF_FULL;
if (buf->l >= buf->max_len)
buf->flags |= BF_FULL;
}
/* marks the buffer as "shutdown" ASAP for reads */
static inline void buffer_shutr_now(struct buffer *buf)
{
buf->flags |= BF_SHUTR_NOW;
}
/* marks the buffer as "shutdown" ASAP for writes */
static inline void buffer_shutw_now(struct buffer *buf)
{
buf->flags |= BF_SHUTW_NOW;
}
/* marks the buffer as "shutdown" ASAP in both directions */
static inline void buffer_abort(struct buffer *buf)
{
buf->flags |= BF_SHUTR_NOW | BF_SHUTW_NOW;
}
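/* Illustrative sketch, not part of the original API: a high-level processor
 * requesting a forced close without touching any file descriptor. The
 * SHUT*_NOW flags are only requests; the stream interfaces owning the buffers
 * perform the actual shutdowns later.
 */
static inline void buffer_example_force_close(struct buffer *req, struct buffer *rep)
{
	buffer_abort(req);      /* stop both directions on the request channel */
	buffer_abort(rep);      /* and on the response channel */
}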
/* Installs <func> as a hijacker on the buffer <b> for session <s>. The hijack
* flag is set, and the function called once. The function is responsible for
* clearing the hijack bit. It is possible that the function clears the flag
* during this first call.
*/
static inline void buffer_install_hijacker(struct session *s,
struct buffer *b,
void (*func)(struct session *, struct buffer *))
{
b->hijacker = func;
b->flags |= BF_HIJACK;
func(s, b);
}
/* Releases the buffer from hijacking mode. Often used by the hijack function */
static inline void buffer_stop_hijack(struct buffer *buf)
{
buf->flags &= ~BF_HIJACK;
}
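/* Illustrative sketch, not part of the original API: the skeleton of a hijack
 * function in the style of a stats dump producer, installed with
 * buffer_install_hijacker(s, rep, buffer_example_hijack_dump). A real
 * producer would feed data into <rep> (e.g. with buffer_feed_chunk(),
 * declared further below); the only contract shown here is that the function
 * itself must clear BF_HIJACK once it has nothing left to produce.
 */
static inline void buffer_example_hijack_dump(struct session *s, struct buffer *rep)
{
	(void)s;                        /* real producers read their state from <s> */

	if (rep->flags & BF_FULL)
		return;                 /* no room yet, leave BF_HIJACK set for now */

	/* ... produce data into <rep> here ... */

	buffer_stop_hijack(rep);        /* done, hand the buffer back to its producer */
}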
/* allow the consumer to try to establish a new connection. */
static inline void buffer_auto_connect(struct buffer *buf)
{
buf->flags |= BF_AUTO_CONNECT;
}
/* prevent the consumer from trying to establish a new connection, and also
* disable auto shutdown forwarding.
*/
static inline void buffer_dont_connect(struct buffer *buf)
{
buf->flags &= ~(BF_AUTO_CONNECT|BF_AUTO_CLOSE);
}
/* allow the producer to forward shutdown requests */
static inline void buffer_auto_close(struct buffer *buf)
{
buf->flags |= BF_AUTO_CLOSE;
}
/* prevent the producer from forwarding shutdown requests */
static inline void buffer_dont_close(struct buffer *buf)
{
buf->flags &= ~BF_AUTO_CLOSE;
}
/* returns the maximum number of bytes writable at once in this buffer */
static inline int buffer_max(const struct buffer *buf)
{
if (buf->l == buf->size)
return 0;
else if (buf->r >= buf->w)
return buf->data + buf->size - buf->r;
else
return buf->w - buf->r;
}
/* sets the buffer read limit to <size> bytes, and adjusts the FULL
* flag accordingly.
*/
static inline void buffer_set_rlim(struct buffer *buf, int size)
{
buf->max_len = size;
if (buf->l < size)
buf->flags &= ~BF_FULL;
else
buf->flags |= BF_FULL;
}
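/* Illustrative sketch, not part of the original API: reserving spare room at
 * the end of the buffer so that incoming data cannot starve later rewrites.
 * <reserve> is a hypothetical parameter; the idea of keeping such a rewrite
 * margin via the read limit is the assumption made here.
 */
static inline void buffer_example_reserve(struct buffer *buf, int reserve)
{
	if (reserve > buf->size)
		reserve = buf->size;
	buffer_set_rlim(buf, buf->size - reserve);
}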
/*
* Tries to realign the given buffer, and returns how many bytes can be written
* there at once without overwriting anything.
*/
static inline int buffer_realign(struct buffer *buf)
{
if (buf->l == 0) {
/* let's realign the buffer to optimize I/O */
buf->r = buf->w = buf->lr = buf->data;
}
return buffer_max(buf);
}
/*
* Return the max amount of bytes that can be stuffed into the buffer at once.
* Note that this may be lower than the actual buffer size when the free space
* wraps after the end, so it's preferable to call this function again after
* writing. Also note that this function respects max_len.
*/
static inline int buffer_contig_space(struct buffer *buf)
{
int ret;
if (buf->l == 0) {
buf->r = buf->w = buf->lr = buf->data;
ret = buf->max_len;
}
else if (buf->r > buf->w) {
ret = buf->data + buf->max_len - buf->r;
}
else {
ret = buf->w - buf->r;
if (ret > buf->max_len)
ret = buf->max_len;
}
return ret;
}
/*
* Return the max amount of bytes that can be read from the buffer at once.
* Note that this may be lower than the actual buffer length when the data
* wraps after the end, so it's preferable to call this function again after
* reading. Also note that this function respects the send_max limit.
*/
static inline int buffer_contig_data(struct buffer *buf)
{
int ret;
if (!buf->send_max || !buf->l)
return 0;
if (buf->r > buf->w)
ret = buf->r - buf->w;
else
ret = buf->data + buf->size - buf->w;
/* limit the amount of outgoing data if required */
if (ret > buf->send_max)
ret = buf->send_max;
return ret;
}
/*
* Advance the buffer's read pointer by <len> bytes. This is useful when data
* have been read directly from the buffer. It is illegal to call this function
* with <len> causing a wrapping at the end of the buffer. It's the caller's
* responsibility to ensure that <len> is never larger than buf->send_max.
*/
static inline void buffer_skip(struct buffer *buf, int len)
{
buf->w += len;
if (buf->w >= buf->data + buf->size)
buf->w -= buf->size; /* wrap around the buffer */
buf->l -= len;
if (!buf->l)
buf->r = buf->w = buf->lr = buf->data;
if (buf->l < buf->max_len)
buf->flags &= ~BF_FULL;
buf->send_max -= len;
if (!buf->send_max && !buf->pipe)
buf->flags |= BF_OUT_EMPTY;
/* notify that some data was written to the SI from the buffer */
buf->flags |= BF_WRITE_PARTIAL;
}
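/* Illustrative sketch, not part of the original API: a consumer draining the
 * visible buffer into a flat array. buffer_contig_data() bounds each pass by
 * both the wrapping point and ->send_max, and buffer_skip() then performs all
 * the bookkeeping (pointers, length, flags). Two passes happen naturally when
 * the outgoing data wraps past the end of the storage area.
 */
static inline int buffer_example_drain(struct buffer *buf, char *dst, int dst_size)
{
	int copied = 0;
	int block;

	while (copied < dst_size && (block = buffer_contig_data(buf)) > 0) {
		if (block > dst_size - copied)
			block = dst_size - copied;
		memcpy(dst + copied, buf->w, block);
		buffer_skip(buf, block);
		copied += block;
	}
	return copied;
}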
/*
* Return one char from the buffer. If the buffer is empty and closed, return -1.
* If the buffer is just empty, return -2. The buffer's pointer is not advanced,
* it's up to the caller to call buffer_skip(buf, 1) when it has consumed the char.
* Also note that this function respects the send_max limit.
*/
static inline int buffer_si_peekchar(struct buffer *buf)
{
if (buf->send_max)
return *buf->w;
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW))
return -1;
else
return -2;
}
/* Try to write character <c> into buffer <buf> after length controls. This
* works like buffer_feed(buf, &c, 1).
* Returns non-zero in case of success, 0 if the buffer was full.
* The send limit is automatically adjusted with the amount of data written.
*/
static inline int buffer_si_putchar(struct buffer *buf, char c)
{
if (buf->flags & BF_FULL)
return 0;
*buf->r = c;
buf->l++;
if (buf->l >= buf->max_len)
buf->flags |= BF_FULL;
buf->r++;
if (buf->r - buf->data == buf->size)
buf->r -= buf->size;
if (buf->to_forward >= 1) {
if (buf->to_forward != BUF_INFINITE_FORWARD)
buf->to_forward--;
buf->send_max++;
buf->flags &= ~BF_OUT_EMPTY;
}
buf->total++;
return 1;
}
int buffer_write(struct buffer *buf, const char *msg, int len);
int buffer_feed(struct buffer *buf, const char *str, int len);
int buffer_si_putchar(struct buffer *buf, char c);
int buffer_si_peekline(struct buffer *buf, char *str, int len);
int buffer_replace(struct buffer *b, char *pos, char *end, const char *str);
int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len);
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
/* writes the chunk <chunk> to buffer <buf>. Returns -1 in case of success,
* -2 if it is larger than the buffer size, or the number of bytes available
* otherwise. If the chunk has been written, its size is automatically reset
* to zero. The send limit is automatically adjusted with the amount of data
* written.
*/
static inline int buffer_write_chunk(struct buffer *buf, struct chunk *chunk)
{
int ret;
ret = buffer_write(buf, chunk->str, chunk->len);
if (ret == -1)
chunk->len = 0;
return ret;
}
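/* Illustrative sketch, not part of the original API: emitting an error
 * message after whatever was already scheduled for sending, which is the use
 * case buffer_cut_tail() documents above. <msg> is assumed to be a
 * pre-formatted chunk small enough to fit in the buffer (buffer_write_chunk()
 * returns -2 otherwise).
 */
static inline void buffer_example_reply_error(struct buffer *rep, struct chunk *msg)
{
	buffer_cut_tail(rep);           /* keep scheduled data, drop the rest */
	buffer_write_chunk(rep, msg);   /* append the error message */
	buffer_shutw_now(rep);          /* and request a close once it is sent */
}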
/* Try to write chunk <chunk> into buffer <buf> after length controls. This is
* the equivalent of buffer_write_chunk() except that to_forward and send_max
* are updated and that max_len is respected. Returns -1 in case of success,
* -2 if it is larger than the buffer size, or the number of bytes available
* otherwise. If the chunk has been written, its size is automatically reset
* to zero. The send limit is automatically adjusted with the amount of data
* written.
*/
static inline int buffer_feed_chunk(struct buffer *buf, struct chunk *chunk)
{
int ret;
ret = buffer_feed(buf, chunk->str, chunk->len);
if (ret == -1)
chunk->len = 0;
return ret;
}
static inline void chunk_init(struct chunk *chk, char *str, size_t size) {
chk->str = str;
chk->len = 0;
chk->size = size;
}
/* report 0 in case of error, 1 if OK. */
static inline int chunk_initlen(struct chunk *chk, char *str, size_t size, size_t len) {
if (len > size)
return 0;
chk->str = str;
chk->len = len;
chk->size = size;
return 1;
}
static inline void chunk_initstr(struct chunk *chk, char *str) {
chk->str = str;
chk->len = strlen(str);
chk->size = 0; /* mark it read-only */
}
static inline int chunk_strcpy(struct chunk *chk, const char *str) {
size_t len;
len = strlen(str);
if (unlikely(len > chk->size))
return 0;
chk->len = len;
memcpy(chk->str, str, len);
return 1;
}
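/* Illustrative sketch, not part of the original API: building a chunk over a
 * caller-provided array. chunk_init()/chunk_strcpy() refuse anything that
 * would overflow <storage_size>, which is what makes chunks convenient for
 * bounded string handling.
 */
static inline int chunk_example_build(struct chunk *out, char *storage,
                                      size_t storage_size, const char *txt)
{
	chunk_init(out, storage, storage_size);
	return chunk_strcpy(out, txt);  /* 0 if <txt> does not fit */
}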
int chunk_printf(struct chunk *chk, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
static inline void chunk_reset(struct chunk *chk) {
chk->str = NULL;
chk->len = -1;
chk->size = 0;
}
static inline void chunk_destroy(struct chunk *chk) {
if (!chk->size)
return;
if (chk->str)
free(chk->str);
chunk_reset(chk);
}
/*
* frees the destination chunk if already allocated, allocates a new string,
* and copies the source into it. The pointer to the destination string is
* returned, or NULL if the allocation fails or if any pointer is NULL.
*/
static inline char *chunk_dup(struct chunk *dst, const struct chunk *src) {
	if (!dst || !src || !src->str)
		return NULL;
	if (dst->str)
		free(dst->str);
	dst->len = src->len;
	dst->str = (char *)malloc(dst->len);
	if (!dst->str)          /* allocation failed, as announced above */
		return NULL;
	memcpy(dst->str, src->str, dst->len);
	return dst->str;
}
#endif /* _PROTO_BUFFERS_H */
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* End:
*/