Mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2024-12-13 15:04:42 +00:00)
MAJOR: channel: stop relying on BF_FULL to take action
This flag is quite complex to get right and updating it everywhere is a major pain, especially since the buffer/channel split. This is the first step towards getting rid of it. Instead, it is now computed dynamically whenever needed.
parent 42d06661a2
commit 3bf1b2b816
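Before the diff, a rough sketch of the idea for readers without the tree at hand: the "full" condition can be derived on demand from the buffer counters instead of being cached in a BF_FULL flag that every producer and consumer must keep in sync. The types and helper names below are simplified stand-ins, not HAProxy's real definitions; the real channel_full() and buffer_full() used in the patch take the channel or buffer directly and may account for more state than shown here.

/* Standalone illustration only -- the ex_* names are made up for this
 * sketch and are not HAProxy's actual structures or helpers.
 */
#include <stddef.h>

struct ex_buffer {
	size_t size;   /* allocated size of the storage area      */
	size_t i;      /* bytes of input data not yet forwarded    */
	size_t o;      /* bytes of output data scheduled to leave  */
};

struct ex_channel {
	struct ex_buffer buf;
};

/* A buffer is "full" when the data it holds plus a reserve of <reserve>
 * bytes would no longer fit.
 */
static inline int ex_buffer_full(const struct ex_buffer *b, size_t reserve)
{
	return b->i + b->o + reserve >= b->size;
}

/* A channel that may still be rewritten keeps a reserve; <maxrewrite>
 * stands in for global.tune.maxrewrite in this sketch.
 */
static inline int ex_channel_full(const struct ex_channel *chn, size_t maxrewrite)
{
	return ex_buffer_full(&chn->buf, maxrewrite);
}

Computing the condition at the call site is what lets the hunks below drop BF_FULL: paths that only move data test the channel-level check, while places that may still add headers check against the rewrite reserve (global.tune.maxrewrite) explicitly.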
@@ -146,7 +146,7 @@ int bi_putchr(struct channel *buf, char c)
 	if (unlikely(buffer_input_closed(buf)))
 		return -2;
 
-	if (buf->flags & BF_FULL)
+	if (channel_full(buf))
 		return -1;
 
 	*bi_end(&buf->buf) = c;
@@ -396,11 +396,12 @@ int frontend_decode_proxy_request(struct session *s, struct channel *req, int an_bit)
 	return 1;
 
  missing:
-	if (!(req->flags & (BF_SHUTR|BF_FULL))) {
-		buffer_dont_connect(s->req);
-		return 0;
-	}
+	/* missing data and buffer is either full or shutdown => fail */
+	if ((req->flags & BF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
+		goto fail;
+
+	buffer_dont_connect(s->req);
+	return 0;
 
  fail:
 	buffer_abort(req);
@@ -2019,7 +2019,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 		 */
 		if (buffer_not_empty(&req->buf) && msg->msg_state < HTTP_MSG_ERROR) {
 			if ((txn->flags & TX_NOT_FIRST) &&
-			    unlikely((req->flags & BF_FULL) ||
+			    unlikely(channel_full(req) ||
			             bi_end(&req->buf) < b_ptr(&req->buf, msg->next) ||
			             bi_end(&req->buf) > req->buf.data + req->buf.size - global.tune.maxrewrite)) {
				if (req->buf.o) {
@@ -2043,7 +2043,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 		 * keep-alive requests.
 		 */
 		if ((txn->flags & TX_NOT_FIRST) &&
-		    unlikely((s->rep->flags & BF_FULL) ||
+		    unlikely(channel_full(s->rep) ||
		             bi_end(&s->rep->buf) < b_ptr(&s->rep->buf, txn->rsp.next) ||
		             bi_end(&s->rep->buf) > s->rep->buf.data + s->rep->buf.size - global.tune.maxrewrite)) {
			if (s->rep->buf.o) {
@@ -2115,7 +2115,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 			 * later, so the session will never terminate. We
 			 * must terminate it now.
 			 */
-			if (unlikely(req->flags & BF_FULL)) {
+			if (unlikely(buffer_full(&req->buf, global.tune.maxrewrite))) {
 				/* FIXME: check if URI is set and return Status
 				 * 414 Request URI too long instead.
 				 */
@@ -3636,7 +3636,7 @@ int http_process_request_body(struct session *s, struct channel *req, int an_bit)
 
 missing_data:
 	/* we get here if we need to wait for more data */
-	if (req->flags & BF_FULL) {
+	if (buffer_full(&req->buf, global.tune.maxrewrite)) {
 		session_inc_http_err_ctr(s);
 		goto return_bad_req;
 	}
@@ -3653,7 +3653,7 @@ int http_process_request_body(struct session *s, struct channel *req, int an_bit)
 	}
 
 	/* we get here if we need to wait for more data */
-	if (!(req->flags & (BF_FULL | BF_READ_ERROR | BF_SHUTR))) {
+	if (!(req->flags & (BF_SHUTR | BF_READ_ERROR)) && !buffer_full(&req->buf, global.tune.maxrewrite)) {
 		/* Not enough data. We'll re-use the http-request
 		 * timeout here. Ideally, we should set the timeout
 		 * relative to the accept() date. We just set the
@@ -3853,7 +3853,7 @@ void http_end_txn_clean_session(struct session *s)
 	 */
 	if (s->req->buf.i) {
 		if (s->rep->buf.o &&
-		    !(s->rep->flags & BF_FULL) &&
+		    !buffer_full(&s->rep->buf, global.tune.maxrewrite) &&
 		    bi_end(&s->rep->buf) <= s->rep->buf.data + s->rep->buf.size - global.tune.maxrewrite)
 			s->rep->flags |= BF_EXPECT_MORE;
 	}
@@ -4508,7 +4508,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 		 * data later, which is much more complicated.
 		 */
 		if (buffer_not_empty(&rep->buf) && msg->msg_state < HTTP_MSG_ERROR) {
-			if (unlikely((rep->flags & BF_FULL) ||
+			if (unlikely(channel_full(rep) ||
			             bi_end(&rep->buf) < b_ptr(&rep->buf, msg->next) ||
			             bi_end(&rep->buf) > rep->buf.data + rep->buf.size - global.tune.maxrewrite)) {
				if (rep->buf.o) {
@@ -4593,7 +4593,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 		}
 
 		/* too large response does not fit in buffer. */
-		else if (rep->flags & BF_FULL) {
+		else if (buffer_full(&rep->buf, global.tune.maxrewrite)) {
 			if (msg->err_pos < 0)
 				msg->err_pos = rep->buf.i;
 			goto hdr_response_bad;
@@ -7606,7 +7606,8 @@ acl_prefetch_http(struct proxy *px, struct session *s, void *l7, unsigned int op
 		return 0;
 
 	if (unlikely(txn->req.msg_state < HTTP_MSG_BODY)) {
-		if ((msg->msg_state == HTTP_MSG_ERROR) || (s->req->flags & BF_FULL)) {
+		if ((msg->msg_state == HTTP_MSG_ERROR) ||
+		    buffer_full(&s->req->buf, global.tune.maxrewrite)) {
 			smp->data.uint = 0;
 			return -1;
 		}
@@ -7617,7 +7618,8 @@ acl_prefetch_http(struct proxy *px, struct session *s, void *l7, unsigned int op
 
 		/* Still no valid request ? */
 		if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
-			if ((msg->msg_state == HTTP_MSG_ERROR) || (s->req->flags & BF_FULL)) {
+			if ((msg->msg_state == HTTP_MSG_ERROR) ||
+			    buffer_full(&s->req->buf, global.tune.maxrewrite)) {
 				smp->data.uint = 0;
 				return -1;
 			}
@@ -809,7 +809,8 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
 	 * - if one rule returns KO, then return KO
 	 */
 
-	if (req->flags & (BF_SHUTR|BF_FULL) || !s->be->tcp_req.inspect_delay || tick_is_expired(req->analyse_exp, now_ms))
+	if ((req->flags & BF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite) ||
+	    !s->be->tcp_req.inspect_delay || tick_is_expired(req->analyse_exp, now_ms))
 		partial = SMP_OPT_FINAL;
 	else
 		partial = 0;
@@ -1650,7 +1650,7 @@ struct task *process_session(struct task *t)
 			unsigned int flags = s->rep->flags;
 
 			if ((s->rep->flags & (BF_WRITE_PARTIAL|BF_WRITE_ERROR|BF_SHUTW)) &&
-			    !(s->rep->flags & BF_FULL)) {
+			    !channel_full(s->rep)) {
 				s->rep->hijacker(s, s->rep);
 			}
 
@@ -150,7 +150,7 @@ static void stream_int_update_embedded(struct stream_interface *si)
 	    channel_is_empty(si->ob))
 		si_shutw(si);
 
-	if ((si->ob->flags & (BF_FULL|BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0)
+	if ((si->ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0 && !channel_full(si->ob))
 		si->flags |= SI_FL_WAIT_DATA;
 
 	/* we're almost sure that we need some space if the buffer is not
@@ -172,7 +172,8 @@ static void stream_int_update_embedded(struct stream_interface *si)
 
 	/* save flags to detect changes */
 	old_flags = si->flags;
-	if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_FULL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
+	if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
+		   !channel_full(si->ob) &&
 		   (si->ob->prod->flags & SI_FL_WAIT_ROOM)))
 		si_chk_rcv(si->ob->prod);
 
@@ -180,7 +181,7 @@ static void stream_int_update_embedded(struct stream_interface *si)
 	    (si->ib->cons->flags & SI_FL_WAIT_DATA)) {
 		si_chk_snd(si->ib->cons);
 		/* check if the consumer has freed some space */
-		if (!(si->ib->flags & BF_FULL))
+		if (!channel_full(si->ib))
 			si->flags &= ~SI_FL_WAIT_ROOM;
 	}
 
@@ -366,13 +367,12 @@ static void stream_int_chk_rcv(struct stream_interface *si)
 		__FUNCTION__,
 		si, si->state, si->ib->flags, si->ob->flags);
 
-	if (unlikely(si->state != SI_ST_EST || (ib->flags & BF_SHUTR)))
+	if (unlikely(si->state != SI_ST_EST || (ib->flags & (BF_SHUTR|BF_HIJACK|BF_DONT_READ))))
 		return;
 
-	if (ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) {
+	if (channel_full(ib)) {
 		/* stop reading */
-		if ((ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) == BF_FULL)
-			si->flags |= SI_FL_WAIT_ROOM;
+		si->flags |= SI_FL_WAIT_ROOM;
 	}
 	else {
 		/* (re)start reading */
@@ -586,7 +586,7 @@ void conn_notify_si(struct connection *conn)
 		si->ob->wex = TICK_ETERNITY;
 	}
 
-	if ((si->ob->flags & (BF_FULL|BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0)
+	if ((si->ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0 && !channel_full(si->ob))
 		si->flags |= SI_FL_WAIT_DATA;
 
 	if (si->ob->flags & BF_WRITE_ACTIVITY) {
@@ -600,7 +600,8 @@ void conn_notify_si(struct connection *conn)
 		if (tick_isset(si->ib->rex))
 			si->ib->rex = tick_add_ifset(now_ms, si->ib->rto);
 
-		if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_FULL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
+		if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
+			   !channel_full(si->ob) &&
 			   (si->ob->prod->flags & SI_FL_WAIT_ROOM)))
 			si_chk_rcv(si->ob->prod);
 	}
@@ -622,7 +623,7 @@ void conn_notify_si(struct connection *conn)
 		/* check if the consumer has freed some space either in the
 		 * buffer or in the pipe.
 		 */
-		if (!(si->ib->flags & BF_FULL) &&
+		if (!channel_full(si->ib) &&
 		    (!last_len || !si->ib->pipe || si->ib->pipe->data < last_len))
 			si->flags &= ~SI_FL_WAIT_ROOM;
 	}
@@ -631,7 +632,8 @@ void conn_notify_si(struct connection *conn)
 		__conn_data_stop_recv(conn);
 		si->ib->rex = TICK_ETERNITY;
 	}
-	else if ((si->ib->flags & (BF_SHUTR|BF_READ_PARTIAL|BF_FULL|BF_DONT_READ|BF_READ_NOEXP)) == BF_READ_PARTIAL) {
+	else if ((si->ib->flags & (BF_SHUTR|BF_READ_PARTIAL|BF_DONT_READ|BF_READ_NOEXP)) == BF_READ_PARTIAL &&
+		 !channel_full(si->ib)) {
 		if (tick_isset(si->ib->rex))
 			si->ib->rex = tick_add_ifset(now_ms, si->ib->rto);
 	}
@@ -772,10 +774,10 @@ void stream_int_update_conn(struct stream_interface *si)
 	/* Check if we need to close the read side */
 	if (!(ib->flags & BF_SHUTR)) {
 		/* Read not closed, update FD status and timeout for reads */
-		if (ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) {
+		if ((ib->flags & (BF_HIJACK|BF_DONT_READ)) || channel_full(ib)) {
 			/* stop reading */
 			if (!(si->flags & SI_FL_WAIT_ROOM)) {
-				if ((ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) == BF_FULL)
+				if (!(ib->flags & (BF_HIJACK|BF_DONT_READ))) /* full */
 					si->flags |= SI_FL_WAIT_ROOM;
 				conn_data_stop_recv(&si->conn);
 				ib->rex = TICK_ETERNITY;
@@ -800,7 +802,7 @@ void stream_int_update_conn(struct stream_interface *si)
 		if (channel_is_empty(ob)) {
 			/* stop writing */
 			if (!(si->flags & SI_FL_WAIT_DATA)) {
-				if ((ob->flags & (BF_FULL|BF_HIJACK|BF_SHUTW_NOW)) == 0)
+				if ((ob->flags & (BF_HIJACK|BF_SHUTW_NOW)) == 0)
					si->flags |= SI_FL_WAIT_DATA;
 				conn_data_stop_send(&si->conn);
 				ob->wex = TICK_ETERNITY;
@@ -848,9 +850,9 @@ static void stream_int_chk_rcv_conn(struct stream_interface *si)
 		return;
 	}
 
-	if (ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) {
+	if ((ib->flags & (BF_HIJACK|BF_DONT_READ)) || channel_full(ib)) {
 		/* stop reading */
-		if ((ib->flags & (BF_FULL|BF_HIJACK|BF_DONT_READ)) == BF_FULL)
+		if (!(ib->flags & (BF_HIJACK|BF_DONT_READ))) /* full */
 			si->flags |= SI_FL_WAIT_ROOM;
 		conn_data_stop_recv(&si->conn);
 	}
@@ -915,7 +917,7 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
 			goto out_wakeup;
 		}
 
-		if ((ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_FULL|BF_HIJACK)) == 0)
+		if ((ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0)
 			si->flags |= SI_FL_WAIT_DATA;
 		ob->wex = TICK_ETERNITY;
 	}