REORG/MAJOR: move session's req and resp channels back into the session

The channels used to be pointers to structs allocated outside the session.
That indirection is no longer needed now that the buffers have moved, and it
complicates operations.
Move them back into the session so that both channels and stream interfaces
are always allocated for a session. Some places (some early sample fetch
functions) used to check that a channel was not NULL prior to dereferencing
it. Now instead we check whether chn->buf is NULL, and we force it to remain
NULL until the channel is initialized.
This commit is contained in:
Willy Tarreau 2014-11-27 20:45:39 +01:00
parent 17bd152b58
commit 22ec1eadd0
12 changed files with 672 additions and 720 deletions

View File

@ -107,8 +107,8 @@ struct session {
unsigned int uniq_id; /* unique ID used for the traces */
enum obj_type *target; /* target to use for this session ; for mini-sess: incoming connection */
struct channel *req; /* request buffer */
struct channel *rep; /* response buffer */
struct channel req; /* request channel */
struct channel res; /* response channel */
struct proxy *fe; /* the proxy this session depends on for the client side */
struct proxy *be; /* the proxy this session depends on for the server side */

View File

@ -301,7 +301,7 @@ struct server *get_server_ph_post(struct session *s)
{
unsigned int hash = 0;
struct http_txn *txn = &s->txn;
struct channel *req = s->req;
struct channel *req = &s->req;
struct http_msg *msg = &txn->req;
struct proxy *px = s->be;
unsigned int plen = px->url_param_len;
@ -393,7 +393,7 @@ struct server *get_server_hh(struct session *s)
ctx.idx = 0;
/* if the message is chunked, we skip the chunk size, but use the value as len */
http_find_header2(px->hh_name, plen, b_ptr(s->req->buf, -http_hdr_rewind(&txn->req)), &txn->hdr_idx, &ctx);
http_find_header2(px->hh_name, plen, b_ptr(s->req.buf, -http_hdr_rewind(&txn->req)), &txn->hdr_idx, &ctx);
/* if the header is not found or empty, let's fallback to round robin */
if (!ctx.idx || !ctx.vlen)
@ -466,12 +466,12 @@ struct server *get_server_rch(struct session *s)
memset(&smp, 0, sizeof(smp));
b_rew(s->req->buf, rewind = s->req->buf->o);
b_rew(s->req.buf, rewind = s->req.buf->o);
ret = fetch_rdp_cookie_name(s, &smp, px->hh_name, px->hh_len);
len = smp.data.str.len;
b_adv(s->req->buf, rewind);
b_adv(s->req.buf, rewind);
if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
return NULL;
@ -548,7 +548,7 @@ int assign_server(struct session *s)
srv = NULL;
s->target = NULL;
conn = objt_conn(s->req->cons->end);
conn = objt_conn(s->req.cons->end);
if (conn &&
(conn->flags & CO_FL_CONNECTED) &&
@ -607,7 +607,7 @@ int assign_server(struct session *s)
switch (s->be->lbprm.algo & BE_LB_PARM) {
case BE_LB_HASH_SRC:
conn = objt_conn(s->req->prod->end);
conn = objt_conn(s->req.prod->end);
if (conn && conn->addr.from.ss_family == AF_INET) {
srv = get_server_sh(s->be,
(void *)&((struct sockaddr_in *)&conn->addr.from)->sin_addr,
@ -630,7 +630,7 @@ int assign_server(struct session *s)
if (s->txn.req.msg_state < HTTP_MSG_BODY)
break;
srv = get_server_uh(s->be,
b_ptr(s->req->buf, -http_uri_rewind(&s->txn.req)),
b_ptr(s->req.buf, -http_uri_rewind(&s->txn.req)),
s->txn.req.sl.rq.u_l);
break;
@ -640,7 +640,7 @@ int assign_server(struct session *s)
break;
srv = get_server_ph(s->be,
b_ptr(s->req->buf, -http_uri_rewind(&s->txn.req)),
b_ptr(s->req.buf, -http_uri_rewind(&s->txn.req)),
s->txn.req.sl.rq.u_l);
if (!srv && s->txn.meth == HTTP_METH_POST)
@ -698,7 +698,7 @@ int assign_server(struct session *s)
s->target = &s->be->obj_type;
}
else if ((s->be->options & PR_O_HTTP_PROXY) &&
(conn = objt_conn(s->req->cons->end)) &&
(conn = objt_conn(s->req.cons->end)) &&
is_addr(&conn->addr.to)) {
/* in proxy mode, we need a valid destination address */
s->target = &s->be->obj_type;
@ -746,8 +746,8 @@ int assign_server(struct session *s)
*/
int assign_server_address(struct session *s)
{
struct connection *cli_conn = objt_conn(s->req->prod->end);
struct connection *srv_conn = objt_conn(s->req->cons->end);
struct connection *cli_conn = objt_conn(s->req.prod->end);
struct connection *srv_conn = objt_conn(s->req.cons->end);
#ifdef DEBUG_FULL
fprintf(stderr,"assign_server_address : s=%p\n",s);
@ -942,7 +942,7 @@ int assign_server_and_queue(struct session *s)
/* If an explicit source binding is specified on the server and/or backend, and
* this source makes use of the transparent proxy, then it is extracted now and
* assigned to the session's pending connection. This function assumes that an
* outgoing connection has already been assigned to s->req->cons->end.
* outgoing connection has already been assigned to s->req.cons->end.
*/
static void assign_tproxy_address(struct session *s)
{
@ -950,7 +950,7 @@ static void assign_tproxy_address(struct session *s)
struct server *srv = objt_server(s->target);
struct conn_src *src;
struct connection *cli_conn;
struct connection *srv_conn = objt_conn(s->req->cons->end);
struct connection *srv_conn = objt_conn(s->req.cons->end);
if (srv && srv->conn_src.opts & CO_SRC_BIND)
src = &srv->conn_src;
@ -966,7 +966,7 @@ static void assign_tproxy_address(struct session *s)
case CO_SRC_TPROXY_CLI:
case CO_SRC_TPROXY_CIP:
/* FIXME: what can we do if the client connects in IPv6 or unix socket ? */
cli_conn = objt_conn(s->req->prod->end);
cli_conn = objt_conn(s->req.prod->end);
if (cli_conn)
srv_conn->addr.from = cli_conn->addr.from;
else
@ -983,13 +983,13 @@ static void assign_tproxy_address(struct session *s)
((struct sockaddr_in *)&srv_conn->addr.from)->sin_port = 0;
((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr = 0;
b_rew(s->req->buf, rewind = http_hdr_rewind(&s->txn.req));
b_rew(s->req.buf, rewind = http_hdr_rewind(&s->txn.req));
if (http_get_hdr(&s->txn.req, src->bind_hdr_name, src->bind_hdr_len,
&s->txn.hdr_idx, src->bind_hdr_occ, NULL, &vptr, &vlen)) {
((struct sockaddr_in *)&srv_conn->addr.from)->sin_addr.s_addr =
htonl(inetaddr_host_lim(vptr, vptr + vlen));
}
b_adv(s->req->buf, rewind);
b_adv(s->req.buf, rewind);
}
break;
default:
@ -1001,7 +1001,7 @@ static void assign_tproxy_address(struct session *s)
/*
* This function initiates a connection to the server assigned to this session
* (s->target, s->req->cons->addr.to). It will assign a server if none
* (s->target, s->req.cons->addr.to). It will assign a server if none
* is assigned yet.
* It can return one of :
* - SN_ERR_NONE if everything's OK
@ -1012,7 +1012,7 @@ static void assign_tproxy_address(struct session *s)
* - SN_ERR_INTERNAL for any other purely internal errors
* Additionnally, in the case of SN_ERR_RESOURCE, an emergency log will be emitted.
* The server-facing stream interface is expected to hold a pre-allocated connection
* in s->req->cons->conn.
* in s->req.cons->conn.
*/
int connect_server(struct session *s)
{
@ -1022,7 +1022,7 @@ int connect_server(struct session *s)
int reuse = 0;
int err;
srv_conn = objt_conn(s->req->cons->end);
srv_conn = objt_conn(s->req.cons->end);
if (srv_conn)
reuse = s->target == srv_conn->target;
@ -1043,7 +1043,7 @@ int connect_server(struct session *s)
}
}
srv_conn = si_alloc_conn(s->req->cons, reuse);
srv_conn = si_alloc_conn(s->req.cons, reuse);
if (!srv_conn)
return SN_ERR_RESOURCE;
@ -1064,7 +1064,7 @@ int connect_server(struct session *s)
else if (obj_type(s->target) == OBJ_TYPE_PROXY) {
/* proxies exclusively run on raw_sock right now */
conn_prepare(srv_conn, protocol_by_family(srv_conn->addr.to.ss_family), &raw_sock);
if (!objt_conn(s->req->cons->end) || !objt_conn(s->req->cons->end)->ctrl)
if (!objt_conn(s->req.cons->end) || !objt_conn(s->req.cons->end)->ctrl)
return SN_ERR_INTERNAL;
}
else
@ -1074,36 +1074,36 @@ int connect_server(struct session *s)
srv_conn->send_proxy_ofs = 0;
if (objt_server(s->target) && objt_server(s->target)->pp_opts) {
srv_conn->send_proxy_ofs = 1; /* must compute size */
cli_conn = objt_conn(s->req->prod->end);
cli_conn = objt_conn(s->req.prod->end);
if (cli_conn)
conn_get_to_addr(cli_conn);
}
si_attach_conn(s->req->cons, srv_conn);
si_attach_conn(s->req.cons, srv_conn);
assign_tproxy_address(s);
}
else {
/* the connection is being reused, just re-attach it */
si_attach_conn(s->req->cons, srv_conn);
si_attach_conn(s->req.cons, srv_conn);
s->flags |= SN_SRV_REUSED;
}
/* flag for logging source ip/port */
if (s->fe->options2 & PR_O2_SRC_ADDR)
s->req->cons->flags |= SI_FL_SRC_ADDR;
s->req.cons->flags |= SI_FL_SRC_ADDR;
/* disable lingering */
if (s->be->options & PR_O_TCP_NOLING)
s->req->cons->flags |= SI_FL_NOLINGER;
s->req.cons->flags |= SI_FL_NOLINGER;
err = si_connect(s->req->cons);
err = si_connect(s->req.cons);
if (err != SN_ERR_NONE)
return err;
/* set connect timeout */
s->req->cons->exp = tick_add_ifset(now_ms, s->be->timeout.connect);
s->req.cons->exp = tick_add_ifset(now_ms, s->be->timeout.connect);
srv = objt_server(s->target);
if (srv) {
@ -1157,8 +1157,8 @@ int srv_redispatch_connect(struct session *s)
goto redispatch;
}
if (!s->req->cons->err_type) {
s->req->cons->err_type = SI_ET_QUEUE_ERR;
if (!s->req.cons->err_type) {
s->req.cons->err_type = SI_ET_QUEUE_ERR;
}
srv->counters.failed_conns++;
@ -1167,23 +1167,23 @@ int srv_redispatch_connect(struct session *s)
case SRV_STATUS_NOSRV:
/* note: it is guaranteed that srv == NULL here */
if (!s->req->cons->err_type) {
s->req->cons->err_type = SI_ET_CONN_ERR;
if (!s->req.cons->err_type) {
s->req.cons->err_type = SI_ET_CONN_ERR;
}
s->be->be_counters.failed_conns++;
return 1;
case SRV_STATUS_QUEUED:
s->req->cons->exp = tick_add_ifset(now_ms, s->be->timeout.queue);
s->req->cons->state = SI_ST_QUE;
s->req.cons->exp = tick_add_ifset(now_ms, s->be->timeout.queue);
s->req.cons->state = SI_ST_QUE;
/* do nothing else and do not wake any other session up */
return 1;
case SRV_STATUS_INTERNAL:
default:
if (!s->req->cons->err_type) {
s->req->cons->err_type = SI_ET_CONN_OTHER;
if (!s->req.cons->err_type) {
s->req.cons->err_type = SI_ET_CONN_OTHER;
}
if (srv)

View File

@ -246,11 +246,11 @@ static int stats_accept(struct session *s)
s->logs.prx_queue_size = 0; /* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
s->req.flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
s->req->rto = *s->listener->timeout;
s->rep->wto = *s->listener->timeout;
s->req.rto = *s->listener->timeout;
s->res.wto = *s->listener->timeout;
}
return 1;
}
@ -1488,7 +1488,7 @@ static int stats_sock_parse_request(struct stream_interface *si, char *line)
return 1;
}
s->req->rto = s->rep->wto = 1 + MS_TO_TICKS(timeout*1000);
s->req.rto = s->res.wto = 1 + MS_TO_TICKS(timeout*1000);
return 1;
}
else {
@ -5208,60 +5208,60 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si, struct se
chunk_appendf(&trash,
" req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
" an_exp=%s",
sess->req,
sess->req->flags, sess->req->analysers,
sess->req->pipe ? sess->req->pipe->data : 0,
sess->req->to_forward, sess->req->total,
sess->req->analyse_exp ?
human_time(TICKS_TO_MS(sess->req->analyse_exp - now_ms),
&sess->req,
sess->req.flags, sess->req.analysers,
sess->req.pipe ? sess->req.pipe->data : 0,
sess->req.to_forward, sess->req.total,
sess->req.analyse_exp ?
human_time(TICKS_TO_MS(sess->req.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" rex=%s",
sess->req->rex ?
human_time(TICKS_TO_MS(sess->req->rex - now_ms),
sess->req.rex ?
human_time(TICKS_TO_MS(sess->req.rex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" wex=%s\n"
" buf=%p data=%p o=%d p=%d req.next=%d i=%d size=%d\n",
sess->req->wex ?
human_time(TICKS_TO_MS(sess->req->wex - now_ms),
sess->req.wex ?
human_time(TICKS_TO_MS(sess->req.wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
sess->req->buf,
sess->req->buf->data, sess->req->buf->o,
(int)(sess->req->buf->p - sess->req->buf->data),
sess->txn.req.next, sess->req->buf->i,
sess->req->buf->size);
sess->req.buf,
sess->req.buf->data, sess->req.buf->o,
(int)(sess->req.buf->p - sess->req.buf->data),
sess->txn.req.next, sess->req.buf->i,
sess->req.buf->size);
chunk_appendf(&trash,
" res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
" an_exp=%s",
sess->rep,
sess->rep->flags, sess->rep->analysers,
sess->rep->pipe ? sess->rep->pipe->data : 0,
sess->rep->to_forward, sess->rep->total,
sess->rep->analyse_exp ?
human_time(TICKS_TO_MS(sess->rep->analyse_exp - now_ms),
&sess->res,
sess->res.flags, sess->res.analysers,
sess->res.pipe ? sess->res.pipe->data : 0,
sess->res.to_forward, sess->res.total,
sess->res.analyse_exp ?
human_time(TICKS_TO_MS(sess->res.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" rex=%s",
sess->rep->rex ?
human_time(TICKS_TO_MS(sess->rep->rex - now_ms),
sess->res.rex ?
human_time(TICKS_TO_MS(sess->res.rex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>");
chunk_appendf(&trash,
" wex=%s\n"
" buf=%p data=%p o=%d p=%d rsp.next=%d i=%d size=%d\n",
sess->rep->wex ?
human_time(TICKS_TO_MS(sess->rep->wex - now_ms),
sess->res.wex ?
human_time(TICKS_TO_MS(sess->res.wex - now_ms),
TICKS_TO_MS(1000)) : "<NEVER>",
sess->rep->buf,
sess->rep->buf->data, sess->rep->buf->o,
(int)(sess->rep->buf->p - sess->rep->buf->data),
sess->txn.rsp.next, sess->rep->buf->i,
sess->rep->buf->size);
sess->res.buf,
sess->res.buf->data, sess->res.buf->o,
(int)(sess->res.buf->p - sess->res.buf->data),
sess->txn.rsp.next, sess->res.buf->i,
sess->res.buf->size);
if (bi_putchk(si->ib, &trash) == -1)
return 0;
@ -5613,44 +5613,44 @@ static int stats_dump_sess_to_buffer(struct stream_interface *si)
chunk_appendf(&trash,
" rq[f=%06xh,i=%d,an=%02xh,rx=%s",
curr_sess->req->flags,
curr_sess->req->buf->i,
curr_sess->req->analysers,
curr_sess->req->rex ?
human_time(TICKS_TO_MS(curr_sess->req->rex - now_ms),
curr_sess->req.flags,
curr_sess->req.buf->i,
curr_sess->req.analysers,
curr_sess->req.rex ?
human_time(TICKS_TO_MS(curr_sess->req.rex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",wx=%s",
curr_sess->req->wex ?
human_time(TICKS_TO_MS(curr_sess->req->wex - now_ms),
curr_sess->req.wex ?
human_time(TICKS_TO_MS(curr_sess->req.wex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",ax=%s]",
curr_sess->req->analyse_exp ?
human_time(TICKS_TO_MS(curr_sess->req->analyse_exp - now_ms),
curr_sess->req.analyse_exp ?
human_time(TICKS_TO_MS(curr_sess->req.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
" rp[f=%06xh,i=%d,an=%02xh,rx=%s",
curr_sess->rep->flags,
curr_sess->rep->buf->i,
curr_sess->rep->analysers,
curr_sess->rep->rex ?
human_time(TICKS_TO_MS(curr_sess->rep->rex - now_ms),
curr_sess->res.flags,
curr_sess->res.buf->i,
curr_sess->res.analysers,
curr_sess->res.rex ?
human_time(TICKS_TO_MS(curr_sess->res.rex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",wx=%s",
curr_sess->rep->wex ?
human_time(TICKS_TO_MS(curr_sess->rep->wex - now_ms),
curr_sess->res.wex ?
human_time(TICKS_TO_MS(curr_sess->res.wex - now_ms),
TICKS_TO_MS(1000)) : "");
chunk_appendf(&trash,
",ax=%s]",
curr_sess->rep->analyse_exp ?
human_time(TICKS_TO_MS(curr_sess->rep->analyse_exp - now_ms),
curr_sess->res.analyse_exp ?
human_time(TICKS_TO_MS(curr_sess->res.analyse_exp - now_ms),
TICKS_TO_MS(1000)) : "");
conn = objt_conn(curr_sess->si[0].end);

View File

@ -189,16 +189,16 @@ int frontend_accept(struct session *s)
}
if (s->fe->mode == PR_MODE_HTTP)
s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* note: this should not happen anymore since there's always at least the switching rules */
if (!s->req->analysers) {
channel_auto_connect(s->req); /* don't wait to establish connection */
channel_auto_close(s->req); /* let the producer forward close requests */
if (!s->req.analysers) {
channel_auto_connect(&s->req); /* don't wait to establish connection */
channel_auto_close(&s->req); /* let the producer forward close requests */
}
s->req->rto = s->fe->timeout.client;
s->rep->wto = s->fe->timeout.client;
s->req.rto = s->fe->timeout.client;
s->res.wto = s->fe->timeout.client;
/* everything's OK, let's go on */
return 1;

View File

@ -1424,7 +1424,7 @@ static int hlua_socket_write_yield(struct lua_State *L,int status, lua_KContext
sent = MAY_LJMP(luaL_checkinteger(L, 3));
/* Check for connection close. */
if (!socket->s || channel_output_closed(socket->s->req)) {
if (!socket->s || channel_output_closed(&socket->s->req)) {
lua_pushinteger(L, -1);
return 1;
}
@ -1440,9 +1440,9 @@ static int hlua_socket_write_yield(struct lua_State *L,int status, lua_KContext
/* Check if the buffer is avalaible because HAProxy doesn't allocate
* the request buffer if its not required.
*/
if (socket->s->req->buf->size == 0) {
if (!session_alloc_recv_buffer(socket->s, &socket->s->req->buf)) {
socket->s->req->prod->flags |= SI_FL_WAIT_ROOM;
if (socket->s->req.buf->size == 0) {
if (!session_alloc_recv_buffer(socket->s, &socket->s->req.buf)) {
socket->s->req.prod->flags |= SI_FL_WAIT_ROOM;
goto hlua_socket_write_yield_return;
}
}
@ -1700,7 +1700,7 @@ __LJMP static int hlua_socket_connect_yield(struct lua_State *L, int status, lua
struct appctx *appctx;
/* Check for connection close. */
if (!hlua || !socket->s || channel_output_closed(socket->s->req)) {
if (!hlua || !socket->s || channel_output_closed(&socket->s->req)) {
lua_pushnil(L);
lua_pushstring(L, "Can't connect");
return 2;
@ -1737,7 +1737,7 @@ __LJMP static int hlua_socket_connect(struct lua_State *L)
ip = MAY_LJMP(luaL_checkstring(L, 2));
port = MAY_LJMP(luaL_checkinteger(L, 3));
conn = si_alloc_conn(socket->s->req->cons, 0);
conn = si_alloc_conn(socket->s->req.cons, 0);
if (!conn)
WILL_LJMP(luaL_error(L, "connect: internal error"));
@ -1794,10 +1794,10 @@ __LJMP static int hlua_socket_settimeout(struct lua_State *L)
socket = MAY_LJMP(hlua_checksocket(L, 1));
tmout = MAY_LJMP(luaL_checkinteger(L, 2)) * 1000;
socket->s->req->rto = tmout;
socket->s->req->wto = tmout;
socket->s->rep->rto = tmout;
socket->s->rep->wto = tmout;
socket->s->req.rto = tmout;
socket->s->req.wto = tmout;
socket->s->res.rto = tmout;
socket->s->res.wto = tmout;
return 0;
}
@ -1847,26 +1847,14 @@ __LJMP static int hlua_socket_new(lua_State *L)
goto out_free_session;
}
socket->s->req = pool_alloc2(pool2_channel);
if (!socket->s->req) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_req;
}
socket->s->req->buf = pool_alloc2(pool2_buffer);
if (!socket->s->req->buf) {
socket->s->req.buf = pool_alloc2(pool2_buffer);
if (!socket->s->req.buf) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_req_buf;
}
socket->s->rep = pool_alloc2(pool2_channel);
if (!socket->s->rep) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_rep;
}
socket->s->rep->buf = pool_alloc2(pool2_buffer);
if (!socket->s->rep->buf) {
socket->s->res.buf = pool_alloc2(pool2_buffer);
if (!socket->s->res.buf) {
hlua_pusherror(L, "socket: out of memory");
goto out_fail_rep_buf;
}
@ -1909,8 +1897,8 @@ __LJMP static int hlua_socket_new(lua_State *L)
* Initialize the attached buffers
*
*/
socket->s->req->buf->size = global.tune.bufsize;
socket->s->rep->buf->size = global.tune.bufsize;
socket->s->req.buf->size = global.tune.bufsize;
socket->s->res.buf->size = global.tune.bufsize;
/*
*
@ -1921,34 +1909,34 @@ __LJMP static int hlua_socket_new(lua_State *L)
/* This function reset the struct. It must be called
* before the configuration.
*/
channel_init(socket->s->req);
channel_init(socket->s->rep);
channel_init(&socket->s->req);
channel_init(&socket->s->res);
socket->s->req->prod = &socket->s->si[0];
socket->s->req->cons = &socket->s->si[1];
socket->s->req.prod = &socket->s->si[0];
socket->s->req.cons = &socket->s->si[1];
socket->s->rep->prod = &socket->s->si[1];
socket->s->rep->cons = &socket->s->si[0];
socket->s->res.prod = &socket->s->si[1];
socket->s->res.cons = &socket->s->si[0];
socket->s->si[0].ib = socket->s->req;
socket->s->si[0].ob = socket->s->rep;
socket->s->si[0].ib = &socket->s->req;
socket->s->si[0].ob = &socket->s->res;
socket->s->si[1].ib = socket->s->rep;
socket->s->si[1].ob = socket->s->req;
socket->s->si[1].ib = &socket->s->res;
socket->s->si[1].ob = &socket->s->req;
socket->s->req->analysers = 0;
socket->s->req->rto = socket_proxy.timeout.client;
socket->s->req->wto = socket_proxy.timeout.server;
socket->s->req->rex = TICK_ETERNITY;
socket->s->req->wex = TICK_ETERNITY;
socket->s->req->analyse_exp = TICK_ETERNITY;
socket->s->req.analysers = 0;
socket->s->req.rto = socket_proxy.timeout.client;
socket->s->req.wto = socket_proxy.timeout.server;
socket->s->req.rex = TICK_ETERNITY;
socket->s->req.wex = TICK_ETERNITY;
socket->s->req.analyse_exp = TICK_ETERNITY;
socket->s->rep->analysers = 0;
socket->s->rep->rto = socket_proxy.timeout.server;
socket->s->rep->wto = socket_proxy.timeout.client;
socket->s->rep->rex = TICK_ETERNITY;
socket->s->rep->wex = TICK_ETERNITY;
socket->s->rep->analyse_exp = TICK_ETERNITY;
socket->s->res.analysers = 0;
socket->s->res.rto = socket_proxy.timeout.server;
socket->s->res.wto = socket_proxy.timeout.client;
socket->s->res.rex = TICK_ETERNITY;
socket->s->res.wex = TICK_ETERNITY;
socket->s->res.analyse_exp = TICK_ETERNITY;
/*
*
@ -2009,10 +1997,10 @@ __LJMP static int hlua_socket_new(lua_State *L)
*/
/* The data producer is already connected. It is the applet. */
socket->s->req->flags = CF_READ_ATTACHED;
socket->s->req.flags = CF_READ_ATTACHED;
channel_auto_connect(socket->s->req); /* don't wait to establish connection */
channel_auto_close(socket->s->req); /* let the producer forward close requests */
channel_auto_connect(&socket->s->req); /* don't wait to establish connection */
channel_auto_close(&socket->s->req); /* let the producer forward close requests */
si_reset(&socket->s->si[0], socket->s->task);
si_set_state(&socket->s->si[0], SI_ST_EST); /* connection established (resource exists) */
@ -2052,14 +2040,10 @@ __LJMP static int hlua_socket_new(lua_State *L)
return 1;
out_fail_conn1:
pool_free2(pool2_buffer, socket->s->rep->buf);
pool_free2(pool2_buffer, socket->s->res.buf);
out_fail_rep_buf:
pool_free2(pool2_channel, socket->s->rep);
out_fail_rep:
pool_free2(pool2_buffer, socket->s->req->buf);
pool_free2(pool2_buffer, socket->s->req.buf);
out_fail_req_buf:
pool_free2(pool2_channel, socket->s->req);
out_fail_req:
task_free(socket->s->task);
out_free_session:
pool_free2(pool2_session, socket->s);
@ -2405,7 +2389,7 @@ __LJMP static int hlua_channel_send_yield(lua_State *L, int status, lua_KContext
* must set the flag WAKERESWR. This flag required the task
* wake up if any activity is detected on the response buffer.
*/
if (chn->chn == chn->s->rep)
if (chn->chn == &chn->s->res)
HLUA_SET_WAKERESWR(hlua);
else
HLUA_SET_WAKEREQWR(hlua);
@ -2467,7 +2451,7 @@ __LJMP static int hlua_channel_forward_yield(lua_State *L, int status, lua_KCont
* must set the flag WAKERESWR. This flag required the task
* wake up if any activity is detected on the response buffer.
*/
if (chn->chn == chn->s->rep)
if (chn->chn == &chn->s->res)
HLUA_SET_WAKERESWR(hlua);
else
HLUA_SET_WAKEREQWR(hlua);
@ -2849,13 +2833,13 @@ static int hlua_txn_new(lua_State *L, struct session *s, struct proxy *p, void *
/* Create the "req" field that contains the request channel object. */
lua_pushstring(L, "req");
if (!hlua_channel_new(L, s, s->req))
if (!hlua_channel_new(L, s, &s->req))
return 0;
lua_settable(L, -3);
/* Create the "res" field that contains the response channel object. */
lua_pushstring(L, "res");
if (!hlua_channel_new(L, s, s->rep))
if (!hlua_channel_new(L, s, &s->res))
return 0;
lua_settable(L, -3);
@ -2904,7 +2888,7 @@ static int hlua_session_get_headers(lua_State *L)
/* Build array of headers. */
old_idx = 0;
cur_next = sess->req->buf->p + hdr_idx_first_pos(&sess->txn.hdr_idx);
cur_next = sess->req.buf->p + hdr_idx_first_pos(&sess->txn.hdr_idx);
while (1) {
cur_idx = sess->txn.hdr_idx.v[old_idx].next;
@ -3606,21 +3590,21 @@ static int hlua_request_act_wrapper(struct hlua_rule *rule, struct proxy *px,
/* Set timeout in the required channel. */
if (s->hlua.wake_time != TICK_ETERNITY) {
if (analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE))
s->req->analyse_exp = s->hlua.wake_time;
s->req.analyse_exp = s->hlua.wake_time;
else if (analyzer & (AN_RES_INSPECT|AN_RES_HTTP_PROCESS_BE))
s->rep->analyse_exp = s->hlua.wake_time;
s->res.analyse_exp = s->hlua.wake_time;
}
/* Some actions can be wake up when a "write" event
* is detected on a response channel. This is useful
* only for actions targetted on the requests.
*/
if (HLUA_IS_WAKERESWR(&s->hlua)) {
s->rep->flags |= CF_WAKE_WRITE;
s->res.flags |= CF_WAKE_WRITE;
if ((analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE)))
s->rep->analysers |= analyzer;
s->res.analysers |= analyzer;
}
if (HLUA_IS_WAKEREQWR(&s->hlua))
s->req->flags |= CF_WAKE_WRITE;
s->req.flags |= CF_WAKE_WRITE;
return 0;
/* finished with error. */

View File

@ -984,7 +984,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_CLIENTIP: // %ci
conn = objt_conn(s->req->prod->end);
conn = objt_conn(s->req.prod->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
@ -996,7 +996,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_CLIENTPORT: // %cp
conn = objt_conn(s->req->prod->end);
conn = objt_conn(s->req.prod->end);
if (conn) {
if (conn->addr.from.ss_family == AF_UNIX) {
ret = ltoa_o(s->listener->luid, tmplog, dst + maxsize - tmplog);
@ -1015,7 +1015,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_FRONTENDIP: // %fi
conn = objt_conn(s->req->prod->end);
conn = objt_conn(s->req.prod->end);
if (conn) {
conn_get_to_addr(conn);
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
@ -1030,7 +1030,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_FRONTENDPORT: // %fp
conn = objt_conn(s->req->prod->end);
conn = objt_conn(s->req.prod->end);
if (conn) {
conn_get_to_addr(conn);
if (conn->addr.to.ss_family == AF_UNIX)
@ -1048,7 +1048,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_BACKENDIP: // %bi
conn = objt_conn(s->req->cons->end);
conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
@ -1061,7 +1061,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_BACKENDPORT: // %bp
conn = objt_conn(s->req->cons->end);
conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_port(tmplog, (struct sockaddr *)&conn->addr.from, dst + maxsize - tmplog, tmp);
else
@ -1074,7 +1074,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_SERVERIP: // %si
conn = objt_conn(s->req->cons->end);
conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_ip(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
else
@ -1087,7 +1087,7 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
break;
case LOG_FMT_SERVERPORT: // %sp
conn = objt_conn(s->req->cons->end);
conn = objt_conn(s->req.cons->end);
if (conn)
ret = lf_port(tmplog, (struct sockaddr *)&conn->addr.to, dst + maxsize - tmplog, tmp);
else
@ -1386,8 +1386,8 @@ int build_logline(struct session *s, char *dst, size_t maxsize, struct list *lis
case LOG_FMT_RETRIES: // %rq
if (s->flags & SN_REDISP)
LOGCHAR('+');
ret = ltoa_o((s->req->cons->conn_retries>0) ?
(be->conn_retries - s->req->cons->conn_retries) :
ret = ltoa_o((s->req.cons->conn_retries>0) ?
(be->conn_retries - s->req.cons->conn_retries) :
be->conn_retries, tmplog, dst + maxsize - tmplog);
if (ret == NULL)
goto out;
@ -1611,7 +1611,7 @@ void sess_log(struct session *s)
err = (s->flags & SN_REDISP) ||
((s->flags & SN_ERR_MASK) > SN_ERR_LOCAL) ||
(((s->flags & SN_ERR_MASK) == SN_ERR_NONE) &&
(s->req->cons->conn_retries != s->be->conn_retries)) ||
(s->req.cons->conn_retries != s->be->conn_retries)) ||
((s->fe->mode == PR_MODE_HTTP) && s->txn.status >= 500);
if (!err && (s->fe->options2 & PR_O2_NOLOGNORM))

View File

@ -46,9 +46,13 @@ static int
smp_fetch_len(struct proxy *px, struct session *s, void *l7, unsigned int opt,
const struct arg *args, struct sample *smp, const char *kw, void *private)
{
struct channel *chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
struct channel *chn;
if (!s || !chn)
if (!s)
return 0;
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
if (!chn->buf)
return 0;
smp->type = SMP_T_UINT;
@ -70,9 +74,8 @@ smp_fetch_ssl_hello_type(struct proxy *px, struct session *s, void *l7, unsigned
if (!s)
goto not_ssl_hello;
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
if (!chn)
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
if (!chn->buf)
goto not_ssl_hello;
bleft = chn->buf->i;
@ -137,15 +140,15 @@ smp_fetch_req_ssl_ver(struct proxy *px, struct session *s, void *l7, unsigned in
int version, bleft, msg_len;
const unsigned char *data;
if (!s || !s->req)
if (!s || !s->req.buf)
return 0;
msg_len = 0;
bleft = s->req->buf->i;
bleft = s->req.buf->i;
if (!bleft)
goto too_short;
data = (const unsigned char *)s->req->buf->p;
data = (const unsigned char *)s->req.buf->p;
if ((*data >= 0x14 && *data <= 0x17) || (*data == 0xFF)) {
/* SSLv3 header format */
if (bleft < 5)
@ -213,8 +216,8 @@ smp_fetch_req_ssl_ver(struct proxy *px, struct session *s, void *l7, unsigned in
* all the part of the request which fits in a buffer is already
* there.
*/
if (msg_len > channel_recv_limit(s->req) + s->req->buf->data - s->req->buf->p)
msg_len = channel_recv_limit(s->req) + s->req->buf->data - s->req->buf->p;
if (msg_len > channel_recv_limit(&s->req) + s->req.buf->data - s->req.buf->p)
msg_len = channel_recv_limit(&s->req) + s->req.buf->data - s->req.buf->p;
if (bleft < msg_len)
goto too_short;
@ -277,9 +280,8 @@ smp_fetch_ssl_hello_sni(struct proxy *px, struct session *s, void *l7, unsigned
if (!s)
goto not_ssl_hello;
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
if (!chn)
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
if (!chn->buf)
goto not_ssl_hello;
bleft = chn->buf->i;
@ -413,17 +415,17 @@ fetch_rdp_cookie_name(struct session *s, struct sample *smp, const char *cname,
int bleft;
const unsigned char *data;
if (!s || !s->req)
if (!s || !s->req.buf)
return 0;
smp->flags = SMP_F_CONST;
smp->type = SMP_T_STR;
bleft = s->req->buf->i;
bleft = s->req.buf->i;
if (bleft <= 11)
goto too_short;
data = (const unsigned char *)s->req->buf->p + 11;
data = (const unsigned char *)s->req.buf->p + 11;
bleft -= 11;
if (bleft <= 7)
@ -543,9 +545,8 @@ smp_fetch_payload_lv(struct proxy *px, struct session *s, void *l7, unsigned int
if (!s)
return 0;
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
if (!chn)
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
if (!chn->buf)
return 0;
if (len_offset + len_size > chn->buf->i)
@ -594,9 +595,8 @@ smp_fetch_payload(struct proxy *px, struct session *s, void *l7, unsigned int op
if (!s)
return 0;
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? s->rep : s->req;
if (!chn)
chn = ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &s->res : &s->req;
if (!chn->buf)
return 0;
if (buf_size > chn->buf->size || buf_offset + buf_size > chn->buf->size) {

View File

@ -38,10 +38,10 @@
#include <proto/proto_http.h>
#include <proto/proxy.h>
#include <proto/session.h>
#include <proto/signal.h>
#include <proto/stick_table.h>
#include <proto/stream_interface.h>
#include <proto/task.h>
#include <proto/stick_table.h>
#include <proto/signal.h>
/*******************************/
@ -1105,11 +1105,11 @@ int peer_accept(struct session *s)
s->logs.prx_queue_size = 0;/* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
s->req.flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
s->req->rto = *s->listener->timeout;
s->rep->wto = *s->listener->timeout;
s->req.rto = *s->listener->timeout;
s->res.wto = *s->listener->timeout;
}
return 1;
}
@ -1161,8 +1161,7 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
* when the default backend is assigned.
*/
s->be = s->fe = p;
s->req = s->rep = NULL; /* will be allocated later */
s->req.buf = s->res.buf = NULL;
si_reset(&s->si[0], t);
si_set_state(&s->si[0], SI_ST_EST);
@ -1235,48 +1234,42 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
txn->hdr_idx.v = NULL;
txn->hdr_idx.size = txn->hdr_idx.used = 0;
if ((s->req = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_req; /* no memory */
channel_init(&s->req);
s->req.prod = &s->si[0];
s->req.cons = &s->si[1];
s->si[0].ib = s->si[1].ob = &s->req;
channel_init(s->req);
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
s->si[0].ib = s->si[1].ob = s->req;
s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */
s->req.flags |= CF_READ_ATTACHED; /* the producer is already connected */
/* activate default analysers enabled for this listener */
s->req->analysers = l->analysers;
s->req.analysers = l->analysers;
/* note: this should not happen anymore since there's always at least the switching rules */
if (!s->req->analysers) {
channel_auto_connect(s->req);/* don't wait to establish connection */
channel_auto_close(s->req);/* let the producer forward close requests */
if (!s->req.analysers) {
channel_auto_connect(&s->req);/* don't wait to establish connection */
channel_auto_close(&s->req);/* let the producer forward close requests */
}
s->req->rto = s->fe->timeout.client;
s->req->wto = s->be->timeout.server;
s->req.rto = s->fe->timeout.client;
s->req.wto = s->be->timeout.server;
if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
goto out_fail_rep; /* no memory */
channel_init(&s->res);
s->res.prod = &s->si[1];
s->res.cons = &s->si[0];
s->si[0].ob = s->si[1].ib = &s->res;
channel_init(s->rep);
s->rep->prod = &s->si[1];
s->rep->cons = &s->si[0];
s->si[0].ob = s->si[1].ib = s->rep;
s->res.rto = s->be->timeout.server;
s->res.wto = s->fe->timeout.client;
s->rep->rto = s->be->timeout.server;
s->rep->wto = s->fe->timeout.client;
s->req->rex = TICK_ETERNITY;
s->req->wex = TICK_ETERNITY;
s->req->analyse_exp = TICK_ETERNITY;
s->rep->rex = TICK_ETERNITY;
s->rep->wex = TICK_ETERNITY;
s->rep->analyse_exp = TICK_ETERNITY;
s->req.rex = TICK_ETERNITY;
s->req.wex = TICK_ETERNITY;
s->req.analyse_exp = TICK_ETERNITY;
s->res.rex = TICK_ETERNITY;
s->res.wex = TICK_ETERNITY;
s->res.analyse_exp = TICK_ETERNITY;
t->expire = TICK_ETERNITY;
s->rep->flags |= CF_READ_DONTWAIT;
s->res.flags |= CF_READ_DONTWAIT;
/* it is important not to call the wakeup function directly but to
* pass through task_wakeup(), because this one knows how to apply
@ -1294,10 +1287,6 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
return s;
/* Error unrolling */
out_fail_rep:
pool_free2(pool2_channel, s->req);
out_fail_req:
conn_free(conn);
out_fail_conn1:
task_free(t);
out_free_session:

View File

@ -1001,12 +1001,12 @@ void http_perform_server_redirect(struct session *s, struct stream_interface *si
* to temporarily rewind the buffer.
*/
txn = &s->txn;
b_rew(s->req->buf, rewind = http_hdr_rewind(&txn->req));
b_rew(s->req.buf, rewind = http_hdr_rewind(&txn->req));
path = http_get_path(txn);
len = buffer_count(s->req->buf, path, b_ptr(s->req->buf, txn->req.sl.rq.u + txn->req.sl.rq.u_l));
len = buffer_count(s->req.buf, path, b_ptr(s->req.buf, txn->req.sl.rq.u + txn->req.sl.rq.u_l));
b_adv(s->req->buf, rewind);
b_adv(s->req.buf, rewind);
if (!path)
return;
@ -1460,7 +1460,7 @@ get_http_auth(struct session *s)
len = strlen(h);
}
if (!http_find_header2(h, len, s->req->buf->p, &txn->hdr_idx, &ctx))
if (!http_find_header2(h, len, s->req.buf->p, &txn->hdr_idx, &ctx))
return 0;
h = ctx.line + ctx.val;
@ -2514,7 +2514,7 @@ void http_adjust_conn_mode(struct session *s, struct http_txn *txn, struct http_
/* This stream analyser waits for a complete HTTP request. It returns 1 if the
* processing can continue on next analysers, or zero if it either needs more
* data or wants to immediately abort the request (eg: timeout, error, ...). It
* is tied to AN_REQ_WAIT_HTTP and may may remove itself from s->req->analysers
* is tied to AN_REQ_WAIT_HTTP and may may remove itself from s->req.analysers
* when it has nothing left to do, and may remove any analyser when it wants to
* abort.
*/
@ -2588,17 +2588,17 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
* keep-alive requests.
*/
if ((txn->flags & TX_NOT_FIRST) &&
unlikely(!channel_is_rewritable(s->rep) ||
bi_end(s->rep->buf) < b_ptr(s->rep->buf, txn->rsp.next) ||
bi_end(s->rep->buf) > s->rep->buf->data + s->rep->buf->size - global.tune.maxrewrite)) {
if (s->rep->buf->o) {
if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
unlikely(!channel_is_rewritable(&s->res) ||
bi_end(s->res.buf) < b_ptr(s->res.buf, txn->rsp.next) ||
bi_end(s->res.buf) > s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)) {
if (s->res.buf->o) {
if (s->res.flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* don't let a connection request be initiated */
channel_dont_connect(req);
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->rep->flags |= CF_WAKE_WRITE;
s->rep->analysers |= an_bit; /* wake us up once it changes */
s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->res.flags |= CF_WAKE_WRITE;
s->res.analysers |= an_bit; /* wake us up once it changes */
return 0;
}
}
@ -2765,14 +2765,14 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
channel_dont_connect(req);
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
#ifdef TCP_QUICKACK
if (s->listener->options & LI_O_NOQUICKACK && req->buf->i && objt_conn(s->req->prod->end) && conn_ctrl_ready(__objt_conn(s->req->prod->end))) {
if (s->listener->options & LI_O_NOQUICKACK && req->buf->i && objt_conn(s->req.prod->end) && conn_ctrl_ready(__objt_conn(s->req.prod->end))) {
/* We need more data, we have to re-enable quick-ack in case we
* previously disabled it, otherwise we might cause the client
* to delay next data.
*/
setsockopt(__objt_conn(s->req->prod->end)->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
setsockopt(__objt_conn(s->req.prod->end)->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}
#endif
@ -2808,7 +2808,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
req->analysers = 0;
s->logs.logwait = 0;
s->logs.level = 0;
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
stream_int_retnclose(req->prod, NULL);
return 0;
}
@ -3094,7 +3094,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
int http_handle_stats(struct session *s, struct channel *req)
{
struct stats_admin_rule *stats_admin_rule;
struct stream_interface *si = s->rep->prod;
struct stream_interface *si = s->res.prod;
struct http_txn *txn = &s->txn;
struct http_msg *msg = &txn->req;
struct uri_auth *uri_auth = s->be->uri_auth;
@ -3440,13 +3440,13 @@ resume_execution:
break;
case HTTP_REQ_ACT_SET_TOS:
if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
inet_set_tos(cli_conn->t.sock.fd, cli_conn->addr.from, rule->arg.tos);
break;
case HTTP_REQ_ACT_SET_MARK:
#ifdef SO_MARK
if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
setsockopt(cli_conn->t.sock.fd, SOL_SOCKET, SO_MARK, &rule->arg.mark, sizeof(rule->arg.mark));
#endif
break;
@ -3686,13 +3686,13 @@ resume_execution:
break;
case HTTP_RES_ACT_SET_TOS:
if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
inet_set_tos(cli_conn->t.sock.fd, cli_conn->addr.from, rule->arg.tos);
break;
case HTTP_RES_ACT_SET_MARK:
#ifdef SO_MARK
if ((cli_conn = objt_conn(s->req->prod->end)) && conn_ctrl_ready(cli_conn))
if ((cli_conn = objt_conn(s->req.prod->end)) && conn_ctrl_ready(cli_conn))
setsockopt(cli_conn->t.sock.fd, SOL_SOCKET, SO_MARK, &rule->arg.mark, sizeof(rule->arg.mark));
#endif
break;
@ -4072,7 +4072,7 @@ static int http_apply_redirect_rule(struct redirect_rule *rule, struct session *
msg->next -= msg->sov;
msg->sov = 0;
txn->req.chn->analysers = AN_REQ_HTTP_XFER_BODY;
s->rep->analysers = AN_RES_HTTP_XFER_BODY;
s->res.analysers = AN_RES_HTTP_XFER_BODY;
txn->req.msg_state = HTTP_MSG_CLOSED;
txn->rsp.msg_state = HTTP_MSG_DONE;
} else {
@ -4162,9 +4162,9 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
* by a possible reqrep, while they are processed *after* so that a
* reqdeny can still block them. This clearly needs to change in 1.6!
*/
if (stats_check_uri(s->rep->prod, txn, px)) {
if (stats_check_uri(s->res.prod, txn, px)) {
s->target = &http_stats_applet.obj_type;
if (unlikely(!stream_int_register_handler(s->rep->prod, objt_applet(s->target)))) {
if (unlikely(!stream_int_register_handler(s->res.prod, objt_applet(s->target)))) {
txn->status = 500;
s->logs.tv_request = now;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500));
@ -4274,7 +4274,7 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
* If unset, then set it to zero because we really want it to
* eventually expire. We build the tarpit as an analyser.
*/
channel_erase(s->req);
channel_erase(&s->req);
/* wipe the request out so that we can drop the connection early
* if the client closes first.
@ -4341,7 +4341,7 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
/* This function performs all the processing enabled for the current request.
* It returns 1 if the processing can continue on next analysers, or zero if it
* needs more data, encounters an error, or wants to immediately abort the
* request. It relies on buffers flags, and updates s->req->analysers.
* request. It relies on buffers flags, and updates s->req.analysers.
*/
int http_process_request(struct session *s, struct channel *req, int an_bit)
{
@ -4761,7 +4761,7 @@ int http_wait_for_request_body(struct session *s, struct channel *req, int an_bi
/* Expect is allowed in 1.1, look for it */
if (http_find_header2("Expect", 6, req->buf->p, &txn->hdr_idx, &ctx) &&
unlikely(ctx.vlen == 12 && strncasecmp(ctx.line+ctx.val, "100-continue", 12) == 0)) {
bo_inject(s->rep, http_100_chunk.str, http_100_chunk.len);
bo_inject(&s->res, http_100_chunk.str, http_100_chunk.len);
}
}
msg->msg_state = HTTP_MSG_100_SENT;
@ -4945,10 +4945,10 @@ void http_end_txn_clean_session(struct session *s)
* to the server.
*/
if (((s->txn.flags & TX_CON_WANT_MSK) != TX_CON_WANT_KAL) ||
!si_conn_ready(s->req->cons)) {
s->req->cons->flags |= SI_FL_NOLINGER | SI_FL_NOHALF;
si_shutr(s->req->cons);
si_shutw(s->req->cons);
!si_conn_ready(s->req.cons)) {
s->req.cons->flags |= SI_FL_NOLINGER | SI_FL_NOHALF;
si_shutr(s->req.cons);
si_shutw(s->req.cons);
}
if (s->flags & SN_BE_ASSIGNED) {
@ -4982,13 +4982,13 @@ void http_end_txn_clean_session(struct session *s)
}
/* don't count other requests' data */
s->logs.bytes_in -= s->req->buf->i;
s->logs.bytes_out -= s->rep->buf->i;
s->logs.bytes_in -= s->req.buf->i;
s->logs.bytes_out -= s->res.buf->i;
/* let's do a final log if we need it */
if (!LIST_ISEMPTY(&s->fe->logformat) && s->logs.logwait &&
!(s->flags & SN_MONITOR) &&
(!(s->fe->options & PR_O_NULLNOLOG) || s->req->total)) {
(!(s->fe->options & PR_O_NULLNOLOG) || s->req.total)) {
s->do_log(s);
}
@ -5006,8 +5006,8 @@ void http_end_txn_clean_session(struct session *s)
s->logs.prx_queue_size = 0; /* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
s->logs.bytes_in = s->req->total = s->req->buf->i;
s->logs.bytes_out = s->rep->total = s->rep->buf->i;
s->logs.bytes_in = s->req.total = s->req.buf->i;
s->logs.bytes_out = s->res.total = s->res.buf->i;
if (s->pend_pos)
pendconn_free(s->pend_pos);
@ -5027,17 +5027,17 @@ void http_end_txn_clean_session(struct session *s)
* connection.
*/
if (((s->txn.flags & TX_CON_WANT_MSK) != TX_CON_WANT_KAL) ||
!si_conn_ready(s->req->cons)) {
si_release_endpoint(s->req->cons);
!si_conn_ready(s->req.cons)) {
si_release_endpoint(s->req.cons);
}
s->req->cons->state = s->req->cons->prev_state = SI_ST_INI;
s->req->cons->err_type = SI_ET_NONE;
s->req->cons->conn_retries = 0; /* used for logging too */
s->req->cons->exp = TICK_ETERNITY;
s->req->cons->flags &= SI_FL_DONT_WAKE; /* we're in the context of process_session */
s->req->flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT|CF_WROTE_DATA);
s->rep->flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT|CF_WROTE_DATA);
s->req.cons->state = s->req.cons->prev_state = SI_ST_INI;
s->req.cons->err_type = SI_ET_NONE;
s->req.cons->conn_retries = 0; /* used for logging too */
s->req.cons->exp = TICK_ETERNITY;
s->req.cons->flags &= SI_FL_DONT_WAKE; /* we're in the context of process_session */
s->req.flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT|CF_WROTE_DATA);
s->res.flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT|CF_WROTE_DATA);
s->flags &= ~(SN_DIRECT|SN_ASSIGNED|SN_ADDR_SET|SN_BE_ASSIGNED|SN_FORCE_PRST|SN_IGNORE_PRST);
s->flags &= ~(SN_CURR_SESS|SN_REDIRECTABLE|SN_SRV_REUSED);
s->flags &= ~(SN_ERR_MASK|SN_FINST_MASK|SN_REDISP);
@ -5058,11 +5058,11 @@ void http_end_txn_clean_session(struct session *s)
}
if (s->fe->options2 & PR_O2_INDEPSTR)
s->req->cons->flags |= SI_FL_INDEP_STR;
s->req.cons->flags |= SI_FL_INDEP_STR;
if (s->fe->options2 & PR_O2_NODELAY) {
s->req->flags |= CF_NEVER_WAIT;
s->rep->flags |= CF_NEVER_WAIT;
s->req.flags |= CF_NEVER_WAIT;
s->res.flags |= CF_NEVER_WAIT;
}
/* if the request buffer is not empty, it means we're
@ -5072,24 +5072,24 @@ void http_end_txn_clean_session(struct session *s)
* because the request will wait for it to flush a little
* bit before proceeding.
*/
if (s->req->buf->i) {
if (s->rep->buf->o &&
!buffer_full(s->rep->buf, global.tune.maxrewrite) &&
bi_end(s->rep->buf) <= s->rep->buf->data + s->rep->buf->size - global.tune.maxrewrite)
s->rep->flags |= CF_EXPECT_MORE;
if (s->req.buf->i) {
if (s->res.buf->o &&
!buffer_full(s->res.buf, global.tune.maxrewrite) &&
bi_end(s->res.buf) <= s->res.buf->data + s->res.buf->size - global.tune.maxrewrite)
s->res.flags |= CF_EXPECT_MORE;
}
/* we're removing the analysers, we MUST re-enable events detection */
channel_auto_read(s->req);
channel_auto_close(s->req);
channel_auto_read(s->rep);
channel_auto_close(s->rep);
channel_auto_read(&s->req);
channel_auto_close(&s->req);
channel_auto_read(&s->res);
channel_auto_close(&s->res);
/* we're in keep-alive with an idle connection, monitor it */
si_idle_conn(s->req->cons);
si_idle_conn(s->req.cons);
s->req->analysers = s->listener->analysers;
s->rep->analysers = 0;
s->req.analysers = s->listener->analysers;
s->res.analysers = 0;
}
@ -5102,7 +5102,7 @@ void http_end_txn_clean_session(struct session *s)
*/
int http_sync_req_state(struct session *s)
{
struct channel *chn = s->req;
struct channel *chn = &s->req;
struct http_txn *txn = &s->txn;
unsigned int old_flags = chn->flags;
unsigned int old_state = txn->req.msg_state;
@ -5240,7 +5240,7 @@ int http_sync_req_state(struct session *s)
*/
int http_sync_res_state(struct session *s)
{
struct channel *chn = s->rep;
struct channel *chn = &s->res;
struct http_txn *txn = &s->txn;
unsigned int old_flags = chn->flags;
unsigned int old_state = txn->rsp.msg_state;
@ -5397,25 +5397,25 @@ int http_resync_states(struct session *s)
txn->rsp.msg_state == HTTP_MSG_TUNNEL ||
(txn->req.msg_state == HTTP_MSG_CLOSED &&
txn->rsp.msg_state == HTTP_MSG_CLOSED)) {
s->req->analysers = 0;
channel_auto_close(s->req);
channel_auto_read(s->req);
s->rep->analysers = 0;
channel_auto_close(s->rep);
channel_auto_read(s->rep);
s->req.analysers = 0;
channel_auto_close(&s->req);
channel_auto_read(&s->req);
s->res.analysers = 0;
channel_auto_close(&s->res);
channel_auto_read(&s->res);
}
else if ((txn->req.msg_state >= HTTP_MSG_DONE &&
(txn->rsp.msg_state == HTTP_MSG_CLOSED || (s->rep->flags & CF_SHUTW))) ||
(txn->rsp.msg_state == HTTP_MSG_CLOSED || (s->res.flags & CF_SHUTW))) ||
txn->rsp.msg_state == HTTP_MSG_ERROR ||
txn->req.msg_state == HTTP_MSG_ERROR) {
s->rep->analysers = 0;
channel_auto_close(s->rep);
channel_auto_read(s->rep);
s->req->analysers = 0;
channel_abort(s->req);
channel_auto_close(s->req);
channel_auto_read(s->req);
channel_truncate(s->req);
s->res.analysers = 0;
channel_auto_close(&s->res);
channel_auto_read(&s->res);
s->req.analysers = 0;
channel_abort(&s->req);
channel_auto_close(&s->req);
channel_auto_read(&s->req);
channel_truncate(&s->req);
}
else if ((txn->req.msg_state == HTTP_MSG_DONE ||
txn->req.msg_state == HTTP_MSG_CLOSED) &&
@ -5494,7 +5494,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
* whichs need to parse/process the request after we've enabled forwarding.
*/
if (unlikely(msg->flags & HTTP_MSGF_WAIT_CONN)) {
if (!(s->rep->flags & CF_READ_ATTACHED)) {
if (!(s->res.flags & CF_READ_ATTACHED)) {
channel_auto_connect(req);
req->flags |= CF_WAKE_CONNECT;
goto missing_data;
@ -5581,7 +5581,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
* such as last chunk of data or trailers.
*/
b_adv(req->buf, msg->next);
if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
if (unlikely(!(s->req.flags & CF_WROTE_DATA)))
msg->sov -= msg->next;
msg->next = 0;
@ -5633,7 +5633,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
missing_data:
/* we may have some pending data starting at req->buf->p */
b_adv(req->buf, msg->next);
if (unlikely(!(s->req->flags & CF_WROTE_DATA)))
if (unlikely(!(s->req.flags & CF_WROTE_DATA)))
msg->sov -= msg->next + MIN(msg->chunk_len, req->buf->i);
msg->next = 0;
@ -5700,7 +5700,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
s->res.analysers = 0; /* we're in data phase, we want to abort both directions */
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
@ -5722,7 +5722,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_502));
}
req->analysers = 0;
s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */
s->res.analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.srv_aborts++;
s->be->be_counters.srv_aborts++;
@ -5743,7 +5743,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
/* This stream analyser waits for a complete HTTP response. It returns 1 if the
* processing can continue on next analysers, or zero if it either needs more
* data or wants to immediately abort the response (eg: timeout, error, ...). It
* is tied to AN_RES_WAIT_HTTP and may may remove itself from s->rep->analysers
* is tied to AN_RES_WAIT_HTTP and may may remove itself from s->res.analysers
* when it has nothing left to do, and may remove any analyser when it wants to
* abort.
*/
@ -5933,7 +5933,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
}
/* client abort with an abortonclose */
else if ((rep->flags & CF_SHUTR) && ((s->req->flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
if (objt_server(s->target))
@ -6299,11 +6299,11 @@ skip_content_length:
*/
txn->status = 0;
rep->analysers = 0;
s->req->analysers = 0;
s->req.analysers = 0;
channel_auto_close(rep);
s->logs.logwait = 0;
s->logs.level = 0;
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->res.flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
channel_truncate(rep);
stream_int_retnclose(rep->cons, NULL);
return 0;
@ -6311,7 +6311,7 @@ skip_content_length:
/* This function performs all the processing enabled for the current response.
* It normally returns 1 unless it wants to break. It relies on buffers flags,
* and updates s->rep->analysers. It might make sense to explode it into several
* and updates s->res.analysers. It might make sense to explode it into several
* other functions. It works like process_request (see indications above).
*/
int http_process_res_common(struct session *s, struct channel *rep, int an_bit, struct proxy *px)
@ -6666,7 +6666,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
if ((res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((res->flags & CF_SHUTW) && (res->to_forward || res->buf->o)) ||
!s->req->analysers) {
!s->req.analysers) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
*/
@ -6870,7 +6870,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
* server abort.
*/
if (res->flags & CF_SHUTR) {
if ((s->req->flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
if ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))
goto aborted_xfer;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
@ -6881,7 +6881,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
}
/* we need to obey the req analyser, so if it leaves, we must too */
if (!s->req->analysers)
if (!s->req.analysers)
goto return_bad_res;
/* When TE: chunked is used, we need to get there again to parse remaining
@ -6929,7 +6929,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
/* don't send any error message as we're in the body */
stream_int_retnclose(res->cons, NULL);
res->analysers = 0;
s->req->analysers = 0; /* we're in data phase, we want to abort both directions */
s->req.analysers = 0; /* we're in data phase, we want to abort both directions */
if (objt_server(s->target))
health_adjust(objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
@ -6949,7 +6949,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
/* don't send any error message as we're in the body */
stream_int_retnclose(res->cons, NULL);
res->analysers = 0;
s->req->analysers = 0; /* we're in data phase, we want to abort both directions */
s->req.analysers = 0; /* we're in data phase, we want to abort both directions */
s->fe->fe_counters.cli_aborts++;
s->be->be_counters.cli_aborts++;
@ -8638,8 +8638,8 @@ void http_capture_bad_message(struct error_snapshot *es, struct session *s,
es->sid = s->uniq_id;
es->srv = objt_server(s->target);
es->oe = other_end;
if (objt_conn(s->req->prod->end))
es->src = __objt_conn(s->req->prod->end)->addr.from;
if (objt_conn(s->req.prod->end))
es->src = __objt_conn(s->req.prod->end)->addr.from;
else
memset(&es->src, 0, sizeof(es->src));
@ -8796,8 +8796,8 @@ void debug_hdr(const char *dir, struct session *s, const char *start, const char
int max;
chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
dir,
objt_conn(s->req->prod->end) ? (unsigned short)objt_conn(s->req->prod->end)->t.sock.fd : -1,
objt_conn(s->req->cons->end) ? (unsigned short)objt_conn(s->req->cons->end)->t.sock.fd : -1);
objt_conn(s->req.prod->end) ? (unsigned short)objt_conn(s->req.prod->end)->t.sock.fd : -1,
objt_conn(s->req.cons->end) ? (unsigned short)objt_conn(s->req.cons->end)->t.sock.fd : -1);
for (max = 0; start + max < end; max++)
if (start[max] == '\r' || start[max] == '\n')
@ -8837,8 +8837,8 @@ void http_init_txn(struct session *s)
txn->rsp.body_len = 0LL;
txn->req.msg_state = HTTP_MSG_RQBEFORE; /* at the very beginning of the request */
txn->rsp.msg_state = HTTP_MSG_RPBEFORE; /* at the very beginning of the response */
txn->req.chn = s->req;
txn->rsp.chn = s->rep;
txn->req.chn = &s->req;
txn->rsp.chn = &s->res;
txn->auth.method = HTTP_AUTH_UNKNOWN;
@ -8918,7 +8918,7 @@ void http_reset_txn(struct session *s)
s->pend_pos = NULL;
s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* We must trim any excess data from the response buffer, because we
* may have blocked an invalid response from a server that we don't
@ -8928,21 +8928,21 @@ void http_reset_txn(struct session *s)
* a HEAD with some data, or sending more than the advertised
* content-length.
*/
if (unlikely(s->rep->buf->i))
s->rep->buf->i = 0;
if (unlikely(s->res.buf->i))
s->res.buf->i = 0;
s->req->rto = s->fe->timeout.client;
s->req->wto = TICK_ETERNITY;
s->req.rto = s->fe->timeout.client;
s->req.wto = TICK_ETERNITY;
s->rep->rto = TICK_ETERNITY;
s->rep->wto = s->fe->timeout.client;
s->res.rto = TICK_ETERNITY;
s->res.wto = s->fe->timeout.client;
s->req->rex = TICK_ETERNITY;
s->req->wex = TICK_ETERNITY;
s->req->analyse_exp = TICK_ETERNITY;
s->rep->rex = TICK_ETERNITY;
s->rep->wex = TICK_ETERNITY;
s->rep->analyse_exp = TICK_ETERNITY;
s->req.rex = TICK_ETERNITY;
s->req.wex = TICK_ETERNITY;
s->req.analyse_exp = TICK_ETERNITY;
s->res.rex = TICK_ETERNITY;
s->res.wex = TICK_ETERNITY;
s->res.analyse_exp = TICK_ETERNITY;
}
void free_http_res_rules(struct list *r)
@ -9912,28 +9912,25 @@ smp_prefetch_http(struct proxy *px, struct session *s, void *l7, unsigned int op
smp->type = SMP_T_BOOL;
if ((opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ) {
if (unlikely(!s->req))
return 0;
/* If the buffer does not leave enough free space at the end,
* we must first realign it.
*/
if (s->req->buf->p > s->req->buf->data &&
s->req->buf->i + s->req->buf->p > s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)
buffer_slow_realign(s->req->buf);
if (s->req.buf->p > s->req.buf->data &&
s->req.buf->i + s->req.buf->p > s->req.buf->data + s->req.buf->size - global.tune.maxrewrite)
buffer_slow_realign(s->req.buf);
if (unlikely(txn->req.msg_state < HTTP_MSG_BODY)) {
if (msg->msg_state == HTTP_MSG_ERROR)
return 0;
/* Try to decode HTTP request */
if (likely(msg->next < s->req->buf->i))
if (likely(msg->next < s->req.buf->i))
http_msg_analyzer(msg, &txn->hdr_idx);
/* Still no valid request ? */
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
if ((msg->msg_state == HTTP_MSG_ERROR) ||
buffer_full(s->req->buf, global.tune.maxrewrite)) {
buffer_full(s->req.buf, global.tune.maxrewrite)) {
return 0;
}
/* wait for final state */
@ -9951,8 +9948,8 @@ smp_prefetch_http(struct proxy *px, struct session *s, void *l7, unsigned int op
* cannot happen, but if the parsers are to change in the future,
* we want this check to be maintained.
*/
if (unlikely(s->req->buf->i + s->req->buf->p >
s->req->buf->data + s->req->buf->size - global.tune.maxrewrite)) {
if (unlikely(s->req.buf->i + s->req.buf->p >
s->req.buf->data + s->req.buf->size - global.tune.maxrewrite)) {
msg->msg_state = HTTP_MSG_ERROR;
smp->data.uint = 1;
return 1;
@ -11666,7 +11663,7 @@ int http_action_set_req_line(struct http_req_rule *rule, struct proxy *px, struc
switch (*(int *)&rule->arg.act.p[2]) {
case 0: // method
cur_ptr = s->req->buf->p;
cur_ptr = s->req.buf->p;
cur_end = cur_ptr + txn->req.sl.rq.m_l;
/* adjust req line offsets and lengths */
@ -11679,10 +11676,10 @@ int http_action_set_req_line(struct http_req_rule *rule, struct proxy *px, struc
case 1: // path
cur_ptr = http_get_path(txn);
if (!cur_ptr)
cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr;
while (cur_end < s->req->buf->p + txn->req.sl.rq.u + txn->req.sl.rq.u_l && *cur_end != '?')
while (cur_end < s->req.buf->p + txn->req.sl.rq.u + txn->req.sl.rq.u_l && *cur_end != '?')
cur_end++;
/* adjust req line offsets and lengths */
@ -11692,7 +11689,7 @@ int http_action_set_req_line(struct http_req_rule *rule, struct proxy *px, struc
break;
case 2: // query
cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr + txn->req.sl.rq.u_l;
while (cur_ptr < cur_end && *cur_ptr != '?')
cur_ptr++;
@ -11712,7 +11709,7 @@ int http_action_set_req_line(struct http_req_rule *rule, struct proxy *px, struc
break;
case 3: // uri
cur_ptr = s->req->buf->p + txn->req.sl.rq.u;
cur_ptr = s->req.buf->p + txn->req.sl.rq.u;
cur_end = cur_ptr + txn->req.sl.rq.u_l;
/* adjust req line offsets and lengths */
@ -11726,7 +11723,7 @@ int http_action_set_req_line(struct http_req_rule *rule, struct proxy *px, struc
}
/* commit changes and adjust end of message */
delta = buffer_replace2(s->req->buf, cur_ptr, cur_end, trash.str + offset, trash.len - offset);
delta = buffer_replace2(s->req.buf, cur_ptr, cur_end, trash.str + offset, trash.len - offset);
http_msg_move_end(&txn->req, delta);
return 0;
}

View File

@ -1149,7 +1149,7 @@ resume_execution:
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
channel_abort(req);
channel_abort(s->rep);
channel_abort(&s->res);
req->analysers = 0;
s->be->be_counters.denied_req++;
@ -1310,7 +1310,7 @@ resume_execution:
/* we have a matching rule. */
if (rule->action == TCP_ACT_REJECT) {
channel_abort(rep);
channel_abort(s->req);
channel_abort(&s->req);
rep->analysers = 0;
s->be->be_counters.denied_resp++;

View File

@ -973,8 +973,8 @@ int session_set_backend(struct session *s, struct proxy *be)
s->txn.req.flags |= HTTP_MSGF_WAIT_CONN;
if (be->options2 & PR_O2_NODELAY) {
s->req->flags |= CF_NEVER_WAIT;
s->rep->flags |= CF_NEVER_WAIT;
s->req.flags |= CF_NEVER_WAIT;
s->res.flags |= CF_NEVER_WAIT;
}
/* We want to enable the backend-specific analysers except those which
@ -982,7 +982,7 @@ int session_set_backend(struct session *s, struct proxy *be)
* be more reliable to store the list of analysers that have been run,
* but what we do here is OK for now.
*/
s->req->analysers |= be->be_req_ana & ~(s->listener->analysers);
s->req.analysers |= be->be_req_ana & ~(s->listener->analysers);
return 1;
}

File diff suppressed because it is too large Load Diff