MAJOR: session: only allocate buffers when needed

A session doesn't need buffers all the time, especially when they're
empty. With this patch, we no longer allocate buffers when the
session is initialized; we only allocate them in two cases:

  - during process_session()
  - during I/O operations

During process_session(), we try hard to allocate both buffers at once
so that we know for sure that a started operation can complete. Indeed,
a previous version of this patch used to allocate one buffer at a time,
but it could result in a deadlock, for example when all buffers are
allocated for requests and none is left to emit error responses.
Here, if any of the buffers cannot be allocated, the whole operation is
cancelled and the session is added at the tail of the buffer wait queue.

At the end of process_session(), a call to session_release_buffers() is
done so that we can offer unused buffers to other sessions waiting for
them.

For I/O operations, we only need to allocate a buffer on the Rx path.
For this, we only allocate a single buffer but ensure that at least two
are available to avoid the deadlock situation. In case buffers are not
available, SI_FL_WAIT_ROOM is set on the stream interface and the session
is queued. Buffers released either after a successful send() or because
a read buffer was left unused are offered to pending sessions during
the ->wake() callback.
This commit is contained in:
Willy Tarreau 2014-11-25 19:46:36 +01:00
parent bf883e0aa7
commit 10fc09e872
3 changed files with 25 additions and 24 deletions

View File

@ -1277,12 +1277,6 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
s->rep->flags |= CF_READ_DONTWAIT;
if (unlikely(b_alloc(&s->req->buf) == NULL))
goto out_fail_req_buf; /* no memory */
if (unlikely(b_alloc(&s->rep->buf) == NULL))
goto out_fail_rep_buf; /* no memory */
/* it is important not to call the wakeup function directly but to
* pass through task_wakeup(), because this one knows how to apply
* priorities to tasks.
@ -1299,10 +1293,6 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
return s;
/* Error unrolling */
out_fail_rep_buf:
b_free(&s->req->buf);
out_fail_req_buf:
pool_free2(pool2_channel, s->rep);
out_fail_rep:
pool_free2(pool2_channel, s->req);
out_fail_req:

View File

@ -516,12 +516,6 @@ int session_complete(struct session *s)
s->rep->wex = TICK_ETERNITY;
s->rep->analyse_exp = TICK_ETERNITY;
if (unlikely(b_alloc(&s->req->buf) == NULL))
goto out_free_rep; /* no memory */
if (unlikely(b_alloc(&s->rep->buf) == NULL))
goto out_free_req_buf; /* no memory */
txn = &s->txn;
/* Those variables will be checked and freed if non-NULL in
* session.c:session_free(). It is important that they are
@ -550,7 +544,7 @@ int session_complete(struct session *s)
* finished (=0, eg: monitoring), in both situations,
* we can release everything and close.
*/
goto out_free_rep_buf;
goto out_free_rep;
}
/* if logs require transport layer information, note it on the connection */
@ -568,10 +562,6 @@ int session_complete(struct session *s)
return 1;
/* Error unrolling */
out_free_rep_buf:
b_free(&s->rep->buf);
out_free_req_buf:
b_free(&s->req->buf);
out_free_rep:
pool_free2(pool2_channel, s->rep);
out_free_req:
@ -1804,6 +1794,16 @@ struct task *process_session(struct task *t)
goto update_exp_and_leave;
}
/* below we may emit error messages so we have to ensure that we have
* our buffers properly allocated.
*/
if (!session_alloc_buffers(s)) {
/* No buffer available, we've been subscribed to the list of
* buffer waiters, let's wait for our turn.
*/
goto update_exp_and_leave;
}
/* 1b: check for low-level errors reported at the stream interface.
* First we check if it's a retryable error (in which case we don't
* want to tell the buffer). Otherwise we report the error one level
@ -2602,6 +2602,7 @@ struct task *process_session(struct task *t)
if ((si_applet_call(s->req->cons) | si_applet_call(s->rep->cons)) != 0) {
if (task_in_rq(t)) {
t->expire = TICK_ETERNITY;
session_release_buffers(s);
return t;
}
}
@ -2631,6 +2632,7 @@ struct task *process_session(struct task *t)
if (!tick_isset(t->expire))
ABORT_NOW();
#endif
session_release_buffers(s);
return t; /* nothing more to do */
}

View File

@ -642,6 +642,8 @@ static int si_conn_wake_cb(struct connection *conn)
}
if (si->ib->flags & CF_READ_ACTIVITY)
si->ib->flags &= ~CF_READ_DONTWAIT;
session_release_buffers(si_sess(si));
return 0;
}
@ -1166,6 +1168,12 @@ static void si_conn_recv_cb(struct connection *conn)
chn->pipe = NULL;
}
/* now we'll need a buffer */
if (!session_alloc_recv_buffer(si_sess(si), &chn->buf)) {
si->flags |= SI_FL_WAIT_ROOM;
goto end_recv;
}
/* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
* was enabled, which implies that the recv buffer was not full. So we have a guarantee
* that if such an event is not handled above in splice, it will be handled here by
@ -1230,9 +1238,6 @@ static void si_conn_recv_cb(struct connection *conn)
}
} /* while !flags */
if (conn->flags & CO_FL_ERROR)
return;
if (cur_read) {
if ((chn->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
(cur_read <= chn->buf->size / 2)) {
@ -1272,6 +1277,10 @@ static void si_conn_recv_cb(struct connection *conn)
chn->last_read = now_ms;
}
end_recv:
if (conn->flags & CO_FL_ERROR)
return;
if (conn_data_read0_pending(conn))
/* connection closed */
goto out_shutdown_r;