/*
 * Queue management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
|
|
|
|
|
2006-06-29 16:54:54 +00:00
|
|
|
#include <common/config.h>
|
2007-05-13 18:19:55 +00:00
|
|
|
#include <common/memory.h>
|
2006-06-29 15:53:05 +00:00
|
|
|
#include <common/time.h>
|
2006-06-26 00:48:02 +00:00
|
|
|
|
|
|
|
#include <proto/queue.h>
|
|
|
|
#include <proto/server.h>
|
|
|
|
#include <proto/task.h>
|
|
|
|
|
|
|
|
|
2007-05-13 18:19:55 +00:00
|
|
|
struct pool_head *pool2_pendconn;
|
|
|
|
|
|
|
|
/* perform minimal intializations, report 0 in case of error, 1 if OK. */
|
|
|
|
int init_pendconn()
|
|
|
|
{
|
|
|
|
pool2_pendconn = create_pool("pendconn", sizeof(struct pendconn), MEM_F_SHARED);
|
|
|
|
return pool2_pendconn != NULL;
|
|
|
|
}
|
2006-06-26 00:48:02 +00:00
|
|
|
|
|
|
|
/* returns the effective dynamic maxconn for a server, considering the minconn
|
2006-12-28 23:10:33 +00:00
|
|
|
* and the proxy's usage relative to its dynamic connections limit. It is
|
2007-11-30 16:42:05 +00:00
|
|
|
* expected that 0 < s->minconn <= s->maxconn when this is called. If the
|
|
|
|
* server is currently warming up, the slowstart is also applied to the
|
|
|
|
* resulting value, which can be lower than minconn in this case, but never
|
|
|
|
* less than 1.
|
2006-06-26 00:48:02 +00:00
|
|
|
*/
|
2006-10-15 13:17:57 +00:00
|
|
|
unsigned int srv_dynamic_maxconn(const struct server *s)
|
2006-06-26 00:48:02 +00:00
|
|
|
{
|
2007-11-30 16:42:05 +00:00
|
|
|
unsigned int max;
|
|
|
|
|
2006-12-28 23:10:33 +00:00
|
|
|
if (s->proxy->beconn >= s->proxy->fullconn)
|
|
|
|
/* no fullconn or proxy is full */
|
2007-11-30 16:42:05 +00:00
|
|
|
max = s->maxconn;
|
|
|
|
else if (s->minconn == s->maxconn)
|
2006-12-28 23:10:33 +00:00
|
|
|
/* static limit */
|
2007-11-30 16:42:05 +00:00
|
|
|
max = s->maxconn;
|
|
|
|
else max = MAX(s->minconn,
|
|
|
|
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
|
2006-12-28 23:10:33 +00:00
|
|
|
|
2007-11-30 16:42:05 +00:00
|
|
|
if ((s->state & SRV_WARMINGUP) &&
|
|
|
|
now.tv_sec < s->last_change + s->slowstart &&
|
|
|
|
now.tv_sec >= s->last_change) {
|
|
|
|
unsigned int ratio;
|
2008-09-14 15:43:27 +00:00
|
|
|
ratio = 100 * (now.tv_sec - s->last_change) / s->slowstart;
|
|
|
|
max = MAX(1, max * ratio / 100);
|
2007-11-30 16:42:05 +00:00
|
|
|
}
|
|
|
|
return max;
|
2006-06-26 00:48:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
* Manages a server's connection queue. This function will try to dequeue as
|
|
|
|
* many pending sessions as possible, and wake them up.
|
2006-06-26 00:48:02 +00:00
|
|
|
*/
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
void process_srv_queue(struct server *s)
|
2006-06-26 00:48:02 +00:00
|
|
|
{
|
|
|
|
struct proxy *p = s->proxy;
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
int maxconn;
|
2006-06-26 00:48:02 +00:00
|
|
|
|
|
|
|
/* First, check if we can handle some connections queued at the proxy. We
|
|
|
|
* will take as many as we can handle.
|
|
|
|
*/
|
|
|
|
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
maxconn = srv_dynamic_maxconn(s);
|
|
|
|
while (s->served < maxconn) {
|
|
|
|
struct session *sess = pendconn_get_next_sess(s, p);
|
2006-06-26 00:48:02 +00:00
|
|
|
if (sess == NULL)
|
|
|
|
break;
|
2008-08-29 16:19:04 +00:00
|
|
|
task_wakeup(sess->task, TASK_WOKEN_RES);
|
2006-06-26 00:48:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Detaches the next pending connection from either a server or a proxy, and
|
|
|
|
* returns its associated session. If no pending connection is found, NULL is
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
* returned. Note that neither <srv> nor <px> may be NULL.
|
2008-06-13 19:12:51 +00:00
|
|
|
* Priority is given to the oldest request in the queue if both <srv> and <px>
|
|
|
|
* have pending requests. This ensures that no request will be left unserved.
|
2010-08-06 08:08:23 +00:00
|
|
|
* The <px> queue is not considered if the server (or a tracked server) is not
|
|
|
|
* RUNNING, is disabled, or has a null weight (server going down). The <srv>
|
2008-12-04 08:33:58 +00:00
|
|
|
* queue is still considered in this case, because if some connections remain
|
|
|
|
* there, it means that some requests have been forced there after it was seen
|
|
|
|
* down (eg: due to option persist).
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
* The session is immediately marked as "assigned", and both its <srv> and
|
|
|
|
* <srv_conn> are set to <srv>,
|
2006-06-26 00:48:02 +00:00
|
|
|
*/
|
|
|
|
struct session *pendconn_get_next_sess(struct server *srv, struct proxy *px)
|
|
|
|
{
|
2008-06-13 19:12:51 +00:00
|
|
|
struct pendconn *ps, *pp;
|
2006-06-26 00:48:02 +00:00
|
|
|
struct session *sess;
|
2010-08-06 08:08:23 +00:00
|
|
|
struct server *rsrv;
|
|
|
|
|
|
|
|
rsrv = srv->tracked;
|
|
|
|
if (!rsrv)
|
|
|
|
rsrv = srv;
|
2006-06-26 00:48:02 +00:00
|
|
|
|
2008-06-13 19:12:51 +00:00
|
|
|
ps = pendconn_from_srv(srv);
|
|
|
|
pp = pendconn_from_px(px);
|
|
|
|
/* we want to get the definitive pendconn in <ps> */
|
2010-08-06 08:08:23 +00:00
|
|
|
if (!pp || !(rsrv->state & SRV_RUNNING) || (rsrv->state & (SRV_GOINGDOWN|SRV_MAINTAIN))) {
|
2008-06-13 19:12:51 +00:00
|
|
|
if (!ps)
|
2006-06-26 00:48:02 +00:00
|
|
|
return NULL;
|
2008-06-13 19:12:51 +00:00
|
|
|
} else {
|
|
|
|
/* pendconn exists in the proxy queue */
|
|
|
|
if (!ps || tv_islt(&pp->sess->logs.tv_request, &ps->sess->logs.tv_request)) {
|
|
|
|
ps = pp;
|
|
|
|
ps->sess->srv = srv;
|
|
|
|
}
|
2006-06-26 00:48:02 +00:00
|
|
|
}
|
2008-06-13 19:12:51 +00:00
|
|
|
sess = ps->sess;
|
|
|
|
pendconn_free(ps);
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
|
|
|
|
/* we want to note that the session has now been assigned a server */
|
|
|
|
sess->flags |= SN_ASSIGNED;
|
2011-03-10 10:38:29 +00:00
|
|
|
sess->target.type = TARG_TYPE_SERVER;
|
|
|
|
sess->target.ptr.s = srv;
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
sess->srv = srv;
|
|
|
|
sess->srv_conn = srv;
|
|
|
|
srv->served++;
|
|
|
|
if (px->lbprm.server_take_conn)
|
|
|
|
px->lbprm.server_take_conn(srv);
|
|
|
|
|
2006-06-26 00:48:02 +00:00
|
|
|
return sess;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Adds the session <sess> to the pending connection list of server <sess>->srv
|
|
|
|
* or to the one of <sess>->proxy if srv is NULL. All counters and back pointers
|
|
|
|
* are updated accordingly. Returns NULL if no memory is available, otherwise the
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
* pendconn itself. If the session was already marked as served, its flag is
|
|
|
|
* cleared. It is illegal to call this function with a non-NULL sess->srv_conn.
|
2006-06-26 00:48:02 +00:00
|
|
|
*/
|
|
|
|
struct pendconn *pendconn_add(struct session *sess)
|
|
|
|
{
|
|
|
|
struct pendconn *p;
|
|
|
|
|
2007-05-13 18:19:55 +00:00
|
|
|
p = pool_alloc2(pool2_pendconn);
|
2006-06-26 00:48:02 +00:00
|
|
|
if (!p)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
sess->pend_pos = p;
|
|
|
|
p->sess = sess;
|
|
|
|
p->srv = sess->srv;
|
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 13:04:11 +00:00
|
|
|
|
|
|
|
if (sess->flags & SN_ASSIGNED && sess->srv) {
|
2006-06-26 00:48:02 +00:00
|
|
|
LIST_ADDQ(&sess->srv->pendconns, &p->list);
|
|
|
|
sess->srv->nbpend++;
|
2008-06-13 19:48:18 +00:00
|
|
|
sess->logs.srv_queue_size += sess->srv->nbpend;
|
2009-10-04 21:12:44 +00:00
|
|
|
if (sess->srv->nbpend > sess->srv->counters.nbpend_max)
|
|
|
|
sess->srv->counters.nbpend_max = sess->srv->nbpend;
|
2006-06-26 00:48:02 +00:00
|
|
|
} else {
|
2007-03-31 22:01:37 +00:00
|
|
|
LIST_ADDQ(&sess->be->pendconns, &p->list);
|
|
|
|
sess->be->nbpend++;
|
2008-06-13 19:48:18 +00:00
|
|
|
sess->logs.prx_queue_size += sess->be->nbpend;
|
2009-10-04 21:12:44 +00:00
|
|
|
if (sess->be->nbpend > sess->be->counters.nbpend_max)
|
|
|
|
sess->be->counters.nbpend_max = sess->be->nbpend;
|
2006-06-26 00:48:02 +00:00
|
|
|
}
|
2007-03-31 22:01:37 +00:00
|
|
|
sess->be->totpend++;
|
2006-06-26 00:48:02 +00:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Detaches pending connection <p>, decreases the pending count, and frees
|
|
|
|
* the pending connection. The connection might have been queued to a specific
|
|
|
|
* server as well as to the proxy. The session also gets marked unqueued.
|
|
|
|
*/
|
|
|
|
void pendconn_free(struct pendconn *p)
|
|
|
|
{
|
|
|
|
LIST_DEL(&p->list);
|
|
|
|
p->sess->pend_pos = NULL;
|
|
|
|
if (p->srv)
|
|
|
|
p->srv->nbpend--;
|
|
|
|
else
|
2007-03-31 22:01:37 +00:00
|
|
|
p->sess->be->nbpend--;
|
|
|
|
p->sess->be->totpend--;
|
2007-05-13 18:19:55 +00:00
|
|
|
pool_free2(pool2_pendconn, p);
|
2006-06-26 00:48:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */
|