MEDIUM: adjust the maxaccept per listener depending on the number of processes

global.tune.maxaccept was used for all listeners. This becomes quite
inconvenient when some listeners are bound to a single process and other
ones are bound to many processes.

Now we change the principle: we count the number of processes a listener
is bound to, and apply the maxaccept either entirely if there is a single
process, or divided by twice the number of processes in order to maintain
fairness.

The default limit has also been increased from 32 to 64 as it appeared that
on small machines, 32 was too low to achieve high connection rates.
This commit is contained in:
Willy Tarreau 2012-11-19 12:39:59 +01:00
parent 37994f034c
commit 16a2147dfe
5 changed files with 32 additions and 21 deletions

View File

@ -791,14 +791,16 @@ tune.http.maxhdr <number>
limit too high.
tune.maxaccept <number>
Sets the maximum number of consecutive accepts that a process may perform on
a single wake up. High values give higher priority to high connection rates,
while lower values give higher priority to already established connections.
This value is limited to 100 by default in single process mode. However, in
multi-process mode (nbproc > 1), it defaults to 8 so that when one process
wakes up, it does not take all incoming connections for itself and leaves a
part of them to other processes. Setting this value to -1 completely disables
the limitation. It should normally not be needed to tweak this value.
Sets the maximum number of consecutive connections a process may accept in a
row before switching to other work. In single process mode, higher numbers
give better performance at high connection rates. However in multi-process
modes, keeping a bit of fairness between processes generally is better to
increase performance. This value applies individually to each listener, so
that the number of processes a listener is bound to is taken into account.
This value defaults to 64. In multi-process mode, it is divided by twice
the number of processes the listener is bound to. Setting this value to -1
completely disables the limitation. It should normally not be needed to tweak
this value.
tune.maxpollevents <number>
Sets the maximum amount of events that can be processed at once in a call to

View File

@ -159,6 +159,7 @@ struct listener {
int nbconn; /* current number of connections on this listener */
int maxconn; /* maximum connections allowed on this listener */
unsigned int backlog; /* if set, listen backlog */
unsigned int maxaccept; /* if set, max number of connections accepted at once */
struct list proto_list; /* list in the protocol header */
int (*accept)(struct listener *l, int fd, struct sockaddr_storage *addr); /* upper layer's accept() */
struct task * (*handler)(struct task *t); /* protocol handler. It is a task */

View File

@ -5957,6 +5957,7 @@ int check_config_validity()
struct tcp_rule *trule;
struct listener *listener;
unsigned int next_id;
int nbproc;
if (curproxy->uuid < 0) {
/* proxy ID not set, use automatic numbering with first
@ -5976,6 +5977,9 @@ int check_config_validity()
continue;
}
/* number of processes this proxy is bound to */
nbproc = curproxy->bind_proc ? popcount(curproxy->bind_proc) : global.nbproc;
switch (curproxy->mode) {
case PR_MODE_HEALTH:
cfgerr += proxy_cfg_ensure_no_http(curproxy);
@ -6817,6 +6821,22 @@ int check_config_validity()
listener->maxconn = curproxy->maxconn;
if (!listener->backlog)
listener->backlog = curproxy->backlog;
if (!listener->maxaccept)
listener->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : 64;
/* we want to have an optimal behaviour on single process mode to
* maximize the work at once, but in multi-process we want to keep
* some fairness between processes, so we target half of the max
* number of events to be balanced over all the processes the proxy
 * is bound to. Remember that maxaccept = -1 must be kept as it is
 * used to disable the limit.
*/
if (listener->maxaccept > 0) {
if (nbproc > 1)
listener->maxaccept = (listener->maxaccept + 1) / 2;
listener->maxaccept = (listener->maxaccept + nbproc - 1) / nbproc;
}
listener->timeout = &curproxy->timeout.client;
listener->accept = session_accept;
listener->handler = process_session;

View File

@ -724,18 +724,6 @@ void init(int argc, char **argv)
if (global.tune.maxpollevents <= 0)
global.tune.maxpollevents = MAX_POLL_EVENTS;
if (global.tune.maxaccept == 0) {
/* Note: we should not try to accept too many connections at once,
* because past one point we're significantly reducing the cache
* efficiency and the highest session rate significantly drops.
* Values between 15 and 35 seem fine on a Core i5 with 4M L3 cache.
*/
if (global.nbproc > 1)
global.tune.maxaccept = 8; /* leave some conns to other processes */
else
global.tune.maxaccept = 32; /* accept more incoming conns at once */
}
if (global.tune.recv_enough == 0)
global.tune.recv_enough = MIN_RECV_AT_ONCE_ENOUGH;

View File

@ -252,7 +252,7 @@ void listener_accept(int fd)
{
struct listener *l = fdtab[fd].owner;
struct proxy *p = l->frontend;
int max_accept = global.tune.maxaccept;
int max_accept = l->maxaccept;
int cfd;
int ret;