[MEDIUM] listeners: put listeners in queue upon resource shortage

When an accept() fails because of a connection limit or a memory shortage,
we now disable it and queue it so that it's dequeued only when a connection
is released. This has improved the behaviour of the process near the fd limit
as now a listener with no connection (e.g. stats) will not loop forever
trying to get its connection accepted.

The solution is still not 100% perfect, as we'd like to have this used when
proxy limits are reached (use a per-proxy list) and for safety, we'd need
to have dedicated tasks to periodically re-enable them (eg: to overcome
temporary system-wide resource limitations when no connection is released).
This commit is contained in:
Willy Tarreau 2011-07-24 22:58:00 +02:00
parent e6ca1fcd84
commit 08ceb1012b
4 changed files with 19 additions and 10 deletions

View File

@ -123,6 +123,7 @@ extern const struct linger nolinger;
extern int stopping; /* non zero means stopping in progress */
extern char hostname[MAX_HOSTNAME_LEN];
extern char localpeer[MAX_HOSTNAME_LEN];
extern struct list global_listener_queue; /* list of the temporarily limited listeners */
#endif /* _TYPES_GLOBAL_H */

View File

@ -159,6 +159,8 @@ const struct linger nolinger = { .l_onoff = 1, .l_linger = 0 };
char hostname[MAX_HOSTNAME_LEN];
char localpeer[MAX_HOSTNAME_LEN];
/* list of the temporarily limited listeners because of lack of resource */
struct list global_listener_queue = LIST_HEAD_INIT(global_listener_queue);
/*********************************************************************/
/* general purpose functions ***************************************/

View File

@ -2092,6 +2092,10 @@ struct task *process_session(struct task *t)
if (s->listener->state == LI_FULL)
resume_listener(s->listener);
/* Dequeues all of the listeners waiting for a resource */
if (!LIST_ISEMPTY(&global_listener_queue))
dequeue_all_listeners(&global_listener_queue);
if (unlikely((global.mode & MODE_DEBUG) &&
(!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
int len;

View File

@ -1192,6 +1192,7 @@ int stream_sock_accept(int fd)
int max_accept = global.tune.maxaccept;
int cfd;
int ret;
int loops = 0;
if (unlikely(l->nbconn >= l->maxconn)) {
listener_full(l);
@ -1208,6 +1209,7 @@ int stream_sock_accept(int fd)
struct sockaddr_storage addr;
socklen_t laddr = sizeof(addr);
loops++;
cfd = accept(fd, (struct sockaddr *)&addr, &laddr);
if (unlikely(cfd == -1)) {
switch (errno) {
@ -1220,16 +1222,14 @@ int stream_sock_accept(int fd)
send_log(p, LOG_EMERG,
"Proxy %s reached system FD limit at %d. Please check system tunables.\n",
p->id, maxfd);
if (l->nbconn)
listener_full(l);
limit_listener(l, &global_listener_queue);
return 0;
case EMFILE:
if (p)
send_log(p, LOG_EMERG,
"Proxy %s reached process FD limit at %d. Please check 'ulimit-n' and restart.\n",
p->id, maxfd);
if (l->nbconn)
listener_full(l);
limit_listener(l, &global_listener_queue);
return 0;
case ENOBUFS:
case ENOMEM:
@ -1237,8 +1237,7 @@ int stream_sock_accept(int fd)
send_log(p, LOG_EMERG,
"Proxy %s reached system memory limit at %d sockets. Please check system tunables.\n",
p->id, maxfd);
if (l->nbconn)
listener_full(l);
limit_listener(l, &global_listener_queue);
return 0;
default:
return 0;
@ -1250,6 +1249,7 @@ int stream_sock_accept(int fd)
"Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
p->id);
close(cfd);
limit_listener(l, &global_listener_queue);
return 0;
}
@ -1276,10 +1276,7 @@ int stream_sock_accept(int fd)
if (ret == 0) /* successful termination */
continue;
if (p) {
disable_listener(l);
p->state = PR_STIDLE;
}
limit_listener(l, &global_listener_queue);
return 0;
}
@ -1289,6 +1286,11 @@ int stream_sock_accept(int fd)
}
} /* end of while (p->feconn < p->maxconn) */
/* if we did not even enter the loop, we've reached resource limits */
if (!loops && max_accept)
limit_listener(l, &global_listener_queue);
return 0;
}