/*
 * include/proto/server.h
 * This file defines everything related to servers.
 *
 * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_SERVER_H
#define _PROTO_SERVER_H

#include <unistd.h>

#include <common/config.h>
#include <common/time.h>
#include <types/applet.h>
#include <types/dns.h>
#include <types/proxy.h>
#include <types/queue.h>
#include <types/server.h>

#include <proto/queue.h>
#include <proto/log.h>
#include <proto/freq_ctr.h>

__decl_hathreads(extern HA_SPINLOCK_T idle_conn_srv_lock);
extern struct eb_root idle_conn_srv;
extern struct task *idle_conn_task;
extern struct task *idle_conn_cleanup[MAX_THREADS];
extern struct mt_list toremove_connections[MAX_THREADS];

int srv_downtime(const struct server *s);
int srv_lastsession(const struct server *s);
int srv_getinter(const struct check *check);
int parse_server(const char *file, int linenum, char **args, struct proxy *curproxy, struct proxy *defproxy, int parse_addr);
int update_server_addr(struct server *s, void *ip, int ip_sin_family, const char *updater);
const char *update_server_addr_port(struct server *s, const char *addr, const char *port, char *updater);
struct server *server_find_by_id(struct proxy *bk, int id);
struct server *server_find_by_name(struct proxy *bk, const char *name);
struct server *server_find_best_match(struct proxy *bk, char *name, int id, int *diff);
void apply_server_state(void);
void srv_compute_all_admin_states(struct proxy *px);
int srv_set_addr_via_libc(struct server *srv, int *err_code);
int srv_init_addr(void);
struct server *cli_find_server(struct appctx *appctx, char *arg);
struct server *new_server(struct proxy *proxy);

/* functions related to server name resolution */
int snr_update_srv_status(struct server *s, int has_no_ip);
const char *update_server_fqdn(struct server *server, const char *fqdn, const char *updater, int dns_locked);
int snr_resolution_cb(struct dns_requester *requester, struct dns_nameserver *nameserver);
int snr_resolution_error_cb(struct dns_requester *requester, int error_code);
struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family);
struct task *srv_cleanup_idle_connections(struct task *task, void *ctx, unsigned short state);
struct task *srv_cleanup_toremove_connections(struct task *task, void *context, unsigned short state);

/* increase the number of cumulated connections on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
	_HA_ATOMIC_ADD(&s->counters.cum_sess, 1);
	HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
	                     update_freq_ctr(&s->sess_per_sec, 1));
}

/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
	s->counters.last_sess = now.tv_sec;
}

/*
 * Registers the server keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void srv_register_keywords(struct srv_kw_list *kwl);

/* Return a pointer to the server keyword <kw>, or NULL if not found. */
struct srv_kw *srv_find_kw(const char *kw);

/* Dumps all registered "server" keywords to the <out> string pointer. */
void srv_dump_kws(char **out);
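
/* Usage sketch (illustrative only, not part of this API): a module wanting to
 * handle its own "server" keywords would typically declare a keyword list and
 * register it from a startup hook such as an INITCALL. The keyword name and
 * parser below are hypothetical.
 *
 *	static int srv_parse_example(char **args, int *cur_arg, struct proxy *px,
 *	                             struct server *srv, char **err);
 *
 *	static struct srv_kw_list example_kws = { "EXAMPLE", { }, {
 *		{ "example-opt", srv_parse_example, 1, 1 },
 *		{ NULL, NULL, 0, 0 },
 *	}};
 *
 *	INITCALL1(STG_REGISTER, srv_register_keywords, &example_kws);
 */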

/* Recomputes the server's eweight based on its state, uweight, the current time,
 * and the proxy's algorithm. To be used after updating sv->uweight. The warmup
 * state is automatically disabled once the warmup period has elapsed.
 */
void server_recalc_eweight(struct server *sv, int must_update);

/* returns the current server throttle rate between 0 and 100% */
static inline unsigned int server_throttle_rate(struct server *sv)
{
	struct proxy *px = sv->proxy;

	/* when uweight is 0, we're in soft-stop so that cannot be a slowstart,
	 * thus the throttle is 100%.
	 */
	if (!sv->uweight)
		return 100;

	return (100U * px->lbprm.wmult * sv->cur_eweight + px->lbprm.wdiv - 1) / (px->lbprm.wdiv * sv->uweight);
}
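
/* Worked example (illustrative figures only): with wmult = wdiv = 1 and a
 * configured uweight of 100, a server half-way through its slowstart ramp-up
 * (cur_eweight = 50) yields (100 * 1 * 50 + 1 - 1) / (1 * 100) = 50, i.e. a
 * throttle rate of 50%.
 */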

/*
 * Parses weight_str and configures sv accordingly.
 * Returns NULL on success, error message string otherwise.
 */
const char *server_parse_weight_change_request(struct server *sv,
                                               const char *weight_str);
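
/* Usage sketch (illustrative): this helper backs weight changes requested at
 * run time (e.g. "set server ... weight" on the CLI); the string is typically
 * an absolute weight or a percentage of the configured weight. Error handling
 * below is hypothetical.
 *
 *	const char *msg = server_parse_weight_change_request(sv, "50%");
 *	if (msg)
 *		ha_alert("%s\n", msg);	// hypothetical error reporting
 */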

/*
 * Parses addr_str and configures sv accordingly. <updater> identifies the
 * source of the change in the associated log message.
 * Returns NULL on success, error message string otherwise.
 */
const char *server_parse_addr_change_request(struct server *sv,
                                             const char *addr_str, const char *updater);
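
/* Usage sketch (illustrative): callers pass the new address as text plus a
 * short <updater> tag naming where the change came from. The tag and error
 * handling below are hypothetical.
 *
 *	const char *msg = server_parse_addr_change_request(sv, "192.168.0.42",
 *	                                                   "stats socket command");
 *	if (msg)
 *		ha_alert("%s\n", msg);	// hypothetical error reporting
 */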

/*
 * Parses maxconn_str and configures sv accordingly.
 * Returns NULL on success, error message string otherwise.
 */
const char *server_parse_maxconn_change_request(struct server *sv,
                                                const char *maxconn_str);

/*
 * Return true if the server has a zero user-weight, meaning it's in draining
 * mode (ie: not taking new non-persistent connections).
 */
static inline int server_is_draining(const struct server *s)
{
	return !s->uweight || (s->cur_admin & SRV_ADMF_DRAIN);
}

/* Shutdown all connections of a server. The caller must pass a termination
 * code in <why>, which must be one of SF_ERR_* indicating the reason for the
 * shutdown.
 */
void srv_shutdown_streams(struct server *srv, int why);

/* Shutdown all connections of all backup servers of a proxy. The caller must
 * pass a termination code in <why>, which must be one of SF_ERR_* indicating
 * the reason for the shutdown.
 */
void srv_shutdown_backup_streams(struct proxy *px, int why);

void srv_append_status(struct buffer *msg, struct server *s, struct check *,
                       int xferred, int forced);

void srv_set_stopped(struct server *s, const char *reason, struct check *check);
void srv_set_running(struct server *s, const char *reason, struct check *check);
void srv_set_stopping(struct server *s, const char *reason, struct check *check);

/* Enables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
 * enforce either maint mode or drain mode. It is not allowed to set more than
 * one flag at once. The equivalent "inherited" flag is propagated to all
 * tracking servers. Maintenance mode disables health checks (but not agent
 * checks). When either the flag is already set or no flag is passed, nothing
 * is done. If <cause> is non-null, it will be displayed at the end of the log
 * lines to justify the state change.
 */
void srv_set_admin_flag(struct server *s, enum srv_admin mode, const char *cause);

/* Disables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
 * stop enforcing either maint mode or drain mode. It is not allowed to set more
 * than one flag at once. The equivalent "inherited" flag is propagated to all
 * tracking servers. Leaving maintenance mode re-enables health checks. When
 * either the flag is already cleared or no flag is passed, nothing is done.
 */
void srv_clr_admin_flag(struct server *s, enum srv_admin mode);

/* Calculates the dynamic persistent cookie for a server, if a secret key has
 * been provided.
 */
void srv_set_dyncookie(struct server *s);

/* Puts server <s> into maintenance mode, and propagates that status down to all
 * tracking servers.
 */
static inline void srv_adm_set_maint(struct server *s)
{
	srv_set_admin_flag(s, SRV_ADMF_FMAINT, NULL);
	srv_clr_admin_flag(s, SRV_ADMF_FDRAIN);
}

/* Puts server <s> into drain mode, and propagates that status down to all
 * tracking servers.
 */
static inline void srv_adm_set_drain(struct server *s)
{
	srv_set_admin_flag(s, SRV_ADMF_FDRAIN, NULL);
	srv_clr_admin_flag(s, SRV_ADMF_FMAINT);
}

/* Puts server <s> into ready mode, and propagates that status down to all
 * tracking servers.
 */
static inline void srv_adm_set_ready(struct server *s)
{
	srv_clr_admin_flag(s, SRV_ADMF_FDRAIN);
	srv_clr_admin_flag(s, SRV_ADMF_FMAINT);
}

/* appends an initaddr method to the existing list. Returns 0 on failure. */
static inline int srv_append_initaddr(unsigned int *list, enum srv_initaddr addr)
{
	int shift = 0;

	while (shift + 3 < 32 && (*list >> shift))
		shift += 3;

	if (shift + 3 > 32)
		return 0;

	*list |= addr << shift;
	return 1;
}

/* returns the next initaddr method and removes it from <list> by shifting
 * it right (implying that <list> MUST NOT be the server's own list). Returns
 * SRV_IADDR_END at the end.
 */
static inline enum srv_initaddr srv_get_next_initaddr(unsigned int *list)
{
	enum srv_initaddr ret;

	ret = *list & 7;
	*list >>= 3;
	return ret;
}
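
/* Usage sketch (illustrative): the two helpers above pack init-addr methods as
 * 3-bit codes into a 32-bit word (ten slots), appended in order and consumed
 * in the same order, e.g. when building then walking a method sequence:
 *
 *	unsigned int methods = 0;
 *	enum srv_initaddr m;
 *
 *	srv_append_initaddr(&methods, SRV_IADDR_LIBC);
 *	srv_append_initaddr(&methods, SRV_IADDR_NONE);
 *	while ((m = srv_get_next_initaddr(&methods)) != SRV_IADDR_END) {
 *		// try method <m>, stop once an address is found
 *	}
 */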

/* This adds an idle connection to the server's list if the connection is
 * reusable, not held by any owner anymore, but still has available streams.
 */
static inline int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe)
{
	if (srv && srv->pool_purge_delay > 0 &&
	    (srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
	    (srv->cur_sess + srv->curr_idle_conns <= srv->counters.cur_sess_max) &&
	    !(conn->flags & CO_FL_PRIVATE) &&
	    ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn) &&
	    ha_used_fds < global.tune.pool_low_count) {
		int retadd;

		retadd = _HA_ATOMIC_ADD(&srv->curr_idle_conns, 1);
		if (retadd > srv->max_idle_conns) {
			_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
			return 0;
		}
		MT_LIST_DEL(&conn->list);
		if (is_safe) {
			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
			MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
			_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
		} else {
			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
			MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
			_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
		}
		_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);

		conn->idle_time = now_ms;
		__ha_barrier_full();
		if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
			HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
			if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
				srv->idle_node.key = tick_add(srv->pool_purge_delay,
				                              now_ms);
				eb32_insert(&idle_conn_srv, &srv->idle_node);
				if (!task_in_wq(idle_conn_task) &&
				    !task_in_rq(idle_conn_task)) {
					task_schedule(idle_conn_task,
					              srv->idle_node.key);
				}
			}
			HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);
		}
		return 1;
	}
	return 0;
}
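
/* Usage sketch (illustrative): a mux that has just released the last stream of
 * a backend connection would typically try to park it for reuse, and destroy
 * it when the server refuses to keep it. The fallback below is hypothetical.
 *
 *	if (!srv_add_to_idle_list(objt_server(conn->target), conn, is_safe))
 *		conn_free(conn);	// hypothetical handling when not parked
 */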

#endif /* _PROTO_SERVER_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */