From 58bc9c1ced43b7802200d4d71e0eb5f850e0e801 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 17 Oct 2020 19:32:09 +0200 Subject: [PATCH] MINOR: lb/leastconn: only take a read lock in fwlc_get_next_server() This function doesn't change the tree, it only looks for the first usable server, so let's do that under a read lock to limit the situations like the ones described in issue #881 where finding a usable server when dealing with lots of saturated ones can be expensive. At least threads will now be able to look up in parallel. It's interesting to note that s->served is not incremented during the server choice, nor is the server repositioned. So right now already, nothing prevents multiple threads from picking the same server. This will not cause a significant imbalance anyway given that the server will automatically be repositioned at the right place, but this might be something to improve in the future if it doesn't come with too high a cost. It also looks like the way a server's weight is updated could be revisited so that the write lock gets tighter at the expense of a short part of inconsistency between weights and servers still present in the tree. --- src/lb_fwlc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lb_fwlc.c b/src/lb_fwlc.c index 882dd7d43..78a38a434 100644 --- a/src/lb_fwlc.c +++ b/src/lb_fwlc.c @@ -298,7 +298,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid) srv = avoided = NULL; - HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock); + HA_RWLOCK_RDLOCK(LBPRM_LOCK, &p->lbprm.lock); if (p->srv_act) node = eb32_first(&p->lbprm.fwlc.act); else if (p->lbprm.fbck) { @@ -334,7 +334,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid) if (!srv) srv = avoided; out: - HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock); + HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &p->lbprm.lock); return srv; }