MINOR: lb/map: use seek lock and read locks where appropriate

- map_get_server_hash() doesn't need a write lock since it only
  reads the array, let's only use a read lock here.

- map_get_server_rr() only needs exclusivity to adjust the rr_idx
  while looking for its entry. Since this one is not used by
  map_get_server_hash(), let's turn this lock to a seek lock that
  doesn't block reads.

With 8 threads, no significant performance difference was noticed
given that lookups are usually instant with this LB algo so the
lock contention is rare.
This commit is contained in:
Willy Tarreau 2020-10-17 18:55:18 +02:00
parent cd10def825
commit ae99aeb135

View File

@@ -216,7 +216,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
int newidx, avoididx;
struct server *srv, *avoided;
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
if (px->lbprm.tot_weight == 0) {
avoided = NULL;
goto out;
@@ -248,7 +248,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
px->lbprm.map.rr_idx = avoididx;
out:
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
/* return NULL or srvtoavoid if found */
return avoided;
}
@@ -265,10 +265,10 @@ struct server *map_get_server_hash(struct proxy *px, unsigned int hash)
{
struct server *srv = NULL;
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
if (px->lbprm.tot_weight)
srv = px->lbprm.map.srv[hash % px->lbprm.tot_weight];
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
return srv;
}