MINOR: lb/api: let callers of take_conn/drop_conn tell if they have the lock

The two algos defining these functions (first and leastconn) do not need the
server's lock. However, it is already held in pendconn_process_next_strm(), so
the API must be updated so that the functions may take it if needed and so
that callers can indicate whether they already own it.

As such, the calling places (backend.c and stream.c) no longer take it,
queue.c is unchanged since the lock is already held there, and both "first"
and "leastconn" were updated to take it when not already held.

A quick test on the "first" algo showed a jump from 432 to 565k rps by
just dropping the lock in stream.c!
Author: Willy Tarreau
Date:   2021-02-17 16:01:37 +01:00
Parent: b9ad30a8ad
Commit: 59b0fecfd9
6 changed files with 34 additions and 26 deletions
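
In short, the new API works like this: server_take_conn() and server_drop_conn()
gain a <locked> argument; a caller that already owns the server's lock passes 1,
any other caller passes 0 and the callback takes (and releases) the lock itself
when it needs it. The stand-alone sketch below only models that convention; the
toy_* names are made up and a pthread mutex stands in for HAProxy's HA_SPIN_LOCK.

/* Stand-alone model of the new calling convention. Illustrative only:
 * the toy_* names are hypothetical and pthread mutexes replace HA_SPIN_LOCK. */
#include <pthread.h>
#include <stdio.h>

struct toy_server {
	pthread_mutex_t lock;
	int repositioned;
};

/* Same shape as server_take_conn()/server_drop_conn(): <locked> tells
 * whether the caller already owns the server's lock, so the function
 * only takes (and releases) it when it is not already held. */
static void toy_srv_reposition(struct toy_server *s, int locked)
{
	if (!locked)
		pthread_mutex_lock(&s->lock);

	/* ... requeuing the server in the LB tree would happen here ... */
	s->repositioned++;

	if (!locked)
		pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct toy_server srv = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/* caller that does not hold the lock (as in backend.c / stream.c) */
	toy_srv_reposition(&srv, 0);

	/* caller that already holds the lock (as in queue.c) */
	pthread_mutex_lock(&srv.lock);
	toy_srv_reposition(&srv, 1);
	pthread_mutex_unlock(&srv.lock);

	printf("repositioned %d times\n", srv.repositioned);
	return 0;
}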

include/haproxy/backend-t.h

@@ -160,12 +160,15 @@ struct lbprm {
 	__decl_thread(HA_RWLOCK_T lock);
 	struct server *fbck;		/* first backup server when !PR_O_USE_ALL_BK, or NULL */
-	/* Call backs for some actions. Any of them may be NULL (thus should be ignored). */
-	void (*update_server_eweight)(struct server *);  /* to be called after eweight change */
-	void (*set_server_status_up)(struct server *);   /* to be called after status changes to UP */
-	void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN */
-	void (*server_take_conn)(struct server *);       /* to be called when connection is assigned */
-	void (*server_drop_conn)(struct server *);       /* to be called when connection is dropped */
+	/* Call backs for some actions. Any of them may be NULL (thus should be ignored).
+	 * Those marked "srvlock" will need to be called with the server lock held.
+	 * The other ones might take it themselves if needed, based on indications.
+	 */
+	void (*update_server_eweight)(struct server *);  /* to be called after eweight change // srvlock */
+	void (*set_server_status_up)(struct server *);   /* to be called after status changes to UP // srvlock */
+	void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN // srvlock */
+	void (*server_take_conn)(struct server *, int locked);  /* to be called when connection is assigned */
+	void (*server_drop_conn)(struct server *, int locked);  /* to be called when connection is dropped */
 };
 #endif /* _HAPROXY_BACKEND_T_H */

src/backend.c

@@ -1670,11 +1670,8 @@ int connect_server(struct stream *s)
 		s->flags |= SF_CURR_SESS;
 		count = _HA_ATOMIC_ADD(&srv->cur_sess, 1);
 		HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
-		if (s->be->lbprm.server_take_conn) {
-			HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
-			s->be->lbprm.server_take_conn(srv);
-			HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
-		}
+		if (s->be->lbprm.server_take_conn)
+			s->be->lbprm.server_take_conn(srv, 0);
 	}
 	/* Now handle synchronously connected sockets. We know the stream-int

src/lb_fas.c

@@ -61,16 +61,23 @@ static inline void fas_queue_srv(struct server *s)
  * connection or after it has released one. Note that it is possible that
  * the server has been moved out of the tree due to failed health-checks.
  *
- * The server's lock must be held. The lbprm's lock will be used.
+ * <locked> must reflect the server's lock ownership. The lbprm's lock will
+ * be used.
  */
-static void fas_srv_reposition(struct server *s)
+static void fas_srv_reposition(struct server *s, int locked)
 {
+	if (!locked)
+		HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+
 	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 	if (s->lb_tree) {
 		fas_dequeue_srv(s);
 		fas_queue_srv(s);
 	}
 	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+
+	if (!locked)
+		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 }
 /* This function updates the server trees according to server <srv>'s new

src/lb_fwlc.c

@@ -67,16 +67,23 @@ static inline void fwlc_queue_srv(struct server *s, unsigned int eweight)
  * connection or after it has released one. Note that it is possible that
  * the server has been moved out of the tree due to failed health-checks.
  *
- * The server's lock must be held. The lbprm's lock will be used.
+ * <locked> must reflect the server's lock ownership. The lbprm's lock will
+ * be used.
  */
-static void fwlc_srv_reposition(struct server *s)
+static void fwlc_srv_reposition(struct server *s, int locked)
 {
+	if (!locked)
+		HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+
 	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
 	if (s->lb_tree) {
 		fwlc_dequeue_srv(s);
 		fwlc_queue_srv(s, s->cur_eweight);
 	}
 	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+
+	if (!locked)
+		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 }
 /* This function updates the server trees according to server <srv>'s new
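
The hunks above change the reposition functions themselves; where they get
installed as lbprm callbacks is outside this diff. The self-contained sketch
below mirrors the overall pattern, a single reposition function registered for
both server_take_conn and server_drop_conn and guarded by a NULL check at the
call sites, using made-up toy_* types rather than HAProxy's real ones.

/* Stand-alone sketch of the callback wiring and call-site pattern.
 * Names and types are hypothetical; only the shape matches the diff. */
#include <stddef.h>
#include <stdio.h>

struct toy_srv { int pos; };

struct toy_lbprm {
	void (*server_take_conn)(struct toy_srv *, int locked);
	void (*server_drop_conn)(struct toy_srv *, int locked);
};

static void toy_reposition(struct toy_srv *s, int locked)
{
	(void)locked;   /* lock handling elided in this sketch */
	s->pos++;       /* stand-in for requeuing in the LB tree */
}

int main(void)
{
	struct toy_lbprm lb = { NULL, NULL };
	struct toy_srv srv = { 0 };

	/* an algo pointing both callbacks at one reposition function */
	lb.server_take_conn = toy_reposition;
	lb.server_drop_conn = toy_reposition;

	/* callers always test for NULL first, as in the hunks above */
	if (lb.server_take_conn)
		lb.server_take_conn(&srv, 0);   /* lock not held by caller */
	if (lb.server_drop_conn)
		lb.server_drop_conn(&srv, 1);   /* caller already owns it */

	printf("repositioned %d times\n", srv.pos);
	return 0;
}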

src/queue.c

@@ -327,7 +327,7 @@ static int pendconn_process_next_strm(struct server *srv, struct proxy *px)
 	_HA_ATOMIC_ADD(&srv->proxy->served, 1);
 	__ha_barrier_atomic_store();
 	if (px->lbprm.server_take_conn)
-		px->lbprm.server_take_conn(srv);
+		px->lbprm.server_take_conn(srv, 1);
 	stream_add_srv_conn(p->strm, srv);
 	task_wakeup(p->strm->task, TASK_WOKEN_RES);

src/stream.c

@@ -2541,11 +2541,8 @@ void sess_change_server(struct stream *sess, struct server *newsrv)
 		_HA_ATOMIC_SUB(&oldsrv->served, 1);
 		_HA_ATOMIC_SUB(&oldsrv->proxy->served, 1);
 		__ha_barrier_atomic_store();
-		if (oldsrv->proxy->lbprm.server_drop_conn) {
-			HA_SPIN_LOCK(SERVER_LOCK, &oldsrv->lock);
-			oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
-			HA_SPIN_UNLOCK(SERVER_LOCK, &oldsrv->lock);
-		}
+		if (oldsrv->proxy->lbprm.server_drop_conn)
+			oldsrv->proxy->lbprm.server_drop_conn(oldsrv, 0);
 		stream_del_srv_conn(sess);
 	}
@@ -2553,11 +2550,8 @@ void sess_change_server(struct stream *sess, struct server *newsrv)
 		_HA_ATOMIC_ADD(&newsrv->served, 1);
 		_HA_ATOMIC_ADD(&newsrv->proxy->served, 1);
 		__ha_barrier_atomic_store();
-		if (newsrv->proxy->lbprm.server_take_conn) {
-			HA_SPIN_LOCK(SERVER_LOCK, &newsrv->lock);
-			newsrv->proxy->lbprm.server_take_conn(newsrv);
-			HA_SPIN_UNLOCK(SERVER_LOCK, &newsrv->lock);
-		}
+		if (newsrv->proxy->lbprm.server_take_conn)
+			newsrv->proxy->lbprm.server_take_conn(newsrv, 0);
 		stream_add_srv_conn(sess, newsrv);
 	}
 }