MINOR: queue: make pendconn_first() take the lock by itself

Dealing with the queue lock in the caller remains complicated. Let's
change pendconn_first() to take the queue instead of the tree head,
and handle the lock itself. It now returns either an element with the
queue left locked, or no element with the queue left unlocked. It can
also avoid taking the lock entirely when the queue is already empty.
This commit is contained in:
Willy Tarreau 2021-06-18 20:32:50 +02:00
parent 5b39275311
commit 772e968b06

View File

@ -213,22 +213,32 @@ void pendconn_unlink(struct pendconn *p)
} }
} }
/* Retrieve the first pendconn from tree <pendconns>. Classes are always /* Retrieve the first pendconn from queue <queue>, which must *not* be
* considered first, then the time offset. The time does wrap, so the * locked. The queue will be locked as needed, and will be left locked
* lookup is performed twice, one to retrieve the first class and a second * if an element is returned, in which case it will be up to the caller
* time to retrieve the earliest time in this class. * to unlock it.
* Classes are always considered first, then the time offset. The time
* does wrap, so the lookup is performed twice, one to retrieve the first
* class and a second time to retrieve the earliest time in this class.
*/ */
static struct pendconn *pendconn_first(struct eb_root *pendconns) static struct pendconn *pendconn_first(struct queue *queue)
{ {
struct eb32_node *node, *node2 = NULL; struct eb32_node *node, *node2 = NULL;
u32 key; u32 key;
node = eb32_first(pendconns); if (!queue->length)
if (!node)
return NULL; return NULL;
HA_SPIN_LOCK(QUEUE_LOCK, &queue->lock);
node = eb32_first(&queue->head);
if (!node) {
HA_SPIN_UNLOCK(QUEUE_LOCK, &queue->lock);
return NULL;
}
key = KEY_CLASS_OFFSET_BOUNDARY(node->key); key = KEY_CLASS_OFFSET_BOUNDARY(node->key);
node2 = eb32_lookup_ge(pendconns, key); node2 = eb32_lookup_ge(&queue->head, key);
if (!node2 || if (!node2 ||
KEY_CLASS(node2->key) != KEY_CLASS(node->key)) { KEY_CLASS(node2->key) != KEY_CLASS(node->key)) {
@ -269,21 +279,9 @@ static struct pendconn *pendconn_process_next_strm(struct server *srv, struct pr
struct pendconn *pp = NULL; struct pendconn *pp = NULL;
u32 pkey, ppkey; u32 pkey, ppkey;
p = NULL; p = pendconn_first(&srv->queue);
if (srv->queue.length) { if (px_ok)
HA_SPIN_LOCK(QUEUE_LOCK, &srv->queue.lock); pp = pendconn_first(&px->queue);
p = pendconn_first(&srv->queue.head);
if (!p)
HA_SPIN_UNLOCK(QUEUE_LOCK, &srv->queue.lock);
}
pp = NULL;
if (px_ok && px->queue.length) {
HA_SPIN_LOCK(QUEUE_LOCK, &px->queue.lock);
pp = pendconn_first(&px->queue.head);
if (!pp)
HA_SPIN_UNLOCK(QUEUE_LOCK, &px->queue.lock);
}
if (!p && !pp) if (!p && !pp)
return NULL; return NULL;
@ -510,19 +508,17 @@ int pendconn_grab_from_px(struct server *s)
((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK)))) ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
return 0; return 0;
HA_SPIN_LOCK(QUEUE_LOCK, &s->proxy->queue.lock);
maxconn = srv_dynamic_maxconn(s); maxconn = srv_dynamic_maxconn(s);
while ((p = pendconn_first(&s->proxy->queue.head))) { while ((!s->maxconn || s->served + xferred < maxconn) &&
if (s->maxconn && s->served + xferred >= maxconn) (p = pendconn_first(&s->proxy->queue))) {
break;
__pendconn_unlink_prx(p); __pendconn_unlink_prx(p);
HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
p->target = s; p->target = s;
task_wakeup(p->strm->task, TASK_WOKEN_RES); task_wakeup(p->strm->task, TASK_WOKEN_RES);
xferred++; xferred++;
} }
HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
if (xferred) { if (xferred) {
_HA_ATOMIC_SUB(&s->proxy->queue.length, xferred); _HA_ATOMIC_SUB(&s->proxy->queue.length, xferred);
_HA_ATOMIC_SUB(&s->proxy->totpend, xferred); _HA_ATOMIC_SUB(&s->proxy->totpend, xferred);