Revert "MINOR: queue: make pendconn_first() take the lock by itself"

This reverts commit 772e968b06.

The recent changes since 5304669e1 ("MEDIUM: queue: make
pendconn_process_next_strm() only return the pendconn") opened a tiny race
condition between stream_free() and process_srv_queue(), as the pendconn
is accessed outside of the lock, possibly while it's being freed. A
different approach is required.
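
For readers who want the failure mode spelled out, here is a minimal standalone sketch of that class of race, written with plain pthreads rather than HAProxy's queue types and locks; every name in it (elem, consume, destroy, sink) is invented for illustration and none of it is the actual HAProxy code:

/* Illustration only: one thread looks an element up under the lock but keeps
 * using it after dropping the lock, while another thread frees it. */
#include <pthread.h>
#include <stdlib.h>

struct elem {
	int value;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct elem *head;
static volatile int sink;

/* plays the process_srv_queue() role: pick under the lock, use after unlock */
static void *consume(void *arg)
{
	pthread_mutex_lock(&lock);
	struct elem *e = head;          /* lookup done under the lock        */
	pthread_mutex_unlock(&lock);    /* lock dropped here...              */

	if (e)
		sink = e->value;        /* ...yet <e> is still dereferenced  */
	return NULL;
}

/* plays the stream_free() role: detach and free the element */
static void *destroy(void *arg)
{
	pthread_mutex_lock(&lock);
	struct elem *e = head;
	head = NULL;
	pthread_mutex_unlock(&lock);
	free(e);                        /* may run between the unlock and the read above */
	return NULL;
}

int main(void)
{
	head = calloc(1, sizeof(*head));

	pthread_t a, b;
	pthread_create(&a, NULL, consume, NULL);
	pthread_create(&b, NULL, destroy, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

A sanitizer build will intermittently flag the read in consume() as a use-after-free, depending on scheduling; that is the same kind of window the message above describes between stream_free() and process_srv_queue().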
Willy Tarreau 2021-06-24 07:20:26 +02:00
parent bbab3bf22b
commit 2bf3f2cf7f


@@ -213,32 +213,22 @@ void pendconn_unlink(struct pendconn *p)
 	}
 }
 
-/* Retrieve the first pendconn from queue <queue>, which must *not* be
- * locked. The queue will be locked as needed, and will be left locked
- * if an element is returned, in which case it will be up to the caller
- * to unlock it.
- * Classes are always considered first, then the time offset. The time
- * does wrap, so the lookup is performed twice, one to retrieve the first
- * class and a second time to retrieve the earliest time in this class.
+/* Retrieve the first pendconn from tree <pendconns>. Classes are always
+ * considered first, then the time offset. The time does wrap, so the
+ * lookup is performed twice, one to retrieve the first class and a second
+ * time to retrieve the earliest time in this class.
  */
-static struct pendconn *pendconn_first(struct queue *queue)
+static struct pendconn *pendconn_first(struct eb_root *pendconns)
 {
 	struct eb32_node *node, *node2 = NULL;
 	u32 key;
 
-	if (!queue->length)
+	node = eb32_first(pendconns);
+	if (!node)
 		return NULL;
 
-	HA_SPIN_LOCK(QUEUE_LOCK, &queue->lock);
-	node = eb32_first(&queue->head);
-	if (!node) {
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &queue->lock);
-		return NULL;
-	}
-
 	key = KEY_CLASS_OFFSET_BOUNDARY(node->key);
-	node2 = eb32_lookup_ge(&queue->head, key);
+	node2 = eb32_lookup_ge(pendconns, key);
 
 	if (!node2 ||
 	    KEY_CLASS(node2->key) != KEY_CLASS(node->key)) {
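
The restored comment is terse about why the lookup is performed twice, so here is a toy model of the same idea using a sorted array instead of an eb32 tree. The 12-bit class / 20-bit offset split and the BOUNDARY() macro are assumptions made for this sketch rather than facts taken from the diff, and the sketch deliberately stops at the class check where the real function continues:

/* Toy model: keys pack a priority class in the upper bits and a wrapping
 * time offset in the lower bits.  Pass 1 finds the lowest class; pass 2
 * restarts the search at that class's "now" boundary so that entries whose
 * time wrapped below it do not shadow the genuinely earliest one. */
#include <stdint.h>
#include <stddef.h>

#define CLASS_OF(k)            ((k) & 0xfff00000u)          /* upper 12 bits  */
#define BOUNDARY(k, now_off)   (CLASS_OF(k) | ((now_off) & 0x000fffffu))

/* smallest key >= <key> in the ascending array <keys>, or -1 if none:
 * stands in for eb32_lookup_ge() */
static int lookup_ge(const uint32_t *keys, size_t n, uint32_t key)
{
	for (size_t i = 0; i < n; i++)
		if (keys[i] >= key)
			return (int)i;
	return -1;
}

/* index of the entry to serve first: lowest class, then earliest time
 * relative to <now_off>, taking wrapping into account */
static int first_entry(const uint32_t *keys, size_t n, uint32_t now_off)
{
	if (!n)
		return -1;

	/* pass 1: the smallest key overall, keys[0], fixes the lowest class */
	/* pass 2: first entry of that class at or after the "now" boundary */
	int ge = lookup_ge(keys, n, BOUNDARY(keys[0], now_off));

	if (ge < 0 || CLASS_OF(keys[ge]) != CLASS_OF(keys[0]))
		return 0;	/* everything in that class wrapped: keep keys[0] */
	return ge;
}

The only point is the shape of the algorithm: the first probe fixes the class, the second probe restarts at that class's current-time boundary so that offsets which wrapped around do not hide the earliest entry in the class.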
@@ -279,9 +269,21 @@ static struct pendconn *pendconn_process_next_strm(struct server *srv, struct pr
 	struct pendconn *pp = NULL;
 	u32 pkey, ppkey;
 
-	p = pendconn_first(&srv->queue);
-	if (px_ok)
-		pp = pendconn_first(&px->queue);
+	p = NULL;
+	if (srv->queue.length) {
+		HA_SPIN_LOCK(QUEUE_LOCK, &srv->queue.lock);
+		p = pendconn_first(&srv->queue.head);
+		if (!p)
+			HA_SPIN_UNLOCK(QUEUE_LOCK, &srv->queue.lock);
+	}
+
+	pp = NULL;
+	if (px_ok && px->queue.length) {
+		HA_SPIN_LOCK(QUEUE_LOCK, &px->queue.lock);
+		pp = pendconn_first(&px->queue.head);
+		if (!pp)
+			HA_SPIN_UNLOCK(QUEUE_LOCK, &px->queue.lock);
+	}
 
 	if (!p && !pp)
 		return NULL;
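
Condensing the hunk above into one place, the locking contract it restores looks roughly like the sketch below; the mock_queue type, the mock_first() helper and the pthread mutex are stand-ins invented so the fragment compiles on its own, and the real dequeue logic is omitted:

/* Standalone restatement of the pattern the hunk above goes back to: an
 * unlocked emptiness check first, then the lookup under the lock, and the
 * lock is released immediately when nothing was found. */
#include <pthread.h>
#include <stddef.h>

struct mock_queue {
	pthread_mutex_t lock;
	unsigned int length;
	void *head;                         /* stands in for the eb_root */
};

static void *mock_first(void *head)         /* stands in for pendconn_first() */
{
	return head;
}

static void *dequeue_candidate(struct mock_queue *q)
{
	void *p = NULL;

	if (!q->length)                     /* cheap check done without the lock */
		return NULL;

	pthread_mutex_lock(&q->lock);
	p = mock_first(q->head);            /* the real lookup happens under the lock */
	if (!p)
		pthread_mutex_unlock(&q->lock); /* nothing found: release right away */

	/* on success the lock is still held: the caller unlinks <p> and is
	 * responsible for unlocking afterwards */
	return p;
}

The unlocked length check may be stale, but the worst case visible in this sketch is taking the lock and finding nothing, which is handled by unlocking again right away; when an entry is found, the lock is intentionally kept so the caller can unlink it while still holding it.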
@@ -508,17 +510,19 @@ int pendconn_grab_from_px(struct server *s)
 	    ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
 		return 0;
 
+	HA_SPIN_LOCK(QUEUE_LOCK, &s->proxy->queue.lock);
 	maxconn = srv_dynamic_maxconn(s);
-	while ((!s->maxconn || s->served + xferred < maxconn) &&
-	       (p = pendconn_first(&s->proxy->queue))) {
-		__pendconn_unlink_prx(p);
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
-
+	while ((p = pendconn_first(&s->proxy->queue.head))) {
+		if (s->maxconn && s->served + xferred >= maxconn)
+			break;
+
+		__pendconn_unlink_prx(p);
 		p->target = s;
 		task_wakeup(p->strm->task, TASK_WOKEN_RES);
 		xferred++;
 	}
+	HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
 	if (xferred) {
 		_HA_ATOMIC_SUB(&s->proxy->queue.length, xferred);
 		_HA_ATOMIC_SUB(&s->proxy->totpend, xferred);