MINOR: lists: rename some MT_LIST operations to clarify them

Initially when mt_lists were added, their purpose was to be used with
the scheduler, where anyone may concurrently add the same tasklet, so
it sounded natural to implement a check in MT_LIST_ADD{,Q}. Later their
usage was extended and MT_LIST_ADD{,Q} started to be used on situations
where the element to be added was exclusively owned by the one performing
the operation so a conflict was impossible. This became more obvious with
the idle connections and the new macro was called MT_LIST_ADDQ_NOCHECK.

But this remains confusing and at many places it's not expected that
an MT_LIST_ADD could possibly fail, and worse, at some places we start
by initializing it before adding (and the test is superfluous) so let's
rename them to something more conventional to denote the presence of the
check or not:

   MT_LIST_ADD{,Q}    : unconditional operation, the caller owns the
                        element, and doesn't care about the element's
                        current state (exactly like LIST_ADD)
   MT_LIST_TRY_ADD{,Q}: only perform the operation if the element is not
                        already added or in the process of being added.

This means that the previously "safe" MT_LIST_ADD{,Q} are not "safe"
anymore. This also means that in case of backport mistakes in the
future causing this to be overlooked, the slower and safer functions
will still be used by default.

Note that the missing unchecked MT_LIST_ADD macro was added.

The rest of the code will have to be reviewed so that a number of
callers of MT_LIST_TRY_ADDQ are changed to MT_LIST_ADDQ to remove
the unneeded test.
This commit is contained in:
Willy Tarreau 2020-07-10 08:10:29 +02:00
parent 5254321d14
commit de4db17dee
15 changed files with 62 additions and 29 deletions

View File

@ -850,7 +850,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
return 1;
if (!MT_LIST_ADDED(&wait->list))
MT_LIST_ADDQ(&buffer_wq, &wait->list);
MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list);
return 0;
}

View File

@ -225,7 +225,7 @@
* Returns 1 if we added the item, 0 otherwise (because it was already in a
* list).
*/
#define MT_LIST_ADD(_lh, _el) \
#define MT_LIST_TRY_ADD(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
@ -265,7 +265,7 @@
* Returns 1 if we added the item, 0 otherwise (because it was already in a
* list).
*/
#define MT_LIST_ADDQ(_lh, _el) \
#define MT_LIST_TRY_ADDQ(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
@ -300,11 +300,44 @@
(_ret); \
})
/*
 * Add an item at the beginning of a list.
 * It is assumed the element can't already be in a list, so it isn't
 * checked: unlike MT_LIST_TRY_ADD, no conflict detection is performed
 * and the caller must exclusively own <_el>.
 * The head's forward link and the neighbour's back link are taken by
 * swapping in MT_LIST_BUSY; if either is already busy the attempt is
 * rolled back and retried. Always evaluates to 1.
 */
#define MT_LIST_ADD(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
while (1) { \
struct mt_list *n; \
struct mt_list *p; \
n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
if (n == MT_LIST_BUSY) \
continue; \
p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
if (p == MT_LIST_BUSY) { \
(lh)->next = n; \
__ha_barrier_store(); \
continue; \
} \
(el)->next = n; \
(el)->prev = p; \
__ha_barrier_store(); \
n->prev = (el); \
__ha_barrier_store(); \
p->next = (el); \
__ha_barrier_store(); \
_ret = 1; \
break; \
} \
(_ret); \
})
/*
* Add an item at the end of a list.
* It is assumed the element can't already be in a list, so it isn't checked.
*/
#define MT_LIST_ADDQ_NOCHECK(_lh, _el) \
#define MT_LIST_ADDQ(_lh, _el) \
({ \
int _ret = 0; \
struct mt_list *lh = (_lh), *el = (_el); \
@ -336,7 +369,7 @@
/*
* Detach a list from its head. A pointer to the first element is returned
* and the list is closed. If the list was empty, NULL is returned. This may
* exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
* exclusively be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ. This
* is incompatible with MT_LIST_DEL run concurrently.
* If there's at least one element, the next of the last element will always
* be NULL.

View File

@ -273,11 +273,11 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
conn->idle_time = now_ms;
if (is_safe) {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
} else {
conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
}
_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);

View File

@ -352,7 +352,7 @@ static inline void tasklet_wakeup_on(struct tasklet *tl, int thr)
}
} else {
/* this tasklet runs on a specific thread */
if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) {
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@ -652,7 +652,7 @@ static inline int notification_registered(struct list *wake)
/* adds list item <item> to work list <work> and wake up the associated task */
static inline void work_list_add(struct work_list *work, struct mt_list *item)
{
MT_LIST_ADDQ(&work->head, item);
MT_LIST_TRY_ADDQ(&work->head, item);
task_wakeup(work->task, TASK_WOKEN_OTHER);
}

View File

@ -1345,7 +1345,7 @@ int connect_server(struct stream *s)
if (tokill_conn) {
/* We got one, put it into the concerned thread's to kill list, and wake it's kill task */
MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
MT_LIST_TRY_ADDQ(&idle_conns[i].toremove_conns,
(struct mt_list *)&tokill_conn->list);
task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock);

View File

@ -2807,7 +2807,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
if (b_alloc_margin(buf, global.tune.reserved_bufs))
return 1;
MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list);
return 0;
}

View File

@ -424,7 +424,7 @@ static void limit_listener(struct listener *l, struct mt_list *list)
{
HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
if (l->state == LI_READY) {
MT_LIST_ADDQ(list, &l->wait_queue);
MT_LIST_TRY_ADDQ(list, &l->wait_queue);
fd_stop_recv(l->fd);
l->state = LI_LIMITED;
}

View File

@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
fconn->buf_wait.target = fconn;
fconn->buf_wait.wakeup_cb = fcgi_buf_available;
MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list);
}
return buf;
}
@ -2956,9 +2956,9 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}

View File

@ -416,7 +416,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h1c->buf_wait.target = h1c;
h1c->buf_wait.wakeup_cb = h1_buf_available;
MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list);
}
return buf;
}
@ -2265,9 +2265,9 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}

View File

@ -683,7 +683,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
h2c->buf_wait.target = h2c;
h2c->buf_wait.wakeup_cb = h2_buf_available;
MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list);
}
return buf;
}
@ -3565,9 +3565,9 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
leave:

View File

@ -5207,7 +5207,7 @@ static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list
if (toremove_nb != -1 && i >= toremove_nb)
break;
MT_LIST_DEL_SAFE_NOINIT(elt1);
MT_LIST_ADDQ_NOCHECK(toremove_list, &conn->list);
MT_LIST_ADDQ(toremove_list, &conn->list);
i++;
}
return i;

View File

@ -5676,9 +5676,9 @@ leave:
struct server *srv = objt_server(conn->target);
if (conn_in_list == CO_FL_SAFE_LIST)
MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list);
else
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list);
}
return NULL;
}

View File

@ -728,7 +728,7 @@ static int stream_alloc_work_buffer(struct stream *s)
if (b_alloc_margin(&s->res.buf, 0))
return 1;
MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list);
return 0;
}

View File

@ -97,7 +97,7 @@ void task_kill(struct task *t)
/* Beware: tasks that have never run don't have their ->list empty yet! */
LIST_INIT(&((struct tasklet *)t)->list);
MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list,
(struct mt_list *)&((struct tasklet *)t)->list);
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
@ -582,7 +582,7 @@ void process_runnable_tasks()
* 100% due to rounding, this is not a problem. Note that while in
* theory the sum cannot be NULL as we cannot get there without tasklets
* to process, in practice it seldom happens when multiple writers
* conflict and rollback on MT_LIST_ADDQ(shared_tasklet_list), causing
* conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing
* a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
* one above to finally fail. This is extremely rare and not a problem.
*/

View File

@ -33,12 +33,12 @@ void *thread(void *pouet)
case 0:
lol = malloc(sizeof(*lol));
MT_LIST_INIT(&lol->list_elt);
MT_LIST_ADD(&pouet_list, &lol->list_elt);
MT_LIST_TRY_ADD(&pouet_list, &lol->list_elt);
break;
case 1:
lol = malloc(sizeof(*lol));
MT_LIST_INIT(&lol->list_elt);
MT_LIST_ADDQ(&pouet_list, &lol->list_elt);
MT_LIST_TRY_ADDQ(&pouet_list, &lol->list_elt);
break;
case 2: