From de4db17dee727f74668cca0e5e6d90a3cdf0c2cf Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Fri, 10 Jul 2020 08:10:29 +0200 Subject: [PATCH] MINOR: lists: rename some MT_LIST operations to clarify them Initially when mt_lists were added, their purpose was to be used with the scheduler, where anyone may concurrently add the same tasklet, so it sounded natural to implement a check in MT_LIST_ADD{,Q}. Later their usage was extended and MT_LIST_ADD{,Q} started to be used in situations where the element to be added was exclusively owned by the one performing the operation so a conflict was impossible. This became more obvious with the idle connections and the new macro was called MT_LIST_ADDQ_NOCHECK. But this remains confusing and at many places it's not expected that an MT_LIST_ADD could possibly fail, and worse, at some places we start by initializing it before adding (and the test is superfluous) so let's rename them to something more conventional to denote the presence of the check or not: MT_LIST_ADD{,Q} : unconditional operation, the caller owns the element, and doesn't care about the element's current state (exactly like LIST_ADD) MT_LIST_TRY_ADD{,Q}: only perform the operation if the element is not already added or in the process of being added. This means that the previously "safe" MT_LIST_ADD{,Q} are not "safe" anymore. This also means that in case of backport mistakes in the future causing this to be overlooked, the slower and safer functions will still be used by default. Note that the missing unchecked MT_LIST_ADD macro was added. The rest of the code will have to be reviewed so that a number of callers of MT_LIST_TRY_ADDQ are changed to MT_LIST_ADDQ to remove the unneeded test. 
--- include/haproxy/channel.h | 2 +- include/haproxy/list.h | 41 +++++++++++++++++++++++++++++++++++---- include/haproxy/server.h | 4 ++-- include/haproxy/task.h | 4 ++-- src/backend.c | 2 +- src/flt_spoe.c | 2 +- src/listener.c | 2 +- src/mux_fcgi.c | 6 +++--- src/mux_h1.c | 6 +++--- src/mux_h2.c | 6 +++--- src/server.c | 2 +- src/ssl_sock.c | 4 ++-- src/stream.c | 2 +- src/task.c | 4 ++-- tests/test-list.c | 4 ++-- 15 files changed, 62 insertions(+), 29 deletions(-) diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h index 3cd3bb3b1..004fc0374 100644 --- a/include/haproxy/channel.h +++ b/include/haproxy/channel.h @@ -850,7 +850,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait * return 1; if (!MT_LIST_ADDED(&wait->list)) - MT_LIST_ADDQ(&buffer_wq, &wait->list); + MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list); return 0; } diff --git a/include/haproxy/list.h b/include/haproxy/list.h index 9d376494e..dae5ca2c3 100644 --- a/include/haproxy/list.h +++ b/include/haproxy/list.h @@ -225,7 +225,7 @@ * Returns 1 if we added the item, 0 otherwise (because it was already in a * list). */ -#define MT_LIST_ADD(_lh, _el) \ +#define MT_LIST_TRY_ADD(_lh, _el) \ ({ \ int _ret = 0; \ struct mt_list *lh = (_lh), *el = (_el); \ @@ -265,7 +265,7 @@ * Returns 1 if we added the item, 0 otherwise (because it was already in a * list). */ -#define MT_LIST_ADDQ(_lh, _el) \ +#define MT_LIST_TRY_ADDQ(_lh, _el) \ ({ \ int _ret = 0; \ struct mt_list *lh = (_lh), *el = (_el); \ @@ -300,11 +300,44 @@ (_ret); \ }) +/* + * Add an item at the beginning of a list. + * It is assumed the element can't already be in a list, so it isn't checked. 
+ */ +#define MT_LIST_ADD(_lh, _el) \ + ({ \ + int _ret = 0; \ + struct mt_list *lh = (_lh), *el = (_el); \ + while (1) { \ + struct mt_list *n; \ + struct mt_list *p; \ + n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \ + if (n == MT_LIST_BUSY) \ + continue; \ + p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \ + if (p == MT_LIST_BUSY) { \ + (lh)->next = n; \ + __ha_barrier_store(); \ + continue; \ + } \ + (el)->next = n; \ + (el)->prev = p; \ + __ha_barrier_store(); \ + n->prev = (el); \ + __ha_barrier_store(); \ + p->next = (el); \ + __ha_barrier_store(); \ + _ret = 1; \ + break; \ + } \ + (_ret); \ + }) + /* * Add an item at the end of a list. * It is assumed the element can't already be in a list, so it isn't checked */ -#define MT_LIST_ADDQ_NOCHECK(_lh, _el) \ +#define MT_LIST_ADDQ(_lh, _el) \ ({ \ int _ret = 0; \ struct mt_list *lh = (_lh), *el = (_el); \ @@ -336,7 +369,7 @@ /* * Detach a list from its head. A pointer to the first element is returned * and the list is closed. If the list was empty, NULL is returned. This may - * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This + * exclusively be used with lists modified by MT_LIST_TRY_ADD/MT_LIST_TRY_ADDQ. This * is incompatible with MT_LIST_DEL run concurrently. * If there's at least one element, the next of the last element will always * be NULL. 
diff --git a/include/haproxy/server.h b/include/haproxy/server.h index 3ad60d0cd..9a400fdb7 100644 --- a/include/haproxy/server.h +++ b/include/haproxy/server.h @@ -273,11 +273,11 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co conn->idle_time = now_ms; if (is_safe) { conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST; - MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list); + MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list); _HA_ATOMIC_ADD(&srv->curr_safe_nb, 1); } else { conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST; - MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list); + MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list); _HA_ATOMIC_ADD(&srv->curr_idle_nb, 1); } _HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1); diff --git a/include/haproxy/task.h b/include/haproxy/task.h index db919913b..080240a75 100644 --- a/include/haproxy/task.h +++ b/include/haproxy/task.h @@ -352,7 +352,7 @@ static inline void tasklet_wakeup_on(struct tasklet *tl, int thr) } } else { /* this tasklet runs on a specific thread */ - if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) { + if (MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list) == 1) { _HA_ATOMIC_ADD(&tasks_run_queue, 1); if (sleeping_thread_mask & (1UL << thr)) { _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr)); @@ -652,7 +652,7 @@ static inline int notification_registered(struct list *wake) /* adds list item to work list and wake up the associated task */ static inline void work_list_add(struct work_list *work, struct mt_list *item) { - MT_LIST_ADDQ(&work->head, item); + MT_LIST_TRY_ADDQ(&work->head, item); task_wakeup(work->task, TASK_WOKEN_OTHER); } diff --git a/src/backend.c b/src/backend.c index 324d7565a..4a162a430 100644 --- a/src/backend.c +++ b/src/backend.c @@ -1345,7 +1345,7 @@ int 
connect_server(struct stream *s) if (tokill_conn) { /* We got one, put it into the concerned thread's to kill list, and wake it's kill task */ - MT_LIST_ADDQ(&idle_conns[i].toremove_conns, + MT_LIST_TRY_ADDQ(&idle_conns[i].toremove_conns, (struct mt_list *)&tokill_conn->list); task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER); HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].takeover_lock); diff --git a/src/flt_spoe.c b/src/flt_spoe.c index 62e535ef1..b6359c63d 100644 --- a/src/flt_spoe.c +++ b/src/flt_spoe.c @@ -2807,7 +2807,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait) if (b_alloc_margin(buf, global.tune.reserved_bufs)) return 1; - MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list); + MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list); return 0; } diff --git a/src/listener.c b/src/listener.c index cc49c705e..cad0e0cc8 100644 --- a/src/listener.c +++ b/src/listener.c @@ -424,7 +424,7 @@ static void limit_listener(struct listener *l, struct mt_list *list) { HA_SPIN_LOCK(LISTENER_LOCK, &l->lock); if (l->state == LI_READY) { - MT_LIST_ADDQ(list, &l->wait_queue); + MT_LIST_TRY_ADDQ(list, &l->wait_queue); fd_stop_recv(l->fd); l->state = LI_LIMITED; } diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c index a0509e66a..05723e8ce 100644 --- a/src/mux_fcgi.c +++ b/src/mux_fcgi.c @@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) { fconn->buf_wait.target = fconn; fconn->buf_wait.wakeup_cb = fcgi_buf_available; - MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list); + MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list); } return buf; } @@ -2956,9 +2956,9 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status) struct server *srv = objt_server(conn->target); if (conn_in_list == CO_FL_SAFE_LIST) - MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list); else - 
MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list); } return NULL; } diff --git a/src/mux_h1.c b/src/mux_h1.c index 80a7b7db5..0626a7dc2 100644 --- a/src/mux_h1.c +++ b/src/mux_h1.c @@ -416,7 +416,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr) unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) { h1c->buf_wait.target = h1c; h1c->buf_wait.wakeup_cb = h1_buf_available; - MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list); + MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list); } return buf; } @@ -2265,9 +2265,9 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status) struct server *srv = objt_server(conn->target); if (conn_in_list == CO_FL_SAFE_LIST) - MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list); else - MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list); } return NULL; } diff --git a/src/mux_h2.c b/src/mux_h2.c index 6ec8d27ca..ce26c927e 100644 --- a/src/mux_h2.c +++ b/src/mux_h2.c @@ -683,7 +683,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr) unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) { h2c->buf_wait.target = h2c; h2c->buf_wait.wakeup_cb = h2_buf_available; - MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list); + MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list); } return buf; } @@ -3565,9 +3565,9 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status) struct server *srv = objt_server(conn->target); if (conn_in_list == CO_FL_SAFE_LIST) - MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list); else - MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list); } leave: diff --git a/src/server.c b/src/server.c index b0c061e88..05b19d4e1 100644 --- a/src/server.c +++ b/src/server.c @@ -5207,7 
+5207,7 @@ static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list if (toremove_nb != -1 && i >= toremove_nb) break; MT_LIST_DEL_SAFE_NOINIT(elt1); - MT_LIST_ADDQ_NOCHECK(toremove_list, &conn->list); + MT_LIST_ADDQ(toremove_list, &conn->list); i++; } return i; diff --git a/src/ssl_sock.c b/src/ssl_sock.c index 69f6835c4..54da3a0ac 100644 --- a/src/ssl_sock.c +++ b/src/ssl_sock.c @@ -5676,9 +5676,9 @@ leave: struct server *srv = objt_server(conn->target); if (conn_in_list == CO_FL_SAFE_LIST) - MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->safe_conns[tid], &conn->list); else - MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list); + MT_LIST_TRY_ADDQ(&srv->idle_conns[tid], &conn->list); } return NULL; } diff --git a/src/stream.c b/src/stream.c index 287f40647..f3253c685 100644 --- a/src/stream.c +++ b/src/stream.c @@ -728,7 +728,7 @@ static int stream_alloc_work_buffer(struct stream *s) if (b_alloc_margin(&s->res.buf, 0)) return 1; - MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list); + MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list); return 0; } diff --git a/src/task.c b/src/task.c index 72f09de30..102462eb8 100644 --- a/src/task.c +++ b/src/task.c @@ -97,7 +97,7 @@ void task_kill(struct task *t) /* Beware: tasks that have never run don't have their ->list empty yet! */ LIST_INIT(&((struct tasklet *)t)->list); - MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, + MT_LIST_TRY_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&((struct tasklet *)t)->list); _HA_ATOMIC_ADD(&tasks_run_queue, 1); _HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1); @@ -582,7 +582,7 @@ void process_runnable_tasks() * 100% due to rounding, this is not a problem. 
Note that while in * theory the sum cannot be NULL as we cannot get there without tasklets * to process, in practice it seldom happens when multiple writers - * conflict and rollback on MT_LIST_ADDQ(shared_tasklet_list), causing + * conflict and rollback on MT_LIST_TRY_ADDQ(shared_tasklet_list), causing * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the * one above to finally fail. This is extremely rare and not a problem. */ diff --git a/tests/test-list.c b/tests/test-list.c index 5e54f6034..e2e07f977 100644 --- a/tests/test-list.c +++ b/tests/test-list.c @@ -33,12 +33,12 @@ void *thread(void *pouet) case 0: lol = malloc(sizeof(*lol)); MT_LIST_INIT(&lol->list_elt); - MT_LIST_ADD(&pouet_list, &lol->list_elt); + MT_LIST_TRY_ADD(&pouet_list, &lol->list_elt); break; case 1: lol = malloc(sizeof(*lol)); MT_LIST_INIT(&lol->list_elt); - MT_LIST_ADDQ(&pouet_list, &lol->list_elt); + MT_LIST_TRY_ADDQ(&pouet_list, &lol->list_elt); break; case 2: