MEDIUM: list: Separate "locked" list from regular list.
Instead of using the same type for regular linked lists and "autolocked" linked lists, use a separate type, "struct mt_list", for the autolocked one, and introduce a set of macros, similar to the LIST_* macros, with the MT_ prefix.

When we use the same entry for both a regular list and an autolocked list, as is done for the "list" field in struct connection, we now have to explicitly cast it to struct mt_list when using the MT_ macros.
commit 859dc80f94 (parent 6dd4ac890b)
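As a quick illustration of the new API (a minimal sketch; "struct job" and the helper functions below are hypothetical, only struct mt_list and the MT_* macros come from this commit):

    #include <common/mini-clist.h>

    /* an autolocked queue shared between threads */
    struct mt_list work_queue = MT_LIST_HEAD_INIT(work_queue);

    /* hypothetical item whose "list" member is also used with the plain
     * LIST_* macros elsewhere, like conn->list in struct connection
     */
    struct job {
        struct list list;
        int id;
    };

    static void job_enqueue(struct job *j)
    {
        /* the cast must be explicit; without it the compiler refuses
         * to apply an MT_ macro to a struct list
         */
        MT_LIST_ADDQ(&work_queue, (struct mt_list *)&j->list);
    }

    static struct job *job_dequeue(void)
    {
        /* returns NULL when the queue is empty */
        return MT_LIST_POP(&work_queue, struct job *, list);
    }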
@@ -34,6 +34,16 @@ struct list {
     struct list *p;    /* prev */
 };
 
+/* This is similar to struct list, but we want to be sure the compiler will
+ * yell at you if you use macros for one when you're using the other. You have
+ * to explicitly cast if that's really what you want to do.
+ */
+struct mt_list {
+    struct mt_list *next;
+    struct mt_list *prev;
+};
+
+
 /* a back-ref is a pointer to a target list entry. It is used to detect when an
  * element being deleted is currently being tracked by another user. The best
  * example is a user dumping the session table. The table does not fit in the
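The distinct member names are what make the type split enforceable: struct list links through ->n/->p while struct mt_list links through ->next/->prev, so a macro from the wrong family simply fails to expand against the other type. A minimal sketch of the compile-time check this buys (variable names are illustrative):

    struct list    reg_head = LIST_HEAD_INIT(reg_head);
    struct list    reg_el   = LIST_HEAD_INIT(reg_el);
    struct mt_list mt_head  = MT_LIST_HEAD_INIT(mt_head);
    struct mt_list mt_el    = MT_LIST_HEAD_INIT(mt_el);

    LIST_ADDQ(&reg_head, &reg_el);   /* fine: expands to ->n / ->p       */
    MT_LIST_ADDQ(&mt_head, &mt_el);  /* fine: expands to ->next / ->prev */

    /* MT_LIST_ADDQ(&reg_head, &reg_el) would no longer compile:
     * "struct list" has no member named "next" -- the very error the
     * comment above wants the compiler to raise.
     */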
@@ -189,7 +199,7 @@ struct cond_wordlist {
     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
 
 #include <common/hathreads.h>
-#define LLIST_BUSY ((struct list *)1)
+#define MT_LIST_BUSY ((struct mt_list *)1)
 
 /*
  * Locked version of list manipulation macros.
@@ -197,95 +207,95 @@ struct cond_wordlist {
  * list is only used with the locked variants. The only "unlocked" macro you
  * can use with a locked list is LIST_INIT.
  */
-#define LIST_ADD_LOCKED(lh, el) \
+#define MT_LIST_ADD(lh, el) \
     do { \
         while (1) { \
-            struct list *n; \
-            struct list *p; \
-            n = _HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY); \
-            if (n == LLIST_BUSY) \
+            struct mt_list *n; \
+            struct mt_list *p; \
+            n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+            if (n == MT_LIST_BUSY) \
                 continue; \
-            p = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
-            if (p == LLIST_BUSY) { \
-                (lh)->n = n; \
+            p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+            if (p == MT_LIST_BUSY) { \
+                (lh)->next = n; \
                 __ha_barrier_store(); \
                 continue; \
             } \
-            (el)->n = n; \
-            (el)->p = p; \
+            (el)->next = n; \
+            (el)->prev = p; \
             __ha_barrier_store(); \
-            n->p = (el); \
+            n->prev = (el); \
             __ha_barrier_store(); \
-            p->n = (el); \
+            p->next = (el); \
             __ha_barrier_store(); \
             break; \
         } \
     } while (0)
 
-#define LIST_ADDQ_LOCKED(lh, el) \
+#define MT_LIST_ADDQ(lh, el) \
     do { \
         while (1) { \
-            struct list *n; \
-            struct list *p; \
-            p = _HA_ATOMIC_XCHG(&(lh)->p, LLIST_BUSY); \
-            if (p == LLIST_BUSY) \
+            struct mt_list *n; \
+            struct mt_list *p; \
+            p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
+            if (p == MT_LIST_BUSY) \
                 continue; \
-            n = _HA_ATOMIC_XCHG(&p->n, LLIST_BUSY); \
-            if (n == LLIST_BUSY) { \
-                (lh)->p = p; \
+            n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
+            if (n == MT_LIST_BUSY) { \
+                (lh)->prev = p; \
                 __ha_barrier_store(); \
                 continue; \
             } \
-            (el)->n = n; \
-            (el)->p = p; \
+            (el)->next = n; \
+            (el)->prev = p; \
             __ha_barrier_store(); \
-            p->n = (el); \
+            p->next = (el); \
             __ha_barrier_store(); \
-            n->p = (el); \
+            n->prev = (el); \
             __ha_barrier_store(); \
             break; \
         } \
     } while (0)
 
-#define LIST_DEL_LOCKED(el) \
+#define MT_LIST_DEL(el) \
     do { \
         while (1) { \
-            struct list *n, *n2; \
-            struct list *p, *p2 = NULL; \
-            n = _HA_ATOMIC_XCHG(&(el)->n, LLIST_BUSY); \
-            if (n == LLIST_BUSY) \
+            struct mt_list *n, *n2; \
+            struct mt_list *p, *p2 = NULL; \
+            n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
+            if (n == MT_LIST_BUSY) \
                 continue; \
-            p = _HA_ATOMIC_XCHG(&(el)->p, LLIST_BUSY); \
-            if (p == LLIST_BUSY) { \
-                (el)->n = n; \
+            p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
+            if (p == MT_LIST_BUSY) { \
+                (el)->next = n; \
                 __ha_barrier_store(); \
                 continue; \
             } \
             if (p != (el)) { \
-                p2 = _HA_ATOMIC_XCHG(&p->n, LLIST_BUSY); \
-                if (p2 == LLIST_BUSY) { \
-                    (el)->p = p; \
-                    (el)->n = n; \
+                p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+                if (p2 == MT_LIST_BUSY) { \
+                    (el)->prev = p; \
+                    (el)->next = n; \
                     __ha_barrier_store(); \
                     continue; \
                 } \
             } \
             if (n != (el)) { \
-                n2 = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
-                if (n2 == LLIST_BUSY) { \
+                n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+                if (n2 == MT_LIST_BUSY) { \
                     if (p2 != NULL) \
-                        p->n = p2; \
-                    (el)->p = p; \
-                    (el)->n = n; \
+                        p->next = p2; \
+                    (el)->prev = p; \
+                    (el)->next = n; \
                     __ha_barrier_store(); \
                     continue; \
                 } \
             } \
-            n->p = p; \
-            p->n = n; \
+            n->prev = p; \
+            p->next = n; \
             __ha_barrier_store(); \
-            (el)->p = (el); \
-            (el)->n = (el); \
+            (el)->prev = (el); \
+            (el)->next = (el); \
             __ha_barrier_store(); \
             break; \
         } \
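The pattern in all of these macros is the same two-phase "pointer locking": atomically exchange a link with the sentinel MT_LIST_BUSY to claim it; if the exchange returns the sentinel, another thread got there first; if a later claim fails, roll back the links already taken and retry. MT_LIST_ADD's logic is easier to follow unrolled as a function. A minimal sketch (my own transcription, not the committed code: C11 seq_cst atomics stand in for _HA_ATOMIC_XCHG and the explicit __ha_barrier_store() calls, and the function name mt_list_add is hypothetical):

    #include <stdatomic.h>

    #define MT_LIST_BUSY ((struct mt_list *)1)

    struct mt_list {
        _Atomic(struct mt_list *) next;
        _Atomic(struct mt_list *) prev;
    };

    /* insert <el> right after <lh>, spinning while another thread
     * holds one of the two pointers we need
     */
    static void mt_list_add(struct mt_list *lh, struct mt_list *el)
    {
        for (;;) {
            struct mt_list *n, *p;

            /* claim lh->next; reading back BUSY means another
             * thread owns it, so retry from scratch
             */
            n = atomic_exchange(&lh->next, MT_LIST_BUSY);
            if (n == MT_LIST_BUSY)
                continue;

            /* claim n->prev; on failure, give lh->next back first */
            p = atomic_exchange(&n->prev, MT_LIST_BUSY);
            if (p == MT_LIST_BUSY) {
                lh->next = n;
                continue;
            }

            /* both neighbours are claimed: splice the element in;
             * the two final stores also release the BUSY slots
             * (p is lh here when the list was consistent)
             */
            el->next = n;
            el->prev = p;
            n->prev = el;
            p->next = el;
            return;
        }
    }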
@@ -293,54 +303,89 @@ struct cond_wordlist {
 
 
 /* Remove the first element from the list, and return it */
-#define LIST_POP_LOCKED(lh, pt, el) \
+#define MT_LIST_POP(lh, pt, el) \
     ({ \
         void *_ret; \
         while (1) { \
-            struct list *n, *n2; \
-            struct list *p, *p2; \
-            n = _HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY); \
-            if (n == LLIST_BUSY) \
+            struct mt_list *n, *n2; \
+            struct mt_list *p, *p2; \
+            n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+            if (n == MT_LIST_BUSY) \
                 continue; \
             if (n == (lh)) { \
-                (lh)->n = lh; \
+                (lh)->next = lh; \
                 __ha_barrier_store(); \
                 _ret = NULL; \
                 break; \
             } \
-            p = _HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
-            if (p == LLIST_BUSY) { \
-                (lh)->n = n; \
+            p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+            if (p == MT_LIST_BUSY) { \
+                (lh)->next = n; \
                 __ha_barrier_store(); \
                 continue; \
             } \
-            n2 = _HA_ATOMIC_XCHG(&n->n, LLIST_BUSY); \
-            if (n2 == LLIST_BUSY) { \
-                n->p = p; \
+            n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY); \
+            if (n2 == MT_LIST_BUSY) { \
+                n->prev = p; \
                 __ha_barrier_store(); \
-                (lh)->n = n; \
+                (lh)->next = n; \
                 __ha_barrier_store(); \
                 continue; \
             } \
-            p2 = _HA_ATOMIC_XCHG(&n2->p, LLIST_BUSY); \
-            if (p2 == LLIST_BUSY) { \
-                n->n = n2; \
-                n->p = p; \
+            p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY); \
+            if (p2 == MT_LIST_BUSY) { \
+                n->next = n2; \
+                n->prev = p; \
                 __ha_barrier_store(); \
-                (lh)->n = n; \
+                (lh)->next = n; \
                 __ha_barrier_store(); \
                 continue; \
             } \
-            (lh)->n = n2; \
-            (n2)->p = (lh); \
+            (lh)->next = n2; \
+            (n2)->prev = (lh); \
             __ha_barrier_store(); \
-            (n)->p = (n); \
-            (n)->n = (n); \
+            (n)->prev = (n); \
+            (n)->next = (n); \
             __ha_barrier_store(); \
-            _ret = LIST_ELEM(n, pt, el); \
+            _ret = MT_LIST_ELEM(n, pt, el); \
             break; \
         } \
         (_ret); \
     })
 
+#define MT_LIST_HEAD(a) ((void *)(&(a)))
+
+#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))
+
+#define MT_LIST_HEAD_INIT(l) { &l, &l }
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define MT_LIST_ELEM(lh, pt, el) ((pt)(((void *)(lh)) - ((void *)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: MT_LIST_NEXT(args, struct node *, list)
+ */
+#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef MT_LIST_PREV
+#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using LIST_DEL_INIT)
+ */
+#define MT_LIST_ADDED(el) ((el)->next != (el))
+
 #endif /* _COMMON_MINI_CLIST_H */
 
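With the helpers above, an mt_list behaves as a self-contained multi-producer/multi-consumer queue. A hypothetical round-trip (struct item, push and pop are illustrative names; the macros are the ones defined in this hunk):

    struct mt_list queue = MT_LIST_HEAD_INIT(queue);

    struct item {
        struct mt_list list;
        int v;
    };

    static void push(struct item *it)
    {
        MT_LIST_INIT(&it->list);          /* detached: MT_LIST_ADDED() == 0 */
        MT_LIST_ADDQ(&queue, &it->list);  /* append at the tail */
    }

    static struct item *pop(void)
    {
        /* MT_LIST_ISEMPTY is only a racy hint; MT_LIST_POP itself
         * returns NULL when it finds the queue empty
         */
        return MT_LIST_POP(&queue, struct item *, list);
    }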
@@ -625,7 +625,7 @@ static inline void conn_free(struct connection *conn)
 
     conn_force_unsubscribe(conn);
     HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
-    LIST_DEL_LOCKED(&conn->list);
+    MT_LIST_DEL((struct mt_list *)&conn->list);
     HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
     pool_free(pool_head_connection, conn);
 }
@@ -58,7 +58,7 @@ int enable_all_listeners(struct protocol *proto);
 int disable_all_listeners(struct protocol *proto);
 
 /* Dequeues all of the listeners waiting for a resource in wait queue <queue>. */
-void dequeue_all_listeners(struct list *list);
+void dequeue_all_listeners(struct mt_list *list);
 
 /* Must be called with the lock held. Depending on <do_close> value, it does
  * what unbind_listener or unbind_listener_no_close should do.
@@ -41,7 +41,7 @@ __decl_hathreads(extern HA_SPINLOCK_T idle_conn_srv_lock);
 extern struct eb_root idle_conn_srv;
 extern struct task *idle_conn_task;
 extern struct task *idle_conn_cleanup[MAX_THREADS];
-extern struct list toremove_connections[MAX_THREADS];
+extern struct mt_list toremove_connections[MAX_THREADS];
 
 int srv_downtime(const struct server *s);
 int srv_lastsession(const struct server *s);
@@ -262,7 +262,7 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
         return 0;
     }
     LIST_DEL(&conn->list);
-    LIST_ADDQ_LOCKED(&srv->idle_orphan_conns[tid], &conn->list);
+    MT_LIST_ADDQ(&srv->idle_orphan_conns[tid], (struct mt_list *)&conn->list);
     srv->curr_idle_thr[tid]++;
 
     conn->idle_time = now_ms;
@@ -542,9 +542,9 @@ static inline int thread_has_tasks(void)
 }
 
 /* adds list item <item> to work list <work> and wakes up the associated task */
-static inline void work_list_add(struct work_list *work, struct list *item)
+static inline void work_list_add(struct work_list *work, struct mt_list *item)
 {
-    LIST_ADDQ_LOCKED(&work->head, item);
+    MT_LIST_ADDQ(&work->head, item);
     task_wakeup(work->task, TASK_WOKEN_OTHER);
 }
 
@@ -240,7 +240,7 @@ extern int stopping;     /* non zero means stopping in progress */
 extern int killed;       /* >0 means a hard-stop is triggered, >1 means hard-stop immediately */
 extern char hostname[MAX_HOSTNAME_LEN];
 extern char localpeer[MAX_HOSTNAME_LEN];
-extern struct list global_listener_queue; /* list of the temporarily limited listeners */
+extern struct mt_list global_listener_queue; /* list of the temporarily limited listeners */
 extern struct task *global_listener_queue_task;
 extern unsigned int warned;     /* bitfield of a few warnings to emit just once */
 extern volatile unsigned long sleeping_thread_mask;
@@ -202,7 +202,7 @@ struct listener {
     int (*accept)(struct listener *l, int fd, struct sockaddr_storage *addr); /* upper layer's accept() */
     enum obj_type *default_target;  /* default target to use for accepted sessions or NULL */
     /* cache line boundary */
-    struct list wait_queue;         /* link element to make the listener wait for something (LI_LIMITED) */
+    struct mt_list wait_queue;      /* link element to make the listener wait for something (LI_LIMITED) */
     unsigned int thr_idx;           /* thread indexes for queue distribution : (t2<<16)+t1 */
     unsigned int analysers;         /* bitmap of required protocol analysers */
     int maxseg;                     /* for TCP, advertised MSS */
@@ -415,7 +415,7 @@ struct proxy {
     struct be_counters be_counters; /* backend statistics counters */
     struct fe_counters fe_counters; /* frontend statistics counters */
 
-    struct list listener_queue;     /* list of the temporarily limited listeners because of lack of a proxy resource */
+    struct mt_list listener_queue;  /* list of the temporarily limited listeners because of lack of a proxy resource */
     struct stktable *table;         /* table for storing sticking streams */
 
     struct task *task;              /* the associated task, mandatory to manage rate limiting, stopping and resource shortage, NULL if disabled */
@@ -223,7 +223,7 @@ struct server {
     struct list *priv_conns;        /* private idle connections attached to stream interfaces */
     struct list *idle_conns;        /* sharable idle connections attached or not to a stream interface */
     struct list *safe_conns;        /* safe idle connections attached to stream interfaces, shared */
-    struct list *idle_orphan_conns; /* Orphan connections idling */
+    struct mt_list *idle_orphan_conns; /* Orphan connections idling */
     unsigned int pool_purge_delay;  /* Delay before starting to purge the idle conns pool */
     unsigned int max_idle_conns;    /* Max number of connection allowed in the orphan connections list */
     unsigned int curr_idle_conns;   /* Current number of orphan idling connections */
@@ -118,7 +118,7 @@ struct tasklet {
  * TASK_WOKEN_OTHER and a context pointing to the work_list entry.
  */
 struct work_list {
-    struct list head;
+    struct mt_list head;
     struct task *task;
     void *arg;
 };
@@ -1227,11 +1227,11 @@ int connect_server(struct stream *s)
     else if (srv->idle_conns && !LIST_ISEMPTY(&srv->idle_conns[tid]) &&
              (s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) {
         srv_conn = LIST_ELEM(srv->idle_conns[tid].n, struct connection *, list);
-    } else if (srv->idle_orphan_conns && !LIST_ISEMPTY(&srv->idle_orphan_conns[tid]) &&
+    } else if (srv->idle_orphan_conns && !MT_LIST_ISEMPTY(&srv->idle_orphan_conns[tid]) &&
                (((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) ||
                 (((s->be->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
                  s->txn && (s->txn->flags & TX_NOT_FIRST)))) {
-        srv_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[tid],
+        srv_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
                                    struct connection *, list);
         if (srv_conn)
             reuse_orphan = 1;
@@ -1275,7 +1275,7 @@ int connect_server(struct stream *s)
      * acceptable, attempt to kill an idling connection
      */
     /* First, try from our own idle list */
-    tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[tid],
+    tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[tid],
                                   struct connection *, list);
     if (tokill_conn)
         tokill_conn->mux->destroy(tokill_conn->ctx);
@@ -1293,13 +1293,13 @@ int connect_server(struct stream *s)
     // see it possibly larger.
     ALREADY_CHECKED(i);
 
-    tokill_conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i],
+    tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[i],
                                   struct connection *, list);
     if (tokill_conn) {
         /* We got one, put it into the concerned thread's to-kill list, and wake its kill task */
 
-        LIST_ADDQ_LOCKED(&toremove_connections[i],
-                         &tokill_conn->list);
+        MT_LIST_ADDQ(&toremove_connections[i],
+                     (struct mt_list *)&tokill_conn->list);
         task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
         break;
     }
@@ -3640,14 +3640,14 @@ out_uri_auth_compat:
                     goto err;
                 idle_conn_cleanup[i]->process = srv_cleanup_toremove_connections;
                 idle_conn_cleanup[i]->context = NULL;
-                LIST_INIT(&toremove_connections[i]);
+                MT_LIST_INIT(&toremove_connections[i]);
             }
         }
         newsrv->idle_orphan_conns = calloc((unsigned int)global.nbthread, sizeof(*newsrv->idle_orphan_conns));
         if (!newsrv->idle_orphan_conns)
             goto err;
         for (i = 0; i < global.nbthread; i++)
-            LIST_INIT(&newsrv->idle_orphan_conns[i]);
+            MT_LIST_INIT(&newsrv->idle_orphan_conns[i]);
         newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(int));
         if (!newsrv->curr_idle_thr)
             goto err;
@@ -1374,7 +1374,7 @@ static int cli_parse_set_maxconn_global(char **args, char *payload, struct appct
     global.maxconn = v;
 
     /* Dequeues all of the listeners waiting for a resource */
-    if (!LIST_ISEMPTY(&global_listener_queue))
+    if (!MT_LIST_ISEMPTY(&global_listener_queue))
         dequeue_all_listeners(&global_listener_queue);
 
     return 1;
@@ -1493,7 +1493,7 @@ static int cli_parse_set_ratelimit(char **args, char *payload, struct appctx *ap
     *res = v * mul;
 
     /* Dequeues all of the listeners waiting for a resource */
-    if (!LIST_ISEMPTY(&global_listener_queue))
+    if (!MT_LIST_ISEMPTY(&global_listener_queue))
         dequeue_all_listeners(&global_listener_queue);
 
     return 1;
@@ -231,7 +231,7 @@ unsigned int rlim_fd_max_at_boot = 0;
 struct mworker_proc *proc_self = NULL;
 
 /* list of the temporarily limited listeners because of lack of resource */
-struct list global_listener_queue = LIST_HEAD_INIT(global_listener_queue);
+struct mt_list global_listener_queue = MT_LIST_HEAD_INIT(global_listener_queue);
 struct task *global_listener_queue_task;
 static struct task *manage_global_listener_queue(struct task *t, void *context, unsigned short state);
 
@@ -2747,7 +2747,7 @@ static struct task *manage_global_listener_queue(struct task *t, void *context,
 {
     int next = TICK_ETERNITY;
     /* queue is empty, nothing to do */
-    if (LIST_ISEMPTY(&global_listener_queue))
+    if (MT_LIST_ISEMPTY(&global_listener_queue))
         goto out;
 
     /* If there are still too many concurrent connections, let's wait for
@@ -273,7 +273,7 @@ static void disable_listener(struct listener *listener)
         goto end;
     if (listener->state == LI_READY)
         fd_stop_recv(listener->fd);
-    LIST_DEL_LOCKED(&listener->wait_queue);
+    MT_LIST_DEL(&listener->wait_queue);
     listener->state = LI_LISTEN;
   end:
     HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
@@ -309,7 +309,7 @@ int pause_listener(struct listener *l)
             goto end;
     }
 
-    LIST_DEL_LOCKED(&l->wait_queue);
+    MT_LIST_DEL(&l->wait_queue);
 
     fd_stop_recv(l->fd);
     l->state = LI_PAUSED;
@@ -337,7 +337,7 @@ int resume_listener(struct listener *l)
     /* check that another thread didn't do the job in parallel (e.g. at the
      * end of listen_accept() while we'd come from dequeue_all_listeners().
      */
-    if (LIST_ADDED(&l->wait_queue))
+    if (MT_LIST_ADDED(&l->wait_queue))
         goto end;
 
     if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
@@ -375,7 +375,7 @@ int resume_listener(struct listener *l)
     if (l->state == LI_READY)
         goto end;
 
-    LIST_DEL_LOCKED(&l->wait_queue);
+    MT_LIST_DEL(&l->wait_queue);
 
     if (l->maxconn && l->nbconn >= l->maxconn) {
         l->state = LI_FULL;
@@ -405,7 +405,7 @@ static void listener_full(struct listener *l)
 {
     HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
     if (l->state >= LI_READY) {
-        LIST_DEL_LOCKED(&l->wait_queue);
+        MT_LIST_DEL(&l->wait_queue);
         if (l->state != LI_FULL) {
             fd_stop_recv(l->fd);
             l->state = LI_FULL;
@@ -417,11 +417,11 @@ static void listener_full(struct listener *l)
 /* Marks a ready listener as limited so that we only try to re-enable it when
  * resources are free again. It will be queued into the specified queue.
  */
-static void limit_listener(struct listener *l, struct list *list)
+static void limit_listener(struct listener *l, struct mt_list *list)
 {
     HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
     if (l->state == LI_READY) {
-        LIST_ADDQ_LOCKED(list, &l->wait_queue);
+        MT_LIST_ADDQ(list, &l->wait_queue);
         fd_stop_recv(l->fd);
         l->state = LI_LIMITED;
     }
@@ -464,11 +464,11 @@ int disable_all_listeners(struct protocol *proto)
 }
 
 /* Dequeues all of the listeners waiting for a resource in wait queue <queue>. */
-void dequeue_all_listeners(struct list *list)
+void dequeue_all_listeners(struct mt_list *list)
 {
     struct listener *listener;
 
-    while ((listener = LIST_POP_LOCKED(list, struct listener *, wait_queue))) {
+    while ((listener = MT_LIST_POP(list, struct listener *, wait_queue))) {
         /* This cannot fail because the listeners are by definition in
          * the LI_LIMITED state.
          */
@@ -484,7 +484,7 @@ void do_unbind_listener(struct listener *listener, int do_close)
     if (listener->state == LI_READY && fd_updt)
         fd_stop_recv(listener->fd);
 
-    LIST_DEL_LOCKED(&listener->wait_queue);
+    MT_LIST_DEL(&listener->wait_queue);
 
     if (listener->state >= LI_PAUSED) {
         if (do_close) {
@@ -569,7 +569,7 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
 
         l->fd = fd;
         memcpy(&l->addr, ss, sizeof(*ss));
-        LIST_INIT(&l->wait_queue);
+        MT_LIST_INIT(&l->wait_queue);
         l->state = LI_INIT;
 
         proto->add(l, port);
@@ -1062,10 +1062,10 @@ void listener_accept(int fd)
         resume_listener(l);
 
     /* Dequeues all of the listeners waiting for a resource */
-    if (!LIST_ISEMPTY(&global_listener_queue))
+    if (!MT_LIST_ISEMPTY(&global_listener_queue))
         dequeue_all_listeners(&global_listener_queue);
 
-    if (p && !LIST_ISEMPTY(&p->listener_queue) &&
+    if (p && !MT_LIST_ISEMPTY(&p->listener_queue) &&
         (!p->fe_sps_lim || freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0) > 0))
         dequeue_all_listeners(&p->listener_queue);
 }
@@ -1090,10 +1090,10 @@ void listener_release(struct listener *l)
         resume_listener(l);
 
     /* Dequeues all of the listeners waiting for a resource */
-    if (!LIST_ISEMPTY(&global_listener_queue))
+    if (!MT_LIST_ISEMPTY(&global_listener_queue))
         dequeue_all_listeners(&global_listener_queue);
 
-    if (!LIST_ISEMPTY(&fe->listener_queue) &&
+    if (!MT_LIST_ISEMPTY(&fe->listener_queue) &&
         (!fe->fe_sps_lim || freq_ctr_remain(&fe->fe_sess_per_sec, fe->fe_sps_lim, 0) > 0))
         dequeue_all_listeners(&fe->listener_queue);
 }
@@ -1104,7 +1104,7 @@ static struct task *listener_queue_process(struct task *t, void *context, unsign
     struct work_list *wl = context;
     struct listener *l;
 
-    while ((l = LIST_POP_LOCKED(&wl->head, struct listener *, wait_queue))) {
+    while ((l = MT_LIST_POP(&wl->head, struct listener *, wait_queue))) {
         /* The listeners are still in the LI_LIMITED state */
         resume_listener(l);
     }
@@ -868,7 +868,7 @@ void init_new_proxy(struct proxy *p)
     LIST_INIT(&p->tcp_rep.inspect_rules);
     LIST_INIT(&p->tcp_req.l4_rules);
     LIST_INIT(&p->tcp_req.l5_rules);
-    LIST_INIT(&p->listener_queue);
+    MT_LIST_INIT(&p->listener_queue);
     LIST_INIT(&p->logsrvs);
     LIST_INIT(&p->logformat);
     LIST_INIT(&p->logformat_sd);
@@ -1034,7 +1034,7 @@ struct task *manage_proxy(struct task *t, void *context, unsigned short state)
     }
 
     /* The proxy is not limited so we can re-enable any waiting listener */
-    if (!LIST_ISEMPTY(&p->listener_queue))
+    if (!MT_LIST_ISEMPTY(&p->listener_queue))
         dequeue_all_listeners(&p->listener_queue);
   out:
     t->expire = next;
@@ -2033,7 +2033,7 @@ static int cli_parse_set_maxconn_frontend(char **args, char *payload, struct app
             resume_listener(l);
     }
 
-    if (px->maxconn > px->feconn && !LIST_ISEMPTY(&px->listener_queue))
+    if (px->maxconn > px->feconn && !MT_LIST_ISEMPTY(&px->listener_queue))
         dequeue_all_listeners(&px->listener_queue);
 
     HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
@@ -65,7 +65,7 @@ __decl_hathreads(HA_SPINLOCK_T idle_conn_srv_lock);
 struct eb_root idle_conn_srv = EB_ROOT;
 struct task *idle_conn_task = NULL;
 struct task *idle_conn_cleanup[MAX_THREADS] = { NULL };
-struct list toremove_connections[MAX_THREADS];
+struct mt_list toremove_connections[MAX_THREADS];
 __decl_hathreads(HA_SPINLOCK_T toremove_lock[MAX_THREADS]);
 
 /* The server names dictionary */
@@ -5550,7 +5550,7 @@ struct task *srv_cleanup_toremove_connections(struct task *task, void *context,
 {
     struct connection *conn;
 
-    while ((conn = LIST_POP_LOCKED(&toremove_connections[tid],
+    while ((conn = MT_LIST_POP(&toremove_connections[tid],
                                    struct connection *, list)) != NULL) {
         conn->mux->destroy(conn->ctx);
     }
@@ -5596,11 +5596,11 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
 
         HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
         for (j = 0; j < max_conn; j++) {
-            struct connection *conn = LIST_POP_LOCKED(&srv->idle_orphan_conns[i], struct connection *, list);
+            struct connection *conn = MT_LIST_POP(&srv->idle_orphan_conns[i], struct connection *, list);
             if (!conn)
                 break;
             did_remove = 1;
-            LIST_ADDQ_LOCKED(&toremove_connections[i], &conn->list);
+            MT_LIST_ADDQ(&toremove_connections[i], (struct mt_list *)&conn->list);
         }
         HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
         if (did_remove && max_conn < srv->curr_idle_thr[i])
@@ -465,7 +465,7 @@ struct work_list *work_list_create(int nbthread,
         goto fail;
 
     for (i = 0; i < nbthread; i++) {
-        LIST_INIT(&wl[i].head);
+        MT_LIST_INIT(&wl[i].head);
         wl[i].task = task_new(1UL << i);
         if (!wl[i].task)
             goto fail;