MINOR: dynbuf: make the buffer wait queue per thread

The buffer wait queue used to be global historically but this does not
make any sense anymore given that the most common use case is to have
thread-local pools. Thus there's no point waking up waiters of other
threads after releasing an entry, as they won't benefit from it.

Let's move the queue head to the thread_info structure and use
ti->buffer_wq from now on.
Willy Tarreau  2021-02-20 11:38:56 +01:00
parent 28d7876a0c
commit e8e5091510
10 changed files with 17 additions and 14 deletions
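
In short: each thread now owns its own list of buffer waiters, so a buffer
released on one thread no longer wakes tasks bound to another thread, which
could not have used it anyway. A minimal single-threaded model of the idea
(all names below are hypothetical; the real code uses HAProxy's struct
mt_list and the thread-local ti pointer):

    /* one waiter-list head per thread, mirroring ti->buffer_wq */
    struct waiter {
            struct waiter *next;
            void *target;                   /* entity waiting for a buffer */
            int (*wakeup_cb)(void *target); /* retries the allocation */
    };

    struct thread_info_model {
            struct waiter *buffer_wq;       /* this thread's waiters only */
    };

    static __thread struct thread_info_model ti_model;

    /* allocation failed: park the caller on the *current* thread's queue */
    static void wait_for_buffer(struct waiter *w)
    {
            w->next = ti_model.buffer_wq;
            ti_model.buffer_wq = w;
    }

    /* a buffer was released: only this thread's waiters are woken up */
    static void offer_buffer_local(void)
    {
            struct waiter *w = ti_model.buffer_wq;

            if (w) {
                    ti_model.buffer_wq = w->next;
                    w->wakeup_cb(w->target);
            }
    }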


@@ -852,7 +852,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
 		return 1;
 
 	if (!MT_LIST_ADDED(&wait->list))
-		MT_LIST_ADDQ(&buffer_wq, &wait->list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &wait->list);
 
 	return 0;
 }


@@ -35,7 +35,6 @@
 #include <haproxy/pool.h>
 
 extern struct pool_head *pool_head_buffer;
-extern struct mt_list buffer_wq;
 
 int init_buffer();
 void buffer_dump(FILE *o, struct buffer *b, int from, int to);
@@ -192,13 +191,13 @@ static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
  * passing a buffer to oneself in case of failed allocations (e.g. need two
  * buffers, get one, fail, release it and wake up self again). In case of
  * normal buffer release where it is expected that the caller is not waiting
- * for a buffer, NULL is fine.
+ * for a buffer, NULL is fine. It will wake waiters on the current thread only.
  */
 void __offer_buffer(void *from, unsigned int threshold);
 
 static inline void offer_buffers(void *from, unsigned int threshold)
 {
-	if (!MT_LIST_ISEMPTY(&buffer_wq))
+	if (!MT_LIST_ISEMPTY(&ti->buffer_wq))
 		__offer_buffer(from, threshold);
 }
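
The fast path in offer_buffers() thus only tests the current thread's queue
head before taking the slow path. A sketch of a release site (ctx and
THRESHOLD are placeholders, not names from this commit):

    b_free(&ctx->buf);              /* hand the buffer back to the pool */
    offer_buffers(ctx, THRESHOLD);  /* wake waiters on this thread only;
                                     * passing ctx as "from" keeps the
                                     * caller from waking itself */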


@@ -45,6 +45,8 @@ struct thread_info {
 #ifdef CONFIG_HAP_LOCAL_POOLS
 	struct list pool_lru_head;  /* oldest objects */
 #endif
+	struct mt_list buffer_wq;   /* buffer waiters */
+
 	/* pad to cache line (64B) */
 	char __pad[0];              /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));


@@ -1019,7 +1019,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &check->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
 	}
 	return buf;
 }
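
The same registration pattern repeats verbatim in the FCGI, H1 and H2 muxes
below; generically it looks like this (struct ctx and ctx_buf_available are
hypothetical stand-ins for the per-mux types):

    /* try to allocate; on failure, register on the *current thread's*
     * wait queue so that a buffer freed locally can wake us up */
    static struct buffer *get_buf(struct ctx *ctx, struct buffer *bptr)
    {
            struct buffer *buf = NULL;

            if (likely(!MT_LIST_ADDED(&ctx->buf_wait.list)) &&
                unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
                    ctx->buf_wait.target = ctx;                  /* whom to wake */
                    ctx->buf_wait.wakeup_cb = ctx_buf_available; /* how to wake  */
                    MT_LIST_ADDQ(&ti->buffer_wq, &ctx->buf_wait.list);
            }
            return buf;
    }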


@@ -22,18 +22,20 @@
 struct pool_head *pool_head_buffer;
 
-/* list of objects waiting for at least one buffer */
-struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);
 
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
 int init_buffer()
 {
 	void *buffer;
+	int thr;
 
 	pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
 	if (!pool_head_buffer)
 		return 0;
 
+	for (thr = 0; thr < MAX_THREADS; thr++)
+		MT_LIST_INIT(&ha_thread_info[thr].buffer_wq);
+
 	/* The reserved buffer is what we leave behind us. Thus we always need
 	 * at least one extra buffer in minavail otherwise we'll end up waking
 	 * up tasks with no memory available, causing a lot of useless wakeups.
@@ -112,7 +114,7 @@ void __offer_buffer(void *from, unsigned int threshold)
 	 */
 	avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
 
-	mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
+	mt_list_for_each_entry_safe(wait, &ti->buffer_wq, list, elt1, elt2) {
 		if (avail <= threshold)
 			break;
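
__offer_buffer() walks only the local queue and stops once the spare-buffer
budget falls to the caller's threshold. A conceptual model of that budget
(wq_empty/wq_pop are hypothetical helpers; the real loop uses
mt_list_for_each_entry_safe and the waiters' wakeup callbacks):

    /* each woken waiter is assumed to consume one spare buffer */
    int spare = allocated - used - reserved / 2;

    while (spare > threshold && !wq_empty(&ti->buffer_wq)) {
            struct buffer_wait *w = wq_pop(&ti->buffer_wq);

            w->wakeup_cb(w->target);  /* waiter retries its allocation */
            spare--;
    }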


@@ -2828,7 +2828,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
 	return 0;
 }


@@ -608,7 +608,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }


@@ -452,7 +452,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }


@@ -810,7 +810,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }


@@ -773,7 +773,7 @@ static int stream_alloc_work_buffer(struct stream *s)
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
 	return 0;
 }