REORG: thread/sched: move the last dynamic thread_info to thread_ctx

The last 3 fields were 3 per-thread list heads:
  - the pool's LRU head
  - the buffer_wq
  - the streams list head

Moving them into thread_ctx completes the removal of dynamic elements
from struct thread_info. All of a thread's dynamic elements are now
packed together in a single place.
Willy Tarreau 2021-09-30 19:02:18 +02:00
parent a0b99536c8
commit b4e34766a3
14 changed files with 29 additions and 29 deletions
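
Before the per-file hunks, a standalone sketch of the target layout may help: every thread gets one cache-line-aligned context slot in a global array, plus a thread-local pointer to its own slot, so the per-thread dynamic state is reached in O(1) without going through thread_info. All names below (my_thread_ctx, my_ctx, MY_MAX_THREADS) are invented for illustration; the real definitions are the ones visible in the hunks that follow.

#include <stdio.h>

#define MY_MAX_THREADS 64

struct list { struct list *n, *p; };          /* doubly-linked list head */

struct my_thread_ctx {
	struct list pool_lru_head;            /* oldest cached pool objects */
	struct list buffer_wq;                /* buffer waiters */
	struct list streams;                  /* streams attached to this thread */
} __attribute__((aligned(64)));               /* one cache line per thread */

static struct my_thread_ctx my_thread_ctx[MY_MAX_THREADS];
static __thread struct my_thread_ctx *my_ctx; /* bound once per thread at boot */

static void list_init(struct list *l) { l->n = l->p = l; }

/* what init_buffer()/init_pools()/init_stream() each do for their own list */
static void init_all_ctx(void)
{
	for (int thr = 0; thr < MY_MAX_THREADS; thr++) {
		list_init(&my_thread_ctx[thr].pool_lru_head);
		list_init(&my_thread_ctx[thr].buffer_wq);
		list_init(&my_thread_ctx[thr].streams);
	}
}

int main(void)
{
	init_all_ctx();
	my_ctx = &my_thread_ctx[0];           /* thread 0 binds its pointer */
	printf("buffer_wq empty: %d\n", my_ctx->buffer_wq.n == &my_ctx->buffer_wq);
	return 0;
}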


@@ -846,7 +846,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
 		return 1;
 	if (!LIST_INLIST(&wait->list))
-		LIST_APPEND(&ti->buffer_wq, &wait->list);
+		LIST_APPEND(&th_ctx->buffer_wq, &wait->list);
 	return 0;
 }


@@ -112,7 +112,7 @@ void __offer_buffers(void *from, unsigned int count);
 static inline void offer_buffers(void *from, unsigned int count)
 {
-	if (!LIST_ISEMPTY(&ti->buffer_wq))
+	if (!LIST_ISEMPTY(&th_ctx->buffer_wq))
 		__offer_buffers(from, count);
 }


@@ -42,12 +42,6 @@ enum {
  * disabled, it contains the same info for the single running thread.
  */
 struct thread_info {
-#ifdef CONFIG_HAP_POOLS
-	struct list pool_lru_head;        /* oldest objects   */
-#endif
-	struct list buffer_wq;            /* buffer waiters */
-	struct list streams;              /* list of streams attached to this thread */
-
 	/* pad to cache line (64B) */
 	char __pad[0];                    /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));
@@ -70,6 +64,12 @@ struct thread_ctx {
 	uint8_t tl_class_mask;            /* bit mask of non-empty tasklets classes */
 	// 7 bytes hole here
+#ifdef CONFIG_HAP_POOLS
+	struct list pool_lru_head;        /* oldest objects   */
+#endif
+	struct list buffer_wq;            /* buffer waiters */
+	struct list streams;              /* list of streams attached to this thread */
+
 	ALWAYS_ALIGN(2*sizeof(void*));
 	struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
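
A side note on the two zero-length arrays left behind in struct thread_info: __end is 64-byte aligned, so sizeof() rounds the struct up to a full cache line, and the distance from __pad to the end measures how much room remains, exactly as the "unused except to check remaining room" comment says. A small self-contained demonstration of the trick (the payload fields here are invented):

#include <stdio.h>
#include <stddef.h>

struct s {
	unsigned long a, b, c;                      /* 24 bytes of real fields */
	char __pad[0];                              /* start of the unused room */
	char __end[0] __attribute__((aligned(64))); /* rounds the size up to 64B */
};

int main(void)
{
	/* prints size=64, room left=40 on a typical LP64 target */
	printf("size=%zu, room left=%zu\n",
	       sizeof(struct s), sizeof(struct s) - offsetof(struct s, __pad));
	return 0;
}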


@@ -1296,7 +1296,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &check->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
 	}
 	return buf;
 }


@@ -34,7 +34,7 @@ int init_buffer()
 		return 0;
 	for (thr = 0; thr < MAX_THREADS; thr++)
-		LIST_INIT(&ha_thread_info[thr].buffer_wq);
+		LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
 	/* The reserved buffer is what we leave behind us. Thus we always need
@@ -109,7 +109,7 @@ void __offer_buffers(void *from, unsigned int count)
 	 * other tasks, but that's a rough estimate. Similarly, for each cached
 	 * event we'll need 1 buffer.
 	 */
-	list_for_each_entry_safe(wait, wait_back, &ti->buffer_wq, list) {
+	list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
 		if (!count)
 			break;
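
All of the buffer_wq hunks in this commit (channel, checks, and the SPOE filter and h3/fcgi/h1/h2/quic muxes below) follow one protocol: when b_alloc() fails, the caller registers a struct buffer_wait carrying a wakeup callback on the current thread's queue, and __offer_buffers() later walks that queue as buffers are released. Below is a minimal single-threaded sketch of the idea; the list helpers and demo names are stand-ins, not HAProxy's API.

#include <stdio.h>
#include <stddef.h>

struct list { struct list *n, *p; };

struct buffer_wait {
	void *target;                       /* object waiting for a buffer */
	int (*wakeup_cb)(void *target);     /* called when a buffer frees up */
	struct list list;                   /* anchor in the thread's buffer_wq */
};

static struct list buffer_wq = { &buffer_wq, &buffer_wq }; /* per-thread in reality */

static void list_append(struct list *head, struct list *el)
{
	el->p = head->p; el->n = head;
	head->p->n = el; head->p = el;
}

static void list_del_init(struct list *el)
{
	el->p->n = el->n; el->n->p = el->p;
	el->n = el->p = el;
}

static int demo_wakeup(void *target)
{
	printf("waiter %s woken up\n", (const char *)target);
	return 1; /* consumed one freed buffer */
}

int main(void)
{
	struct buffer_wait w = { .target = "h1c", .wakeup_cb = demo_wakeup,
	                         .list = { &w.list, &w.list } };

	/* failure path: b_alloc() returned NULL, register on the queue */
	list_append(&buffer_wq, &w.list);

	/* release path: a buffer was freed, wake up to 1 waiter */
	unsigned int count = 1;
	while (count && buffer_wq.n != &buffer_wq) {
		struct buffer_wait *wait = (struct buffer_wait *)
			((char *)buffer_wq.n - offsetof(struct buffer_wait, list));
		list_del_init(&wait->list);     /* unlink before the callback */
		if (wait->wakeup_cb(wait->target))
			count--;
	}
	return 0;
}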


@@ -2867,7 +2867,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
 	if (b_alloc(buf))
 		return 1;
-	LIST_APPEND(&ti->buffer_wq, &buffer_wait->list);
+	LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
 	return 0;
 }


@@ -99,7 +99,7 @@ static struct buffer *h3_uqs_get_buf(struct h3_uqs *h3_uqs)
 	    unlikely((buf = b_alloc(&h3_uqs->qcs->tx.buf)) == NULL)) {
 		h3->buf_wait.target = h3_uqs;
 		h3->buf_wait.wakeup_cb = qcs_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h3->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h3->buf_wait.list);
 	}
 	return buf;


@@ -612,7 +612,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &fconn->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }


@@ -449,7 +449,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h1c->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }


@@ -817,7 +817,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &h2c->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }


@@ -442,7 +442,7 @@ struct buffer *qc_get_buf(struct qcc *qcc, struct buffer *bptr)
 	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		qcc->buf_wait.target = qcc;
 		qcc->buf_wait.wakeup_cb = qc_buf_available;
-		LIST_APPEND(&ti->buffer_wq, &qcc->buf_wait.list);
+		LIST_APPEND(&th_ctx->buffer_wq, &qcc->buf_wait.list);
 	}
 	return buf;


@@ -289,7 +289,7 @@ void pool_evict_from_local_caches()
 	struct pool_head *pool;
 	do {
-		item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
+		item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
 		/* note: by definition we remove oldest objects so they also are the
 		 * oldest in their own pools, thus their next is the pool's head.
 		 */
@@ -315,7 +315,7 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr)
 	struct pool_cache_head *ph = &pool->cache[tid];
 	LIST_INSERT(&ph->list, &item->by_pool);
-	LIST_INSERT(&ti->pool_lru_head, &item->by_lru);
+	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
 	ph->count++;
 	pool_cache_count++;
 	pool_cache_bytes += pool->size;
@@ -640,7 +640,7 @@ static void init_pools()
 	int thr;
 	for (thr = 0; thr < MAX_THREADS; thr++) {
-		LIST_INIT(&ha_thread_info[thr].pool_lru_head);
+		LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
 	}
 #endif
 	detect_allocator();
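
These pool hunks rely on each cached object being linked twice: via by_pool into its own pool's cache, and via by_lru into the thread-wide LRU that now lives in th_ctx, so eviction can always pick the globally oldest object with LIST_PREV on the LRU head. A minimal standalone sketch of that double linkage (the list helpers and names are simplified stand-ins for HAProxy's):

#include <stdio.h>
#include <stddef.h>

struct list { struct list *n, *p; };

struct pool_cache_item {
	struct list by_pool;   /* link in this pool's own cache (unused in the demo) */
	struct list by_lru;    /* link in the thread's global LRU */
};

static struct list pool_lru_head = { &pool_lru_head, &pool_lru_head };

static void list_insert(struct list *head, struct list *el)
{
	el->n = head->n; el->p = head;
	head->n->p = el; head->n = el;
}

int main(void)
{
	struct pool_cache_item a, b;

	/* pool_put_to_cache(): newest items go to the front of the LRU */
	list_insert(&pool_lru_head, &a.by_lru);
	list_insert(&pool_lru_head, &b.by_lru);

	/* pool_evict_from_local_caches(): the node before the head is the oldest */
	struct pool_cache_item *oldest = (struct pool_cache_item *)
		((char *)pool_lru_head.p - offsetof(struct pool_cache_item, by_lru));
	printf("evicting oldest: %s\n", oldest == &a ? "a" : "b");
	return 0;
}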


@@ -2017,7 +2017,7 @@ struct task *hard_stop(struct task *t, void *context, unsigned int state)
 	thread_isolate();
 	for (thr = 0; thr < global.nbthread; thr++) {
-		list_for_each_entry(s, &ha_thread_info[thr].streams, list) {
+		list_for_each_entry(s, &ha_thread_ctx[thr].streams, list) {
 			stream_shutdown(s, SF_ERR_KILLED);
 		}
 	}


@@ -548,7 +548,7 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin, struct bu
 	s->tunnel_timeout = TICK_ETERNITY;
-	LIST_APPEND(&ti->streams, &s->list);
+	LIST_APPEND(&th_ctx->streams, &s->list);
 	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
 		goto out_fail_accept;
@@ -720,7 +720,7 @@ static void stream_free(struct stream *s)
 		 * only touch their node under thread isolation.
 		 */
 		LIST_DEL_INIT(&bref->users);
-		if (s->list.n != &ti->streams)
+		if (s->list.n != &th_ctx->streams)
 			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
 		bref->ref = s->list.n;
 		__ha_barrier_store();
@@ -778,7 +778,7 @@ static int stream_alloc_work_buffer(struct stream *s)
 	if (b_alloc(&s->res.buf))
 		return 1;
-	LIST_APPEND(&ti->buffer_wq, &s->buffer_wait.list);
+	LIST_APPEND(&th_ctx->buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
@@ -2818,7 +2818,7 @@ static void init_stream()
 	int thr;
 	for (thr = 0; thr < MAX_THREADS; thr++)
-		LIST_INIT(&ha_thread_info[thr].streams);
+		LIST_INIT(&ha_thread_ctx[thr].streams);
 }
 INITCALL0(STG_INIT, init_stream);
@@ -3495,7 +3495,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
 		 * pointer points back to the head of the streams list.
 		 */
 		LIST_INIT(&appctx->ctx.sess.bref.users);
-		appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+		appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
 		appctx->st2 = STAT_ST_LIST;
 		/* fall through */
@@ -3512,7 +3512,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
 		struct stream *curr_strm;
 		int done= 0;
-		if (appctx->ctx.sess.bref.ref == &ha_thread_info[appctx->ctx.sess.thr].streams)
+		if (appctx->ctx.sess.bref.ref == &ha_thread_ctx[appctx->ctx.sess.thr].streams)
 			done = 1;
 		else {
 			/* check if we've found a stream created after issuing the "show sess" */
@@ -3525,7 +3525,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
 			appctx->ctx.sess.thr++;
 			if (appctx->ctx.sess.thr >= global.nbthread)
 				break;
-			appctx->ctx.sess.bref.ref = ha_thread_info[appctx->ctx.sess.thr].streams.n;
+			appctx->ctx.sess.bref.ref = ha_thread_ctx[appctx->ctx.sess.thr].streams.n;
 			continue;
 		}
@@ -3732,7 +3732,7 @@ static int cli_parse_shutdown_session(char **args, char *payload, struct appctx
 	/* first, look for the requested stream in the stream table */
 	for (thr = 0; !strm && thr < global.nbthread; thr++) {
-		list_for_each_entry(strm, &ha_thread_info[thr].streams, list) {
+		list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
 			if (strm == ptr) {
 				stream_shutdown(strm, SF_ERR_KILLED);
 				break;
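
The stream_free() and cli_io_handler_dump_sess() hunks above cooperate through back-references: a paused "show sess" dump parks a struct bref on the next stream it intends to visit, and stream_free() slides any parked bookmark forward before the stream disappears, so the dump can resume later without touching freed memory. Below is a condensed standalone sketch of that mechanism; the list helpers and names are simplified stand-ins for HAProxy's, and the real code additionally relies on barriers and thread isolation.

#include <stdio.h>
#include <stddef.h>

struct list { struct list *n, *p; };

struct bref {
	struct list users;   /* anchor in the watched stream's back_refs */
	struct list *ref;    /* next list node the paused dump will visit */
};

struct stream {
	struct list list;        /* node in the thread's streams list */
	struct list back_refs;   /* brefs currently parked on this stream */
};

#define ELEM(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct list streams = { &streams, &streams };

static void list_append(struct list *h, struct list *e)
{
	e->p = h->p; e->n = h; h->p->n = e; h->p = e;
}

static void list_del(struct list *e)
{
	e->p->n = e->n; e->n->p = e->p; e->n = e->p = e;
}

/* what the stream_free() hunk does: before a stream dies, every bookmark
 * parked on it is moved to the next stream (or left pointing at the list
 * head when this was the last stream) */
static void stream_free(struct stream *s)
{
	while (s->back_refs.n != &s->back_refs) {
		struct bref *bref = ELEM(s->back_refs.n, struct bref, users);
		list_del(&bref->users);
		if (s->list.n != &streams)
			list_append(&ELEM(s->list.n, struct stream, list)->back_refs,
			            &bref->users);
		bref->ref = s->list.n;
	}
	list_del(&s->list);
}

int main(void)
{
	struct stream s1 = { .back_refs = { &s1.back_refs, &s1.back_refs } };
	struct stream s2 = { .back_refs = { &s2.back_refs, &s2.back_refs } };
	struct bref bref;

	list_append(&streams, &s1.list);
	list_append(&streams, &s2.list);

	/* a paused "show sess" bookmarks s1 as the next stream to dump */
	bref.ref = &s1.list;
	list_append(&s1.back_refs, &bref.users);

	stream_free(&s1);    /* s1 is released while the dump is paused */

	/* the dump resumes safely at s2 instead of freed memory */
	printf("resume at %s\n", bref.ref == &s2.list ? "s2" : "list head");
	return 0;
}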