MINOR: task: introduce a thread-local "sched" variable for local scheduler stuff
The aim is to gather all scheduler information related to the current thread. The new variable simply points to task_per_thread[tid] without having to perform the indexing each time. This saves around 1.2 kB of code on performance-sensitive paths and increases the request rate by almost 1%.
This commit is contained in:
parent d66d75656e
commit d022e9c98b
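For context, the technique applied by this patch is a thread-local pointer that is resolved once per thread at startup and then dereferenced on every hot-path access, instead of recomputing task_per_thread[tid] each time. Below is a minimal standalone sketch of that idea, not HAProxy code: the sched_ctx struct, ctx_per_thread array, thread_main function and MAX_THREADS are illustrative names only; the `tid`/`sched` variables mirror the patch.

/* Sketch: per-thread context resolved once, then reused on hot paths.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define MAX_THREADS 4

struct sched_ctx {
	int rqueue_size;    /* number of queued items for this thread */
};

static struct sched_ctx ctx_per_thread[MAX_THREADS];

static __thread int tid;                  /* this thread's index */
static __thread struct sched_ctx *sched;  /* points to ctx_per_thread[tid] */

static void *thread_main(void *arg)
{
	tid = (int)(long)arg;
	sched = &ctx_per_thread[tid];     /* resolve the index once */

	/* hot path: plain pointer dereference, no ctx_per_thread[tid]
	 * address computation on each access */
	sched->rqueue_size++;
	printf("thread %d rqueue_size=%d\n", tid, sched->rqueue_size);
	return NULL;
}

int main(void)
{
	pthread_t th[MAX_THREADS];

	for (long i = 0; i < MAX_THREADS; i++)
		pthread_create(&th[i], NULL, thread_main, (void *)i);
	for (int i = 0; i < MAX_THREADS; i++)
		pthread_join(th[i], NULL);
	return 0;
}

In the actual patch, run_thread_poll_loop() performs the equivalent assignment (sched = &task_per_thread[tid];) right after ha_set_tid(), as shown in the diff below.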
@@ -91,7 +91,7 @@ extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;
-extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
+extern THREAD_LOCAL struct task_per_thread *sched; /* current's thread scheduler context */
 #ifdef USE_THREAD
 extern struct eb_root timers; /* sorted timers tree, global */
 extern struct eb_root rqueue; /* tree constituting the run queue */
@@ -132,11 +132,11 @@ static inline void task_wakeup(struct task *t, unsigned int f)
 	struct eb_root *root;
 
 	if (t->thread_mask == tid_bit || global.nbthread == 1)
-		root = &task_per_thread[tid].rqueue;
+		root = &sched->rqueue;
 	else
 		root = &rqueue;
 #else
-	struct eb_root *root = &task_per_thread[tid].rqueue;
+	struct eb_root *root = &sched->rqueue;
 #endif
 
 	state = _HA_ATOMIC_OR(&t->state, f);
@@ -201,7 +201,7 @@ static inline struct task *__task_unlink_rq(struct task *t)
 		global_rqueue_size--;
 	} else
 #endif
-		task_per_thread[tid].rqueue_size--;
+		sched->rqueue_size--;
 	eb32sc_delete(&t->rq);
 	if (likely(t->nice))
 		_HA_ATOMIC_SUB(&niced_tasks, 1);
@@ -236,7 +236,7 @@ static inline void tasklet_wakeup(struct tasklet *tl)
  */
 static inline void tasklet_insert_into_tasklet_list(struct tasklet *tl)
 {
-	if (MT_LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list) == 1)
+	if (MT_LIST_ADDQ(&sched->task_list, &tl->list) == 1)
 		_HA_ATOMIC_ADD(&tasks_run_queue, 1);
 }
 
@@ -317,8 +317,8 @@ static inline struct task *task_new(unsigned long thread_mask)
  */
 static inline void __task_free(struct task *t)
 {
-	if (t == curr_task) {
-		curr_task = NULL;
+	if (t == sched->current) {
+		sched->current = NULL;
 		__ha_barrier_store();
 	}
 	pool_free(pool_head_task, t);
@@ -346,7 +346,7 @@ static inline void task_destroy(struct task *t)
 	/* There's no need to protect t->state with a lock, as the task
 	 * has to run on the current thread.
 	 */
-	if (t == curr_task || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
+	if (t == sched->current || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
 		__task_free(t);
 	else
 		t->process = NULL;
@@ -401,7 +401,7 @@ static inline void task_queue(struct task *task)
 #endif
 	{
 		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
-			__task_queue(task, &task_per_thread[tid].timers);
+			__task_queue(task, &sched->timers);
 	}
 }
 
@@ -434,7 +434,7 @@ static inline void task_schedule(struct task *task, int when)
 
 		task->expire = when;
 		if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
-			__task_queue(task, &task_per_thread[tid].timers);
+			__task_queue(task, &sched->timers);
 	}
 }
 
@@ -537,8 +537,8 @@ static inline int notification_registered(struct list *wake)
 static inline int thread_has_tasks(void)
 {
 	return (!!(global_tasks_mask & tid_bit) |
-	        (task_per_thread[tid].rqueue_size > 0) |
-	        !MT_LIST_ISEMPTY(&task_per_thread[tid].task_list));
+	        (sched->rqueue_size > 0) |
+	        !MT_LIST_ISEMPTY(&sched->task_list));
 }
 
 /* adds list item <item> to work list <work> and wake up the associated task */
@@ -64,6 +64,7 @@ struct task_per_thread {
 	struct mt_list task_list; /* List of tasks to be run, mixing tasks and tasklets */
 	int task_list_size;       /* Number of tasks in the task_list */
 	int rqueue_size;          /* Number of elements in the per-thread run queue */
+	struct task *current;     /* current task (not tasklet) */
 	__attribute__((aligned(64))) char end[0];
 };
 
@@ -77,7 +77,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 		return;
 
 	chunk_appendf(buf, " curr_task=");
-	ha_task_dump(buf, curr_task, " ");
+	ha_task_dump(buf, sched->current, " ");
 }
 
 
@@ -2659,6 +2659,7 @@ static void *run_thread_poll_loop(void *data)
 	__decl_hathreads(static pthread_cond_t init_cond = PTHREAD_COND_INITIALIZER);
 
 	ha_set_tid((unsigned long)data);
+	sched = &task_per_thread[tid];
 
 #if (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
 #ifdef USE_THREAD
src/task.c (12 changed lines)
@@ -41,7 +41,7 @@ unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
-THREAD_LOCAL struct task *curr_task = NULL; /* task (not tasklet) currently running or NULL */
+THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */
 
 __decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
 __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
@@ -159,7 +159,7 @@ void __task_queue(struct task *task, struct eb_root *wq)
  */
 int wake_expired_tasks()
 {
-	struct task_per_thread * const tt = &task_per_thread[tid]; // thread's tasks
+	struct task_per_thread * const tt = sched; // thread's tasks
 	struct task *task;
 	struct eb32_node *eb;
 	int ret = TICK_ETERNITY;
@@ -300,7 +300,7 @@ leave:
  */
 void process_runnable_tasks()
 {
-	struct task_per_thread * const tt = &task_per_thread[tid]; // thread's tasks
+	struct task_per_thread * const tt = sched;
 	struct eb32sc_node *lrq = NULL; // next local run queue entry
 	struct eb32sc_node *grq = NULL; // next global run queue entry
 	struct task *t;
@@ -418,7 +418,7 @@ void process_runnable_tasks()
 			t->call_date = now_ns;
 		}
 
-		curr_task = t;
+		sched->current = t;
 		__ha_barrier_store();
 		if (likely(process == process_stream))
 			t = process_stream(t, ctx, state);
@@ -426,7 +426,7 @@ void process_runnable_tasks()
 			t = process(t, ctx, state);
 		else {
 			__task_free(t);
-			curr_task = NULL;
+			sched->current = NULL;
 			__ha_barrier_store();
 			/* We don't want max_processed to be decremented if
 			 * we're just freeing a destroyed task, we should only
@@ -434,7 +434,7 @@ void process_runnable_tasks()
 			 */
 			continue;
 		}
-		curr_task = NULL;
+		sched->current = NULL;
 		__ha_barrier_store();
 		/* If there is a pending state we have to wake up the task
 		 * immediately, else we defer it into wait queue