MEDIUM: tasks: Get rid of active_tasks_mask.
Remove the active_tasks_mask variable: it is costly to maintain, and we can deduce whether we have work to do by other means. Instead, introduce a new function, thread_has_tasks(), that returns non-zero if there are tasks scheduled for the thread, and zero otherwise.
This commit is contained in:
parent
661167d136
commit
cfbb3e6560
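The idea behind the new check can be sketched outside of HAProxy. Below is a minimal, self-contained model (an illustration only, not HAProxy code: the per_thread struct, the 64-entry array, the task_list_nonempty flag, the tid parameter and main() are simplified stand-ins invented here; only the three conditions mirror the thread_has_tasks() added in the diff): a thread is considered to have work if its bit is set in the global run-queue mask, if its local run queue is non-empty, or if its tasklet list is non-empty, so no separate active_tasks_mask needs to be kept up to date.

#include <stdio.h>

/* Simplified stand-ins for the real structures (invented for this sketch):
 * each thread has a local run queue size and a tasklet list, and a global
 * bit mask flags the threads that have entries in the shared run queue.
 */
struct per_thread {
        unsigned int rqueue_size;      /* tasks queued in the local run queue */
        int task_list_nonempty;        /* stand-in for !LIST_ISEMPTY(&task_list) */
};

static volatile unsigned long global_tasks_mask;  /* threads with global-queue tasks */
static struct per_thread task_per_thread[64];

/* Same idea as the helper added below: a thread has work if its bit is set
 * in the global run-queue mask, or its local run queue is non-empty, or its
 * tasklet list is non-empty. Nothing extra has to be kept in sync.
 */
static int thread_has_tasks(unsigned int tid)
{
        unsigned long tid_bit = 1UL << tid;

        return !!(global_tasks_mask & tid_bit) |
               (task_per_thread[tid].rqueue_size > 0) |
               (task_per_thread[tid].task_list_nonempty != 0);
}

int main(void)
{
        task_per_thread[2].rqueue_size = 1;   /* thread 2 has a local task */
        global_tasks_mask = 1UL << 5;         /* thread 5 has a global task */

        printf("thread 0: %d\n", thread_has_tasks(0)); /* 0: nothing to do */
        printf("thread 2: %d\n", thread_has_tasks(2)); /* 1: local run queue */
        printf("thread 5: %d\n", thread_has_tasks(5)); /* 1: global run queue */
        return 0;
}
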
@@ -83,7 +83,6 @@
 /* a few exported variables */
 extern unsigned int nb_tasks; /* total number of tasks */
-extern volatile unsigned long active_tasks_mask; /* Mask of threads with active tasks */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int tasks_run_queue; /* run queue size */
 extern unsigned int tasks_run_queue_cur;

@@ -233,7 +232,6 @@ static inline void tasklet_wakeup(struct tasklet *tl)
                 return;
         LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list);
         task_per_thread[tid].task_list_size++;
-        _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
         _HA_ATOMIC_ADD(&tasks_run_queue, 1);

 }

@@ -541,6 +539,13 @@ static inline int notification_registered(struct list *wake)
         return !LIST_ISEMPTY(wake);
 }

+static inline int thread_has_tasks(void)
+{
+        return (!!(global_tasks_mask & tid_bit) |
+                (task_per_thread[tid].rqueue_size > 0) |
+                !LIST_ISEMPTY(&task_per_thread[tid].task_list));
+}
+
 /*
  * This does 3 things :
  * - wake up all expired tasks

@@ -47,7 +47,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
               "%c%cThread %-2u: act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
               " stuck=%d fdcache=%d prof=%d",
               (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
-              !!(active_tasks_mask & thr_bit),
+              thread_has_tasks(),
               !!(global_tasks_mask & thr_bit),
               !eb_is_empty(&task_per_thread[thr].timers),
               !eb_is_empty(&task_per_thread[thr].rqueue),

@@ -2528,14 +2528,14 @@ static void run_poll_loop()
                         wake = 1;
                 if (fd_cache_mask & tid_bit)
                         activity[tid].wake_cache++;
-                else if (active_tasks_mask & tid_bit)
+                else if (thread_has_tasks())
                         activity[tid].wake_tasks++;
                 else if (signal_queue_len && tid == 0)
                         activity[tid].wake_signal++;
                 else {
                         _HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
                         __ha_barrier_atomic_store();
-                        if (active_tasks_mask & tid_bit) {
+                        if (global_tasks_mask & tid_bit) {
                                 activity[tid].wake_tasks++;
                                 _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                         } else

src/task.c
@@ -35,7 +35,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

 unsigned int nb_tasks = 0;
-volatile unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int tasks_run_queue = 0;
 unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */

@@ -82,7 +81,6 @@ void __task_wakeup(struct task *t, struct eb_root *root)
                 __ha_barrier_store();
         }
 #endif
-        _HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
         t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);

         if (likely(t->nice)) {

@@ -308,7 +306,7 @@ void process_runnable_tasks()

         ti->flags &= ~TI_FL_STUCK; // this thread is still running

-        if (!(active_tasks_mask & tid_bit)) {
+        if (!thread_has_tasks()) {
                 activity[tid].empty_rq++;
                 return;
         }

@@ -381,13 +379,6 @@ void process_runnable_tasks()
                 grq = NULL;
         }

-        if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
-                _HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-                __ha_barrier_atomic_load();
-                if (global_tasks_mask & tid_bit)
-                        _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
-        }
-
         while (max_processed > 0 && !LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
                 struct task *t;
                 unsigned short state;

@@ -449,10 +440,8 @@ void process_runnable_tasks()
                 max_processed--;
         }

-        if (!LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
-                _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
+        if (!LIST_ISEMPTY(&task_per_thread[tid].task_list))
                 activity[tid].long_rq++;
-        }
 }

 /*