mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2025-01-19 04:00:46 +00:00)
MINOR: tasks: add a mask of the queues with active tasklets
It is neither convenient nor scalable to check each and every tasklet queue to know whether it is empty when we often need to check them all at once. This patch introduces a tasklet class mask in which one bit is set for each queue representing a class of service. A single test on the mask then tells whether any work remains to be done. It will later be usable to better factor the runqueue code.

Bits are set when tasklets are queued and cleared when queues are emptied. A queue may be empty yet still have its bit set if a tasklet was added and then removed, but this is not a problem since run_tasks_from_list() properly checks for this case.
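To illustrate the idea outside the scheduler, here is a minimal, self-contained sketch of a class mask. The types and helpers (sched_model, model_enqueue, model_has_work) are simplified stand-ins invented for this example, not HAProxy's real structures, which use struct list queues and per-thread state:

#include <stdint.h>
#include <stdio.h>

enum tl_class { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES = 3 };

struct sched_model {
	int     queue_len[TL_CLASSES]; /* stand-in for the tasklet lists */
	uint8_t tl_class_mask;         /* one bit per non-empty class */
};

/* queueing a tasklet sets the bit of its class */
static void model_enqueue(struct sched_model *s, enum tl_class c)
{
	s->queue_len[c]++;
	s->tl_class_mask |= 1 << c;
}

/* a single test replaces one emptiness check per queue */
static int model_has_work(const struct sched_model *s)
{
	return !!s->tl_class_mask;
}

int main(void)
{
	struct sched_model s = { 0 };

	printf("work before: %d\n", model_has_work(&s)); /* 0 */
	model_enqueue(&s, TL_BULK);
	printf("work after:  %d\n", model_has_work(&s)); /* 1 */
	printf("mask: 0x%02x\n", s.tl_class_mask);       /* 0x04 */
	return 0;
}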
This commit is contained in:
parent c0a08ba2df
commit 49f90bf148
@@ -78,6 +78,7 @@ struct task_per_thread {
 	int rqueue_size;        /* Number of elements in the per-thread run queue */
 	struct task *current;   /* current task (not tasklet) */
 	int current_queue;      /* points to current tasklet list being run, -1 if none */
+	uint8_t tl_class_mask;  /* bit mask of non-empty tasklets classes */
 	__attribute__((aligned(64))) char end[0];
 };
@@ -166,9 +166,7 @@ static inline int thread_has_tasks(void)
 {
 	return (!!(global_tasks_mask & tid_bit) |
 		(sched->rqueue_size > 0) |
-		!LIST_ISEMPTY(&sched->tasklets[TL_URGENT]) |
-		!LIST_ISEMPTY(&sched->tasklets[TL_NORMAL]) |
-		!LIST_ISEMPTY(&sched->tasklets[TL_BULK]) |
+		!!sched->tl_class_mask |
 		!MT_LIST_ISEMPTY(&sched->shared_tasklet_list));
 }
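The hunk above collapses three per-queue emptiness checks into one test on the mask. A small sketch, exhaustively checking all eight emptiness combinations, shows why the two forms agree as long as the bits track the queues correctly (the flag names here are invented for the demonstration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* enumerate emptiness flags for TL_URGENT, TL_NORMAL, TL_BULK */
	for (unsigned m = 0; m < 8; m++) {
		int urgent = !!(m & 1), normal = !!(m & 2), bulk = !!(m & 4);
		uint8_t mask = (urgent << 0) | (normal << 1) | (bulk << 2);

		/* old form: one test per queue; new form: one test total */
		assert((urgent | normal | bulk) == !!mask);
	}
	return 0;
}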
@@ -330,16 +328,20 @@ static inline void tasklet_wakeup(struct tasklet *tl)
 	if (LIST_ISEMPTY(&tl->list)) {
 		if (tl->state & TASK_SELF_WAKING) {
 			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			sched->tl_class_mask |= 1 << TL_BULK;
 		}
 		else if ((struct task *)tl == sched->current) {
 			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
 			LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+			sched->tl_class_mask |= 1 << TL_BULK;
 		}
 		else if (sched->current_queue < 0) {
 			LIST_ADDQ(&sched->tasklets[TL_URGENT], &tl->list);
+			sched->tl_class_mask |= 1 << TL_URGENT;
 		}
 		else {
 			LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
+			sched->tl_class_mask |= 1 << sched->current_queue;
 		}

 		_HA_ATOMIC_ADD(&tasks_run_queue, 1);
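Every branch of tasklet_wakeup() now sets the class bit next to the matching LIST_ADDQ, so the mask can never lag behind an enqueue. The class-selection rule itself can be read off the hunk; the sketch below models it with simplified stand-in parameters (pick_class and its arguments are invented for illustration):

#include <stdio.h>

enum tl_class { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2 };

/* Self-waking tasklets, and tasklets woken by the currently running task,
 * are demoted to TL_BULK; wakeups from outside the tasklet loop go to
 * TL_URGENT; wakeups from within the loop stay in the class being run. */
static enum tl_class pick_class(int self_waking, int woken_by_current,
                                int current_queue)
{
	if (self_waking || woken_by_current)
		return TL_BULK;
	if (current_queue < 0)
		return TL_URGENT;
	return (enum tl_class)current_queue;
}

int main(void)
{
	printf("%d\n", pick_class(0, 0, -1));        /* 0: TL_URGENT */
	printf("%d\n", pick_class(1, 0, -1));        /* 2: TL_BULK */
	printf("%d\n", pick_class(0, 0, TL_NORMAL)); /* 1: stays TL_NORMAL */
	return 0;
}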
src/task.c (18 changed lines)
@@ -452,17 +452,17 @@ void process_runnable_tasks()
 	max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;

 	/* urgent tasklets list gets a default weight of ~50% */
-	if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]) ||
+	if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
 	    !MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
 		max[TL_URGENT] = default_weights[TL_URGENT];

 	/* normal tasklets list gets a default weight of ~37% */
-	if (!LIST_ISEMPTY(&tt->tasklets[TL_NORMAL]) ||
+	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
 	    (sched->rqueue_size > 0) || (global_tasks_mask & tid_bit))
 		max[TL_NORMAL] = default_weights[TL_NORMAL];

 	/* bulk tasklets list gets a default weight of ~13% */
-	if (!LIST_ISEMPTY(&tt->tasklets[TL_BULK]))
+	if ((tt->tl_class_mask & (1 << TL_BULK)))
 		max[TL_BULK] = default_weights[TL_BULK];

 	/* Now compute a fair share of the weights. Total may slightly exceed
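Here the mask decides which classes contribute their default weight (~50/37/13) before the fair share is computed. The sketch below shows one plausible way such weights could be turned into per-class budgets; it is an assumption for illustration only, and the actual arithmetic in process_runnable_tasks() (not shown in this hunk) may differ:

#include <stdio.h>

int main(void)
{
	const int default_weights[3] = { 50, 37, 13 }; /* ~50%, ~37%, ~13% */
	int max[3] = { 0 };
	int max_processed = 200, total = 0;
	unsigned tl_class_mask = (1 << 0) | (1 << 2);  /* urgent + bulk only */

	/* inactive classes contribute zero weight; the mask test above is
	 * what decides this cheaply */
	for (int q = 0; q < 3; q++)
		if (tl_class_mask & (1 << q))
			total += max[q] = default_weights[q];

	if (!total)
		return 0; /* nothing to run, avoid dividing by zero */

	/* scale each active weight against the sum of active weights */
	for (int q = 0; q < 3; q++) {
		max[q] = max[q] * max_processed / total;
		printf("class %d budget: %d\n", q, max[q]);
	}
	return 0;
}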
@@ -530,6 +530,7 @@ void process_runnable_tasks()
 			LIST_INIT(&((struct tasklet *)t)->list);
 			/* And add it to the local task list */
 			tasklet_insert_into_tasklet_list(&tt->tasklets[TL_NORMAL], (struct tasklet *)t);
+			tt->tl_class_mask |= 1 << TL_NORMAL;
 			tt->task_list_size++;
 			activity[tid].tasksw++;
 		}
@@ -544,8 +545,11 @@ void process_runnable_tasks()
 	 * main list.
 	 */
 	tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
-	if (tmp_list)
+	if (tmp_list) {
 		LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
+		if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
+			tt->tl_class_mask |= 1 << TL_URGENT;
+	}

 	/* execute tasklets in each queue */
 	for (queue = 0; queue < TL_CLASSES; queue++) {
@@ -553,6 +557,8 @@ void process_runnable_tasks()
 			tt->current_queue = queue;
 			max_processed -= run_tasks_from_list(&tt->tasklets[queue], max[queue]);
 			tt->current_queue = -1;
+			if (LIST_ISEMPTY(&tt->tasklets[queue]))
+				tt->tl_class_mask &= ~(1 << queue);
 		}
 	}
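This is where bits are cleared: after running a queue, its bit is dropped only if the queue is observed empty. The sketch below walks through the benign stale-bit case from the commit message with simplified stand-ins (plain counters instead of struct list); per the message, run_tasks_from_list() tolerates finding the queue already empty:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int queue_len[3] = { 0 };
	uint8_t tl_class_mask = 0;

	queue_len[1]++;            /* tasklet queued ... */
	tl_class_mask |= 1 << 1;
	queue_len[1]--;            /* ... then removed: the bit is now stale */
	assert(tl_class_mask & (1 << 1));

	/* run loop: clear the bit of every queue seen empty */
	for (int q = 0; q < 3; q++)
		if (queue_len[q] == 0)
			tl_class_mask &= ~(1 << q);

	assert(tl_class_mask == 0); /* stale bit gone, mask consistent again */
	return 0;
}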
@@ -560,9 +566,7 @@ void process_runnable_tasks()
 	if (max_processed > 0 && thread_has_tasks())
 		goto not_done_yet;

-	if (!LIST_ISEMPTY(&sched->tasklets[TL_URGENT]) |
-	    !LIST_ISEMPTY(&sched->tasklets[TL_NORMAL]) |
-	    !LIST_ISEMPTY(&sched->tasklets[TL_BULK]))
+	if (tt->tl_class_mask)
 		activity[tid].long_rq++;
 }