mirror of
http://git.haproxy.org/git/haproxy.git/
synced 2025-01-30 10:06:43 +00:00
MEDIUM: sched: implement task_kill() to kill a task
task_kill() may be used by any thread to kill any task with less overhead than a regular wakeup. In order to achieve this, it bypasses the priority tree and inserts the task directly into the shared tasklets list, cast as a tasklet. The task_list_size is updated to make sure it is properly decremented after execution of this task. The task will thus be picked by process_runnable_tasks() after checking the tree and sent to the TL_URGENT list, where it will be processed and killed. If the task is bound to more than one thread, its first thread will be the one notified. If the task was already queued or running, nothing is done, only the flag is added so that it gets killed before or after execution. Of course it's the caller's responsibility to make sure any resources allocated by this task were already cleaned up or taken over.
This commit is contained in:
parent
8a6049c268
commit
eb8c2c69fa
@@ -109,6 +109,7 @@ extern struct task_per_thread task_per_thread[MAX_THREADS];
|
||||
__decl_thread(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
__decl_thread(extern HA_RWLOCK_T wq_lock); /* RW lock related to the wait queue */

/* flags task <t> for destruction from any thread; see task.c for the contract */
void task_kill(struct task *t);
void __task_wakeup(struct task *t, struct eb_root *);
void __task_queue(struct task *task, struct eb_root *wq);
51
src/task.c
51
src/task.c
@@ -57,6 +57,57 @@ static unsigned int rqueue_ticks; /* insertion count */
|
||||
|
||||
/* per-thread scheduler context, one slot per possible thread, indexed by
 * thread id (see task_kill() below which accesses task_per_thread[thr]).
 */
struct task_per_thread task_per_thread[MAX_THREADS];
/* Flags the task <t> for immediate destruction and puts it into its first
 * thread's shared tasklet list if not yet queued/running. This will bypass
 * the priority scheduling and make the task show up as fast as possible in
 * the other thread's queue. Note that this operation isn't idempotent and is
 * not supposed to be run on the same task from multiple threads at once. It's
 * the caller's responsibility to make sure it is the only one able to kill the
 * task.
 */
void task_kill(struct task *t)
{
	/* snapshot of t->state; refreshed by each failed CAS below */
	unsigned short state = t->state;
	unsigned int thr;

	/* not idempotent: a second kill on the same task is a caller bug */
	BUG_ON(state & TASK_KILLED);

	while (1) {
		while (state & (TASK_RUNNING | TASK_QUEUED)) {
			/* task already in the queue and about to be executed,
			 * or even currently running. Just add the flag and be
			 * done with it, the process loop will detect it and kill
			 * it. The CAS will fail if we arrive too late.
			 */
			if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
				return;
			/* CAS failed: <state> was reloaded, re-examine it */
		}

		/* We'll have to wake it up, but we must also secure it so that
		 * it doesn't vanish under us. TASK_QUEUED guarantees nobody will
		 * add past us.
		 */
		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
			/* Bypass the tree and go directly into the shared tasklet list.
			 * Note: that's a task so it must be accounted for as such. Pick
			 * the task's first thread for the job.
			 */
			thr = my_ffsl(t->thread_mask) - 1;
			if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
			                 (struct mt_list *)&((struct tasklet *)t)->list)) {
				/* successfully queued: account for it as a task so that
				 * task_list_size is decremented after execution.
				 */
				_HA_ATOMIC_ADD(&tasks_run_queue, 1);
				_HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
				if (sleeping_thread_mask & (1UL << thr)) {
					/* target thread is asleep: wake it so the
					 * kill is processed without delay.
					 */
					_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
					wake_thread(thr);
				}
				return;
			}
			/* NOTE(review): ADDQ presumably failed because the element was
			 * already in a list — loop and retry; confirm against MT_LIST_ADDQ.
			 */
		}
		/* CAS failed: state changed under us, retry from the new state */
	}
}
|
||||
|
||||
/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
|
||||
* returned. The nice value assigns boosts in 32th of the run queue size. A
|
||||
* nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
|
||||
|
Loading…
Reference in New Issue
Block a user