From 9d4b56b88e01ac874a158f9bd4dd8c804ba61875 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Mon, 6 Nov 2017 08:36:53 +0100
Subject: [PATCH] MINOR: tasks: only visit filled task slots after processing
 them

process_runnable_tasks() needs to requeue or wake up tasks after
processing them in batches. By only refilling the existing ones, we
avoid revisiting all the queue. The performance gain is measurable
starting with two threads, where the request rate climbs to 657k/s
compared to 644k.
---
 src/task.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/src/task.c b/src/task.c
index 37994a331..3d61f98cf 100644
--- a/src/task.c
+++ b/src/task.c
@@ -188,6 +188,7 @@ void process_runnable_tasks()
 	struct eb32sc_node *rq_next;
 	struct task *local_tasks[16];
 	int local_tasks_count;
+	int final_tasks_count;
 	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
 	nb_tasks_cur = nb_tasks;
 	max_processed = tasks_run_queue;
@@ -241,6 +242,7 @@ void process_runnable_tasks()
 
 		SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
+		final_tasks_count = 0;
 		for (i = 0; i < local_tasks_count ; i++) {
 			t = local_tasks[i];
 			/* This is an optimisation to help the processor's branch
@@ -250,24 +252,23 @@ void process_runnable_tasks()
 				t = process_stream(t);
 			else
 				t = t->process(t);
-			local_tasks[i] = t;
+			if (t)
+				local_tasks[final_tasks_count++] = t;
 		}
 
 		SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-		for (i = 0; i < local_tasks_count ; i++) {
+		for (i = 0; i < final_tasks_count ; i++) {
 			t = local_tasks[i];
-			if (likely(t != NULL)) {
-				t->state &= ~TASK_RUNNING;
-				/* If there is a pending state
-				 * we have to wake up the task
-				 * immediatly, else we defer
-				 * it into wait queue
-				 */
-				if (t->pending_state)
-					__task_wakeup(t);
-				else
-					task_queue(t);
-			}
+			t->state &= ~TASK_RUNNING;
+			/* If there is a pending state
+			 * we have to wake up the task
+			 * immediatly, else we defer
+			 * it into wait queue
+			 */
+			if (t->pending_state)
+				__task_wakeup(t);
+			else
+				task_queue(t);
 		}
 	} while (max_processed > 0);