[OPTIM] task_queue: assume most consecutive timers are equal

When queuing a timer, it is very likely that its expiration date
equals that of the previously queued timer, because expiration
dates are rounded to the millisecond. Optimizing for this case
provides a noticeable 1% performance boost.
Willy Tarreau 2008-07-05 18:16:19 +02:00
parent 91e99931b7
commit ce44f12c1e
2 changed files with 30 additions and 6 deletions
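
The trick, as a minimal standalone sketch: remember the last queued timer, and when the next timer carries the same key, chain it as a duplicate in O(1) instead of searching the structure again. The sorted linked list below merely stands in for HAProxy's eb32 tree, and every name in it (struct timer, wq, queue_timer) is hypothetical rather than HAProxy's actual API:

#include <stdio.h>

/* Toy wait queue: a sorted singly linked list in which every node
 * carries a chain of duplicates. Only the "remember the last queued
 * timer" shortcut matters here; the list itself is a stand-in for
 * the eb32 tree used by the real code.
 */
struct timer {
	unsigned int key;       /* expiration date, rounded to 1 ms */
	struct timer *dup;      /* next timer with the same key */
	struct timer *next;     /* next distinct expiration date */
};

static struct timer *wq;         /* wait queue head */
static struct timer *last_timer; /* last queued timer, or NULL */

static void queue_timer(struct timer *t)
{
	struct timer **p;

	if (last_timer && last_timer->key == t->key) {
		/* fast path: same expiration date as the previously
		 * queued timer, chain it as a duplicate without
		 * walking the queue again.
		 */
		t->dup = last_timer->dup;
		last_timer->dup = t;
		return;
	}

	/* slow path: ordered insertion */
	for (p = &wq; *p && (*p)->key < t->key; p = &(*p)->next)
		;
	if (*p && (*p)->key == t->key) {
		/* equal key reached via the slow path: chain it too */
		t->dup = (*p)->dup;
		(*p)->dup = t;
	} else {
		t->next = *p;
		*p = t;
	}
	last_timer = t;
}

int main(void)
{
	struct timer a = { .key = 100 }, b = { .key = 100 }, c = { .key = 105 };

	queue_timer(&a);
	queue_timer(&b); /* takes the fast path: same 100 ms expiration */
	queue_timer(&c);

	for (struct timer *t = wq; t; t = t->next) {
		unsigned int n = 1;
		for (struct timer *d = t->dup; d; d = d->dup)
			n++;
		printf("key=%u dups=%u\n", t->key, n);
	}
	return 0;
}

Note that the real patch guards the fast path with one extra condition, last_timer->eb.node.node_p, which per the comment in the diff below ensures the cached timer is not queued at the tree root before a duplicate is hung off it.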

include/proto/task.h

@@ -35,6 +35,7 @@
 extern unsigned int run_queue;    /* run queue size */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;
+extern struct task *last_timer;   /* optimization: last queued timer */
 
 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
 int init_task();
@@ -63,11 +64,23 @@ static inline struct task *task_sleep(struct task *t)
  * - wait queue
  * A pointer to the task itself is returned.
  */
+static inline struct task *task_dequeue(struct task *t)
+{
+	if (likely(t->eb.node.leaf_p)) {
+		if (last_timer == t)
+			last_timer = NULL;
+		eb32_delete(&t->eb);
+	}
+	return t;
+}
+
+/*
+ * Unlinks the task and adjusts run queue stats.
+ * A pointer to the task itself is returned.
+ */
 static inline struct task *task_delete(struct task *t)
 {
-	if (t->eb.node.leaf_p)
-		eb32_delete(&t->eb);
-
+	task_dequeue(t);
 	if (t->state == TASK_RUNNING) {
 		run_queue--;
 		if (likely(t->nice))

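A subtlety visible in the hunk above: the cached pointer must not outlive the task's presence in the wait queue, which is why the new task_dequeue() resets last_timer before calling eb32_delete(). Otherwise a later task_queue() could chain a duplicate onto a node that is no longer queued, or onto memory that has been freed. Continuing the hypothetical sketch from above (same assumed types, not HAProxy's API):

/* Remove a timer from the toy wait queue (assumes it is at the head
 * of its dup chain; HAProxy's eb32_delete() handles the general
 * case). The essential line is the last_timer reset, mirroring
 * task_dequeue() in the diff above.
 */
static void dequeue_timer(struct timer *t)
{
	struct timer **p;

	if (last_timer == t)
		last_timer = NULL;

	for (p = &wq; *p; p = &(*p)->next) {
		if (*p == t) {
			if (t->dup) {
				/* promote the first duplicate */
				t->dup->next = t->next;
				*p = t->dup;
			} else {
				*p = t->next;
			}
			return;
		}
	}
}
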
src/task.c

@@ -25,6 +25,7 @@ struct pool_head *pool2_task;
 
 unsigned int run_queue = 0;
 unsigned int niced_tasks = 0;  /* number of niced tasks in the run queue */
+struct task *last_timer = NULL; /* optimization: last queued timer */
 
 /* Principle of the wait queue.
  *
@@ -136,8 +137,7 @@ struct task *task_wakeup(struct task *t)
 	if (t->state == TASK_RUNNING)
 		return t;
 
-	if (likely(t->eb.node.leaf_p))
-		eb32_delete(&t->eb);
+	task_dequeue(t);
 
 	run_queue++;
 	t->eb.key = ++rqueue_ticks;
@@ -180,7 +180,18 @@ struct task *task_queue(struct task *task)
 		/* we're queuing too far away or in the past (most likely) */
 		return task;
 #endif
 
+	if (likely(last_timer &&
+		   last_timer->eb.key == task->eb.key &&
+		   last_timer->eb.node.node_p)) {
+		/* Most often, last queued timer has the same expiration date, so
+		 * if it's not queued at the root, let's queue a dup directly there.
+		 */
+		eb_insert_dup(&last_timer->eb.node, &task->eb.node);
+		return task;
+	}
+
 	eb32_insert(&timers[ticks_to_tree(task->eb.key)], &task->eb);
+	last_timer = task;
 	return task;
 }
@@ -280,7 +291,7 @@ void process_runnable_tasks(struct timeval *next)
 		if (likely(t->nice))
 			niced_tasks--;
 		t->state = TASK_IDLE;
-		eb32_delete(&t->eb);
+		task_dequeue(t);
 
 		t->process(t, &temp);
 		tv_bound(next, &temp);