/*
 * Task management functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <string.h>

#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>
#include <eb32tree.h>

#include <proto/proxy.h>
#include <proto/session.h>
#include <proto/task.h>

struct pool_head *pool2_task;

unsigned int nb_tasks = 0;
unsigned int run_queue = 0;
unsigned int run_queue_cur = 0;    /* copy of the run queue size */
unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
struct eb32_node *last_timer = NULL; /* optimization: last queued timer */

static struct eb_root timers;      /* sorted timers tree */
static struct eb_root rqueue;      /* tree constituting the run queue */
static unsigned int rqueue_ticks;  /* insertion count */

/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32ths of the run queue size. A
 * nice value of -1024 sets the task to -run_queue*32, while a nice value of
 * 1024 sets the task to run_queue*32. The state flags are cleared, so the
 * caller will have to set its flags after this call.
 * The task must not already be in the run queue. If unsure, use the safer
 * task_wakeup() function.
 */
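/* Illustrative figures (not from the original comment): with run_queue == 64,
 * a task woken with nice == 512 gets offset = 64 * 512 / 32 = 1024, i.e. it
 * is queued 1024 insertion ticks later than a neutral task woken at the same
 * instant, while nice == -512 would queue it 1024 ticks earlier.
 */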
struct task *__task_wakeup(struct task *t)
{
	run_queue++;
	t->rq.key = ++rqueue_ticks;

	if (likely(t->nice)) {
		int offset;

		niced_tasks++;
		if (likely(t->nice > 0))
			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
		else
			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
		t->rq.key += offset;
	}

	/* clear state flags at the same time */
	t->state &= ~TASK_WOKEN_ANY;

	eb32_insert(&rqueue, &t->rq);
	return t;
}
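
/* A typical call site is expected to go through the task_wakeup() inline
 * rather than calling this function directly: it records the wakeup reason
 * flags and only falls back to __task_wakeup() when the task is not already
 * queued, e.g.:
 *	task_wakeup(t, TASK_WOKEN_TIMER);
 * as done in wake_expired_tasks() below.
 */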

/*
 * __task_queue()
 *
 * Inserts a task into the wait queue at the position given by its expiration
 * date. It does not matter if the task was already in the wait queue or not,
 * as it will be unlinked. The task must not have an infinite expiration timer.
 * Last, tasks must not be queued further than the end of the tree, which is
 * between <now_ms> and <now_ms> + 2^31 ms (now + ~24.8 days with 32-bit
 * millisecond ticks).
 *
 * This function should not be used directly, it is meant to be called by the
 * inline version of task_queue() which performs a few cheap preliminary tests
 * before deciding to call __task_queue().
 */
void __task_queue(struct task *task)
{
	if (likely(task_in_wq(task)))
		__task_unlink_wq(task);

	/* the task is not in the queue now */
	task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
	if (tick_is_lt(task->wq.key, now_ms))
		/* we're queuing too far away or in the past (most likely) */
		return;
#endif

	if (likely(last_timer &&
		   last_timer->node.bit < 0 &&
		   last_timer->key == task->wq.key &&
		   last_timer->node.node_p)) {
		/* Most often, the last queued timer has the same expiration date,
		 * so if it's not queued at the root, let's queue a dup directly
		 * there. Note that we can only use dups at the dup tree's root
		 * (most negative bit).
		 */
		eb_insert_dup(&last_timer->node, &task->wq.node);
		if (task->wq.node.bit < last_timer->node.bit)
			last_timer = &task->wq;
		return;
	}
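	/* No reusable dup root: perform a regular tree insertion. */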
	eb32_insert(&timers, &task->wq);

	/* Make sure we don't assign last_timer to a node-less entry. When a
	 * node is inserted into an empty tree, its node part remains unused
	 * (node_p is NULL) and node.bit is left undefined, so the node.bit
	 * comparisons above and below must never be applied to such an entry.
	 * Checking node_p first avoids dereferencing an undefined value.
	 */
	if (task->wq.node.node_p && (!last_timer || (task->wq.node.bit < last_timer->node.bit)))
		last_timer = &task->wq;
	return;
}

/*
 * Extracts all expired timers from the timer queue, and wakes up all
 * associated tasks. Returns the date of the next event (or eternity) in
 * <next>.
 */
void wake_expired_tasks(int *next)
{
	struct task *task;
	struct eb32_node *eb;

	eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
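	/* Note (assuming TIMER_LOOK_BACK covers about half of the 32-bit tick
	 * range): starting the lookup at <now_ms - TIMER_LOOK_BACK> lets us
	 * find timers queued shortly before <now_ms> even when their keys
	 * have wrapped around the 32-bit space.
	 */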
	while (1) {
		if (unlikely(!eb)) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&timers);
			if (likely(!eb))
				break;
		}

		if (likely(tick_is_lt(now_ms, eb->key))) {
			/* timer not expired yet, revisit it later */
			*next = eb->key;
			return;
		}

		/* timer looks expired, detach it from the queue */
		task = eb32_entry(eb, struct task, wq);
		eb = eb32_next(eb);
		__task_unlink_wq(task);

		/* It is possible that this task was left at an earlier place in the
		 * tree because a recent call to task_queue() has not moved it. This
		 * happens when the new expiration date is later than the old one.
		 * Since it is very unlikely that we reach a timeout anyway, it's a
		 * lot cheaper to proceed like this because we almost never update
		 * the tree. We may also find disabled expiration dates there. Since
		 * we have detached the task from the tree, we simply call task_queue
		 * to take care of this. Note that we might occasionally requeue it at
		 * the same place, before <eb>, so we have to check if this happens,
		 * and adjust <eb>, otherwise we may skip it which is not what we want.
		 * We may also not requeue the task (and not point eb at it) if its
		 * expiration time is not set.
		 */
		if (!tick_is_expired(task->expire, now_ms)) {
			if (!tick_isset(task->expire))
				continue;
			__task_queue(task);
			if (!eb || eb->key > task->wq.key)
				eb = &task->wq;
			continue;
		}
		task_wakeup(task, TASK_WOKEN_TIMER);
	}

	/* We have found no task to expire in any tree */
	*next = TICK_ETERNITY;
	return;
}

/* The run queue is chronologically sorted in a tree. An insertion counter is
 * used to assign a position to each task. This counter may be combined with
 * other variables (eg: nice value) to set the final position in the tree. The
 * counter may wrap without a problem, of course. We then limit the number of
 * tasks processed at once to 1/4 of the number of tasks in the queue, and to
 * 200 max in any case, so that general latency remains low and so that task
 * positions have a chance to be considered.
 *
 * The function adjusts <next> if a new event is closer.
 */
void process_runnable_tasks(int *next)
{
	struct task *t;
	struct eb32_node *eb;
	unsigned int max_processed;
	int expire;

	run_queue_cur = run_queue; /* keep a copy for reporting */
	nb_tasks_cur = nb_tasks;
	max_processed = run_queue;

	if (!run_queue)
		return;

	if (max_processed > 200)
		max_processed = 200;

	if (likely(niced_tasks))
		max_processed = (max_processed + 3) / 4;
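	/* Illustrative figures: with run_queue == 1000, max_processed is first
	 * capped to 200; if any niced tasks are queued, it is further reduced
	 * to (200 + 3) / 4 = 50, so that task positions are re-examined more
	 * often and reniced tasks get served at their adjusted positions.
	 */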

	expire = *next;
	eb = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
	while (max_processed--) {
		/* Note: this loop is one of the fastest code paths in
		 * the whole program. It should not be re-arranged
		 * without a good reason.
		 */

		if (unlikely(!eb)) {
			/* we might have reached the end of the tree, typically because
			 * <rqueue_ticks> is in the first half and we're first scanning
			 * the last half. Let's loop back to the beginning of the tree now.
			 */
			eb = eb32_first(&rqueue);
			if (likely(!eb))
				break;
		}

		/* detach the task from the queue */
		t = eb32_entry(eb, struct task, rq);
		eb = eb32_next(eb);
		__task_unlink_rq(t);

		t->state |= TASK_RUNNING;
		/* This is an optimisation to help the processor's branch
		 * predictor take this most common call.
		 */
		t->calls++;
		if (likely(t->process == process_session))
			t = process_session(t);
		else
			t = t->process(t);
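		/* A NULL return is taken to mean the handler has released the
		 * task itself, in which case there is nothing left to requeue.
		 */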

		if (likely(t != NULL)) {
			t->state &= ~TASK_RUNNING;
			if (t->expire) {
				task_queue(t);
				expire = tick_first_2nz(expire, t->expire);
			}

			/* if the task has put itself back into the run queue, we want to
			 * ensure it will be served at the proper time, especially if it's
			 * reniced.
			 */
			if (unlikely(task_in_rq(t)) && (!eb || tick_is_lt(t->rq.key, eb->key))) {
				eb = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
			}
		}
	}
	*next = expire;
}

/* Perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task()
{
	memset(&timers, 0, sizeof(timers));
	memset(&rqueue, 0, sizeof(rqueue));
	pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
	return pool2_task != NULL;
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */