[MEDIUM] introduce task->nice and boost access to statistics

The run queue scheduler now takes task->nice into account both when
queuing a task and when picking one out of the queue. This makes it
possible to boost access to the statistics (both via HTTP and via the
UNIX socket). The UNIX socket receives twice the boost of the HTTP
page because it is more latency-sensitive.
commit 91e99931b7
parent 58b458d8ba
Author: Willy Tarreau
Date:   2008-06-30 07:51:00 +02:00

5 changed files with 42 additions and 9 deletions
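Before the per-file hunks, here is a minimal standalone sketch of the
scaling that task_wakeup() implements below. nice_offset() is an
illustrative helper, not a function from this patch; the formula is the
one added in src/task.c.

/* Hypothetical demo of the nice-to-offset scaling used by
 * task_wakeup() below: the queue-position offset is
 * run_queue * nice / 32, so nice -64 jumps twice as far
 * ahead of the queue as nice -32. */
#include <stdio.h>

static int nice_offset(unsigned int run_queue, int nice)
{
	if (nice > 0)
		return (int)((run_queue * (unsigned int)nice) / 32U);
	return -(int)((run_queue * (unsigned int)-nice) / 32U);
}

int main(void)
{
	unsigned int rq = 100;	/* pretend 100 tasks are queued */

	printf("nice -32 -> offset %d\n", nice_offset(rq, -32)); /* -100 */
	printf("nice -64 -> offset %d\n", nice_offset(rq, -64)); /* -200 */
	return 0;
}

With 100 queued tasks, nice -64 (the UNIX stats socket) jumps twice as
far ahead as nice -32 (the HTTP stats page), which is exactly the
"twice the boost" mentioned above.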

include/proto/task.h

@@ -32,7 +32,8 @@
 #include <types/task.h>

 extern unsigned int run_queue;    /* run queue size */
+extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;

 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
@@ -50,6 +51,8 @@ static inline struct task *task_sleep(struct task *t)
 		t->state = TASK_IDLE;
 		eb32_delete(&t->eb);
 		run_queue--;
+		if (likely(t->nice))
+			niced_tasks--;
 	}
 	return t;
 }
@@ -65,9 +68,11 @@ static inline struct task *task_delete(struct task *t)
 	if (t->eb.node.leaf_p)
 		eb32_delete(&t->eb);

-	if (t->state == TASK_RUNNING)
+	if (t->state == TASK_RUNNING) {
 		run_queue--;
+		if (likely(t->nice))
+			niced_tasks--;
+	}
 	return t;
 }
@@ -79,6 +84,7 @@ static inline struct task *task_init(struct task *t)
 {
 	t->eb.node.leaf_p = NULL;
 	t->state = TASK_IDLE;
+	t->nice = 0;
 	return t;
 }

include/types/task.h

@@ -39,6 +39,7 @@ struct task {
 	struct timeval expire;	/* next expiration time for this task, use only for fast sorting */
 	void (*process)(struct task *t, struct timeval *next);	/* the function which processes the task */
 	void *context;		/* the task's context */
+	int nice;		/* the task's current nice value from -1024 to +1024 */
 };

 #endif /* _TYPES_TASK_H */

src/proto_http.c

@@ -5160,6 +5160,7 @@ int stats_check_uri_auth(struct session *t, struct proxy *backend)
 	t->logs.tv_request = now;
 	t->data_source = DATA_SRC_STATS;
 	t->data_state = DATA_ST_INIT;
+	t->task->nice = -32; /* small boost for HTTP statistics */
 	produce_content(t);
 	return 1;
 }

src/proto_uxst.c

@@ -447,6 +447,7 @@ int uxst_event_accept(int fd) {
 	task_init(t);
 	t->process = l->handler;
 	t->context = s;
+	t->nice = -64; /* we want to boost priority for local stats */
 	s->task = t;
 	s->fe = NULL;
src/task.c

@@ -24,6 +24,7 @@
 struct pool_head *pool2_task;

 unsigned int run_queue = 0;
+unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */

 /* Principle of the wait queue.
  *
@@ -125,7 +126,11 @@ int init_task()
 	return pool2_task != NULL;
 }

-/* puts the task <t> in run queue <q>, and returns <t> */
+/* Puts the task <t> in run queue at a position depending on t->nice.
+ * <t> is returned. The nice value assigns boosts in 32ths of the run
+ * queue size. A nice value of -1024 sets the task to -run_queue*32,
+ * while a nice value of 1024 sets the task to run_queue*32.
+ */
 struct task *task_wakeup(struct task *t)
 {
 	if (t->state == TASK_RUNNING)
@@ -136,6 +141,18 @@ struct task *task_wakeup(struct task *t)
 	run_queue++;
 	t->eb.key = ++rqueue_ticks;

+	if (likely(t->nice)) {
+		int offset;
+
+		niced_tasks++;
+		if (likely(t->nice > 0))
+			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+		else
+			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+		t->eb.key += offset;
+	}
+
 	t->state = TASK_RUNNING;

 	eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb);
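To see the effect on ordering, here is a hedged illustration of the key
assignment above; the tick counter and queue size are made up, and lower
keys run earlier within the tree window:

/* Illustrative only: mimics task_wakeup()'s key assignment with
 * invented values. Keys are 32-bit and may wrap, which the tree
 * windowing described in the next hunk tolerates. */
#include <stdio.h>

int main(void)
{
	unsigned int rqueue_ticks = 1000;	/* illustrative tick counter */
	unsigned int run_queue = 100;		/* illustrative queue size */

	unsigned int key_normal  = ++rqueue_ticks;	/* nice = 0 */
	/* nice = -64: offset is -(100 * 64) / 32 = -200 ticks */
	unsigned int key_boosted = ++rqueue_ticks - (run_queue * 64U) / 32U;

	/* prints "normal=1001 boosted=802": the boosted task sorts as if
	 * it had been queued roughly 200 wakeups earlier */
	printf("normal=%u boosted=%u\n", key_normal, key_boosted);
	return 0;
}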
@@ -218,10 +235,10 @@ void wake_expired_tasks(struct timeval *next)
  * used to assign a position to each task. This counter may be combined with
  * other variables (eg: nice value) to set the final position in the tree. The
  * counter may wrap without a problem, of course. We then limit the number of
- * tasks processed at once to 1/8 of the number of tasks in the queue, so that
- * general latency remains low and so that task positions have a chance to be
- * considered. It also reduces the number of trees to be evaluated when no task
- * remains.
+ * tasks processed at once to 1/4 of the number of tasks in the queue, and to
+ * 200 max in any case, so that general latency remains low and so that task
+ * positions have a chance to be considered. It also reduces the number of
+ * trees to be evaluated when no task remains.
 *
  * Just like with timers, we start with tree[(current - 1)], which holds past
  * values, and stop when we reach the middle of the list. In practice, we visit
@@ -240,7 +257,12 @@ void process_runnable_tasks(struct timeval *next)
 	if (!run_queue)
 		return;

-	max_processed = (run_queue + 7) / 8;
+	max_processed = run_queue;
+	if (max_processed > 200)
+		max_processed = 200;
+
+	if (likely(niced_tasks))
+		max_processed /= 4;

 	tree = ticks_to_tree(rqueue_ticks);
 	stop = (tree + TIMER_TREES / 2) & TIMER_TREE_MASK;
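A small sketch of the new batch limit above; batch_limit() is an
illustrative wrapper around that logic, not a function from the patch:

/* Per-call batch limit: at most 200 tasks, and only a quarter of the
 * queue when niced tasks are present, so that re-sorted positions get
 * re-evaluated sooner. */
#include <stdio.h>

static unsigned int batch_limit(unsigned int run_queue, unsigned int niced_tasks)
{
	unsigned int max_processed = run_queue;

	if (max_processed > 200)
		max_processed = 200;
	if (niced_tasks)
		max_processed /= 4;
	return max_processed;
}

int main(void)
{
	printf("%u\n", batch_limit(1000, 0)); /* 200: plain cap */
	printf("%u\n", batch_limit(1000, 5)); /* 50: niced tasks shrink the batch */
	return 0;
}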
@@ -255,6 +277,8 @@ void process_runnable_tasks(struct timeval *next)
 			eb = eb32_next(eb);

 			run_queue--;
+			if (likely(t->nice))
+				niced_tasks--;
 			t->state = TASK_IDLE;
 			eb32_delete(&t->eb);