From 91e99931b7a94dfa6dc62c384b1183b7bc70b7c7 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Mon, 30 Jun 2008 07:51:00 +0200 Subject: [PATCH] [MEDIUM] introduce task->nice and boost access to statistics The run queue scheduler now considers task->nice to queue a task and to pick a task out of the queue. This makes it possible to boost the access to statistics (both via HTTP and UNIX socket). The UNIX socket receives twice as much a boost as the HTTP socket because it is more sensible. --- include/proto/task.h | 12 +++++++++--- include/types/task.h | 1 + src/proto_http.c | 1 + src/proto_uxst.c | 1 + src/task.c | 36 ++++++++++++++++++++++++++++++------ 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/include/proto/task.h b/include/proto/task.h index f99bbdb7e..de50cbab3 100644 --- a/include/proto/task.h +++ b/include/proto/task.h @@ -32,7 +32,8 @@ #include -extern unsigned int run_queue; /* run queue size */ +extern unsigned int run_queue; /* run queue size */ +extern unsigned int niced_tasks; /* number of niced tasks in the run queue */ extern struct pool_head *pool2_task; /* perform minimal initializations, report 0 in case of error, 1 if OK. 
*/ @@ -50,6 +51,8 @@ static inline struct task *task_sleep(struct task *t) t->state = TASK_IDLE; eb32_delete(&t->eb); run_queue--; + if (likely(t->nice)) + niced_tasks--; } return t; } @@ -65,9 +68,11 @@ static inline struct task *task_delete(struct task *t) if (t->eb.node.leaf_p) eb32_delete(&t->eb); - if (t->state == TASK_RUNNING) + if (t->state == TASK_RUNNING) { run_queue--; - + if (likely(t->nice)) + niced_tasks--; + } return t; } @@ -79,6 +84,7 @@ static inline struct task *task_init(struct task *t) { t->eb.node.leaf_p = NULL; t->state = TASK_IDLE; + t->nice = 0; return t; } diff --git a/include/types/task.h b/include/types/task.h index b6c9c72b5..4e579c1a9 100644 --- a/include/types/task.h +++ b/include/types/task.h @@ -39,6 +39,7 @@ struct task { struct timeval expire; /* next expiration time for this task, use only for fast sorting */ void (*process)(struct task *t, struct timeval *next); /* the function which processes the task */ void *context; /* the task's context */ + int nice; /* the task's current nice value from -1024 to +1024 */ }; #endif /* _TYPES_TASK_H */ diff --git a/src/proto_http.c b/src/proto_http.c index 7b35ff7cc..308ecb002 100644 --- a/src/proto_http.c +++ b/src/proto_http.c @@ -5160,6 +5160,7 @@ int stats_check_uri_auth(struct session *t, struct proxy *backend) t->logs.tv_request = now; t->data_source = DATA_SRC_STATS; t->data_state = DATA_ST_INIT; + t->task->nice = -32; /* small boost for HTTP statistics */ produce_content(t); return 1; } diff --git a/src/proto_uxst.c b/src/proto_uxst.c index c4c3d3790..6968562ba 100644 --- a/src/proto_uxst.c +++ b/src/proto_uxst.c @@ -447,6 +447,7 @@ int uxst_event_accept(int fd) { task_init(t); t->process = l->handler; t->context = s; + t->nice = -64; /* we want to boost priority for local stats */ s->task = t; s->fe = NULL; diff --git a/src/task.c b/src/task.c index dd9638aa0..b28ae0b03 100644 --- a/src/task.c +++ b/src/task.c @@ -24,6 +24,7 @@ struct pool_head *pool2_task; unsigned int run_queue = 
0; +unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */ /* Principle of the wait queue. * @@ -125,7 +126,11 @@ int init_task() return pool2_task != NULL; } -/* puts the task in run queue , and returns */ +/* Puts the task in run queue at a position depending on t->nice. + * is returned. The nice value assigns boosts in 32th of the run queue + * size. A nice value of -1024 sets the task to -run_queue*32, while a nice + * value of 1024 sets the task to run_queue*32. + */ struct task *task_wakeup(struct task *t) { if (t->state == TASK_RUNNING) @@ -136,6 +141,18 @@ struct task *task_wakeup(struct task *t) run_queue++; t->eb.key = ++rqueue_ticks; + + if (likely(t->nice)) { + int offset; + + niced_tasks++; + if (likely(t->nice > 0)) + offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U); + else + offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U); + t->eb.key += offset; + } + t->state = TASK_RUNNING; eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb); @@ -218,10 +235,10 @@ void wake_expired_tasks(struct timeval *next) * used to assign a position to each task. This counter may be combined with * other variables (eg: nice value) to set the final position in the tree. The * counter may wrap without a problem, of course. We then limit the number of - * tasks processed at once to 1/8 of the number of tasks in the queue, so that - * general latency remains low and so that task positions have a chance to be - * considered. It also reduces the number of trees to be evaluated when no task - * remains. + * tasks processed at once to 1/4 of the number of tasks in the queue, and to + * 200 max in any case, so that general latency remains low and so that task + * positions have a chance to be considered. It also reduces the number of + * trees to be evaluated when no task remains. * * Just like with timers, we start with tree[(current - 1)], which holds past * values, and stop when we reach the middle of the list. 
In practise, we visit @@ -240,7 +257,12 @@ void process_runnable_tasks(struct timeval *next) if (!run_queue) return; - max_processed = (run_queue + 7) / 8; + max_processed = run_queue; + if (max_processed > 200) + max_processed = 200; + + if (likely(niced_tasks)) + max_processed /= 4; tree = ticks_to_tree(rqueue_ticks); stop = (tree + TIMER_TREES / 2) & TIMER_TREE_MASK; @@ -255,6 +277,8 @@ void process_runnable_tasks(struct timeval *next) eb = eb32_next(eb); run_queue--; + if (likely(t->nice)) + niced_tasks--; t->state = TASK_IDLE; eb32_delete(&t->eb);