mirror of http://git.haproxy.org/git/haproxy.git/
MINOR: task: Rename run_queue and run_queue_cur counters
<run_queue> is used to track the number of tasks in the run queue and <run_queue_cur> is a copy used for reporting purposes. These counters have been renamed, respectively, <tasks_run_queue> and <tasks_run_queue_cur>, so that the naming is consistent between tasks and applets.

[wt: needed for next fixes, backport to 1.7 and 1.6]
commit 34c5cc98da
parent 1cbe74cd83
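For readers skimming the diff below, the two renamed counters form a live counter plus a per-loop snapshot. The sketch below is a paraphrased summary of that relationship after the patch, not a verbatim excerpt, and the helper name is hypothetical:

    /* live counter: incremented in __task_wakeup(), decremented in
     * __task_unlink_rq(), consulted wherever the scheduler or the buffer
     * allocator needs the current run queue size
     */
    unsigned int tasks_run_queue = 0;

    /* snapshot refreshed at the top of process_runnable_tasks() so that the
     * stats code ("show info", the HTML stats page) reports a stable value
     */
    unsigned int tasks_run_queue_cur = 0;

    static void refresh_reporting_snapshot(void) /* hypothetical helper name */
    {
    	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
    }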
@@ -302,7 +302,7 @@ static inline void stream_offer_buffers()
 	 */
 	avail = pool2_buffer->allocated - pool2_buffer->used - global.tune.reserved_bufs / 2;
 
-	if (avail > (int)run_queue)
+	if (avail > (int)tasks_run_queue)
 		__stream_offer_buffers(avail);
 }
 
@@ -80,8 +80,8 @@
 
 /* a few exported variables */
 extern unsigned int nb_tasks; /* total number of tasks */
-extern unsigned int run_queue; /* run queue size */
-extern unsigned int run_queue_cur;
+extern unsigned int tasks_run_queue; /* run queue size */
+extern unsigned int tasks_run_queue_cur;
 extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;
@@ -132,16 +132,16 @@ static inline struct task *task_unlink_wq(struct task *t)
 }
 
 /*
- * Unlink the task from the run queue. The run_queue size and number of niced
- * tasks are updated too. A pointer to the task itself is returned. The task
- * *must* already be in the run queue before calling this function. If unsure,
- * use the safer task_unlink_rq() function. Note that the pointer to the next
- * run queue entry is neither checked nor updated.
+ * Unlink the task from the run queue. The tasks_run_queue size and number of
+ * niced tasks are updated too. A pointer to the task itself is returned. The
+ * task *must* already be in the run queue before calling this function. If
+ * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
+ * next run queue entry is neither checked nor updated.
  */
 static inline struct task *__task_unlink_rq(struct task *t)
 {
 	eb32_delete(&t->rq);
-	run_queue--;
+	tasks_run_queue--;
 	if (likely(t->nice))
 		niced_tasks--;
 	return t;
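The comment above points to task_unlink_rq() as the safer entry point; that wrapper is not part of this hunk. As a rough sketch of the pattern it follows, assumed from the comment rather than copied from the patch (task_in_rq() is assumed to test whether the task is currently linked into the run queue):

    static inline struct task *task_unlink_rq_sketch(struct task *t)
    {
    	if (task_in_rq(t))
    		__task_unlink_rq(t); /* decrements tasks_run_queue, and niced_tasks if nice != 0 */
    	return t;
    }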
@@ -1730,7 +1730,7 @@ void run_poll_loop()
 			break;
 
 		/* expire immediately if events are pending */
-		if (fd_cache_num || run_queue || signal_queue_len || applets_active_queue)
+		if (fd_cache_num || tasks_run_queue || signal_queue_len || applets_active_queue)
 			next = now_ms;
 
 		/* The poller will ensure it returns around <next> */
@@ -2083,7 +2083,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *u
 	              global.rlimit_nofile,
 	              global.maxsock, global.maxconn, global.maxpipes,
 	              actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
-	              run_queue_cur, nb_tasks_cur, idle_pct
+	              tasks_run_queue_cur, nb_tasks_cur, idle_pct
 	              );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -2996,7 +2996,7 @@ int stats_fill_info(struct field *info, int len)
 	info[INF_MAX_ZLIB_MEM_USAGE] = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
 #endif
 	info[INF_TASKS] = mkf_u32(0, nb_tasks_cur);
-	info[INF_RUN_QUEUE] = mkf_u32(0, run_queue_cur);
+	info[INF_RUN_QUEUE] = mkf_u32(0, tasks_run_queue_cur);
 	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, idle_pct);
 	info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
@@ -449,7 +449,7 @@ void __stream_offer_buffers(int rqlimit)
 	struct stream *sess, *bak;
 
 	list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
-		if (rqlimit <= run_queue)
+		if (rqlimit <= tasks_run_queue)
 			break;
 
 		if (sess->task->state & TASK_RUNNING)
src/task.c (22 changed lines)
@@ -26,8 +26,8 @@
 struct pool_head *pool2_task;
 
 unsigned int nb_tasks = 0;
-unsigned int run_queue = 0;
-unsigned int run_queue_cur = 0; /* copy of the run queue size */
+unsigned int tasks_run_queue = 0;
+unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
 unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
 struct eb32_node *last_timer = NULL; /* optimization: last queued timer */
@@ -39,15 +39,15 @@ static unsigned int rqueue_ticks; /* insertion count */
 
 /* Puts the task <t> in run queue at a position depending on t->nice. <t> is
  * returned. The nice value assigns boosts in 32th of the run queue size. A
- * nice value of -1024 sets the task to -run_queue*32, while a nice value of
- * 1024 sets the task to run_queue*32. The state flags are cleared, so the
- * caller will have to set its flags after this call.
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
  * The task must not already be in the run queue. If unsure, use the safer
  * task_wakeup() function.
  */
 struct task *__task_wakeup(struct task *t)
 {
-	run_queue++;
+	tasks_run_queue++;
 	t->rq.key = ++rqueue_ticks;
 
 	if (likely(t->nice)) {
@@ -55,9 +55,9 @@ struct task *__task_wakeup(struct task *t)
 
 		niced_tasks++;
 		if (likely(t->nice > 0))
-			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+			offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
 		else
-			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+			offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
 		t->rq.key += offset;
 	}
 
@@ -191,11 +191,11 @@ void process_runnable_tasks()
 	struct task *t;
 	unsigned int max_processed;
 
-	run_queue_cur = run_queue; /* keep a copy for reporting */
+	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
 	nb_tasks_cur = nb_tasks;
-	max_processed = run_queue;
+	max_processed = tasks_run_queue;
 
-	if (!run_queue)
+	if (!tasks_run_queue)
 		return;
 
 	if (max_processed > 200)