MINOR: task: Rename run_queue and run_queue_cur counters

<run_queue> is used to track the number of tasks in the run queue and
<run_queue_cur> is a copy used for reporting purposes. These counters have
been renamed, respectively, <tasks_run_queue> and <tasks_run_queue_cur>, so
that the naming is consistent between tasks and applets.

[wt: needed for next fixes, backport to 1.7 and 1.6]
Christopher Faulet 2016-12-06 09:15:30 +01:00 committed by Willy Tarreau
parent 1cbe74cd83
commit 34c5cc98da
6 changed files with 24 additions and 24 deletions
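
For reference, the counters after this rename, next to the applet-side
counter whose naming they now match (applets_active_queue is visible in the
run_poll_loop hunk below); a sketch of the declarations only:

    extern unsigned int tasks_run_queue;     /* run queue size */
    extern unsigned int tasks_run_queue_cur; /* copy kept for reporting */
    extern unsigned int applets_active_queue;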

@@ -302,7 +302,7 @@ static inline void stream_offer_buffers()
 	 */
 	avail = pool2_buffer->allocated - pool2_buffer->used - global.tune.reserved_bufs / 2;
 
-	if (avail > (int)run_queue)
+	if (avail > (int)tasks_run_queue)
 		__stream_offer_buffers(avail);
 }
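
The check above is the throttling heuristic: spare buffers are offered to
waiters only when they outnumber the runnable tasks, since each runnable
task may itself claim a buffer when it runs. A minimal standalone sketch of
that heuristic (simplified names, not HAProxy's real structures):

    #include <stdio.h>

    static unsigned int tasks_run_queue;  /* runnable tasks, as renamed here */

    static void offer_buffers(int allocated, int used, int reserved)
    {
        int avail = allocated - used - reserved / 2;

        if (avail > (int)tasks_run_queue)
            printf("offering up to %d buffers\n", avail);
        else
            printf("keeping buffers (%d spare, %u runnable)\n",
                   avail, tasks_run_queue);
    }

    int main(void)
    {
        tasks_run_queue = 3;
        offer_buffers(16, 8, 4);   /* 6 spare > 3 runnable: offer */
        tasks_run_queue = 10;
        offer_buffers(16, 8, 4);   /* 6 spare <= 10 runnable: keep */
        return 0;
    }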

@@ -80,8 +80,8 @@
 /* a few exported variables */
 extern unsigned int nb_tasks;     /* total number of tasks */
-extern unsigned int run_queue;    /* run queue size */
-extern unsigned int run_queue_cur;
+extern unsigned int tasks_run_queue;    /* run queue size */
+extern unsigned int tasks_run_queue_cur;
 extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;
@@ -132,16 +132,16 @@ static inline struct task *task_unlink_wq(struct task *t)
 }
 
 /*
- * Unlink the task from the run queue. The run_queue size and number of niced
- * tasks are updated too. A pointer to the task itself is returned. The task
- * *must* already be in the run queue before calling this function. If unsure,
- * use the safer task_unlink_rq() function. Note that the pointer to the next
- * run queue entry is neither checked nor updated.
+ * Unlink the task from the run queue. The tasks_run_queue size and number of
+ * niced tasks are updated too. A pointer to the task itself is returned. The
+ * task *must* already be in the run queue before calling this function. If
+ * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
+ * next run queue entry is neither checked nor updated.
  */
 static inline struct task *__task_unlink_rq(struct task *t)
 {
 	eb32_delete(&t->rq);
-	run_queue--;
+	tasks_run_queue--;
 	if (likely(t->nice))
 		niced_tasks--;
 	return t;
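
The comment reflects HAProxy's double-underscore convention: the __ variant
skips the membership check for speed and relies on the caller, while
task_unlink_rq() checks first. A simplified, compilable model of the pair
(the fake_* names and the boolean flag are stand-ins, not the real
structures):

    #include <stdbool.h>

    struct fake_task {
        bool in_run_queue;   /* stands in for the eb32 node membership */
        int  nice;
    };

    static unsigned int tasks_run_queue;
    static unsigned int niced_tasks;

    /* unchecked: caller guarantees the task is in the run queue */
    static struct fake_task *fake__task_unlink_rq(struct fake_task *t)
    {
        t->in_run_queue = false;   /* models eb32_delete(&t->rq) */
        tasks_run_queue--;
        if (t->nice)
            niced_tasks--;
        return t;
    }

    /* checked: safe whether or not the task is queued */
    static struct fake_task *fake_task_unlink_rq(struct fake_task *t)
    {
        if (t->in_run_queue)
            fake__task_unlink_rq(t);
        return t;
    }

    int main(void)
    {
        struct fake_task t = { .in_run_queue = true, .nice = -10 };

        tasks_run_queue = 1;
        niced_tasks = 1;
        fake_task_unlink_rq(&t);   /* unlinks, decrements both counters */
        fake_task_unlink_rq(&t);   /* second call is a harmless no-op */
        return tasks_run_queue + niced_tasks;   /* 0 on success */
    }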

@@ -1730,7 +1730,7 @@ void run_poll_loop()
 			break;
 
 		/* expire immediately if events are pending */
-		if (fd_cache_num || run_queue || signal_queue_len || applets_active_queue)
+		if (fd_cache_num || tasks_run_queue || signal_queue_len || applets_active_queue)
 			next = now_ms;
 
 		/* The poller will ensure it returns around <next> */
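
Put differently, the poller is only allowed to sleep until the next timer
when nothing is already runnable. A compact model of that decision (the
function and parameter names here are illustrative, not HAProxy's):

    /* wakeup date for the poller: <next_timer> normally, <now> as soon
     * as any category of work is already pending */
    static int next_poll_date(int now, int next_timer,
                              unsigned int fd_events,
                              unsigned int runnable_tasks,
                              unsigned int pending_signals,
                              unsigned int active_applets)
    {
        if (fd_events || runnable_tasks || pending_signals || active_applets)
            return now;          /* expire immediately */
        return next_timer;
    }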

@@ -2083,7 +2083,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *u
 	              global.rlimit_nofile,
 	              global.maxsock, global.maxconn, global.maxpipes,
 	              actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
-	              run_queue_cur, nb_tasks_cur, idle_pct
+	              tasks_run_queue_cur, nb_tasks_cur, idle_pct
 	              );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -2996,7 +2996,7 @@ int stats_fill_info(struct field *info, int len)
 	info[INF_MAX_ZLIB_MEM_USAGE] = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
 #endif
 	info[INF_TASKS] = mkf_u32(0, nb_tasks_cur);
-	info[INF_RUN_QUEUE] = mkf_u32(0, run_queue_cur);
+	info[INF_RUN_QUEUE] = mkf_u32(0, tasks_run_queue_cur);
 	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, idle_pct);
 	info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
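
INF_RUN_QUEUE is what surfaces as the Run_queue field of "show info" on the
stats socket, so the reported value is unchanged by this rename; only the
internal counter name differs. Roughly (socket path and surrounding fields
depend on the local setup):

    $ echo "show info" | socat stdio /var/run/haproxy.stat
    ...
    Tasks: 12
    Run_queue: 1
    Idle_pct: 99
    ...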

@@ -449,7 +449,7 @@ void __stream_offer_buffers(int rqlimit)
 	struct stream *sess, *bak;
 
 	list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
-		if (rqlimit <= run_queue)
+		if (rqlimit <= tasks_run_queue)
 			break;
 
 		if (sess->task->state & TASK_RUNNING)
@@ -26,8 +26,8 @@
 struct pool_head *pool2_task;
 
 unsigned int nb_tasks = 0;
-unsigned int run_queue = 0;
-unsigned int run_queue_cur = 0;      /* copy of the run queue size */
+unsigned int tasks_run_queue = 0;
+unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0;       /* copy of the tasks count */
 unsigned int niced_tasks = 0;        /* number of niced tasks in the run queue */
 struct eb32_node *last_timer = NULL; /* optimization: last queued timer */
@@ -39,15 +39,15 @@ static unsigned int rqueue_ticks; /* insertion count */
 
 /* Puts the task <t> in run queue at a position depending on t->nice. <t> is
  * returned. The nice value assigns boosts in 32th of the run queue size. A
- * nice value of -1024 sets the task to -run_queue*32, while a nice value of
- * 1024 sets the task to run_queue*32. The state flags are cleared, so the
- * caller will have to set its flags after this call.
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
  * The task must not already be in the run queue. If unsure, use the safer
  * task_wakeup() function.
  */
 struct task *__task_wakeup(struct task *t)
 {
-	run_queue++;
+	tasks_run_queue++;
 	t->rq.key = ++rqueue_ticks;
 
 	if (likely(t->nice)) {
@@ -55,9 +55,9 @@ struct task *__task_wakeup(struct task *t)
 		niced_tasks++;
 		if (likely(t->nice > 0))
-			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+			offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
 		else
-			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+			offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
 		t->rq.key += offset;
 	}
@@ -191,11 +191,11 @@ void process_runnable_tasks()
 	struct task *t;
 	unsigned int max_processed;
 
-	run_queue_cur = run_queue; /* keep a copy for reporting */
+	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
 	nb_tasks_cur = nb_tasks;
-	max_processed = run_queue;
+	max_processed = tasks_run_queue;
 
-	if (!run_queue)
+	if (!tasks_run_queue)
 		return;
 
 	if (max_processed > 200)
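
The _cur copy made at the top of this function is the reporting snapshot:
the live tasks_run_queue counter drains as tasks are dequeued and run, so
readers such as stats_fill_info() above see the value captured per scheduler
pass rather than a moving target. A minimal model of the pattern:

    #include <stdio.h>

    static unsigned int tasks_run_queue;     /* live counter, drained below */
    static unsigned int tasks_run_queue_cur; /* stable copy for reporting */

    static void process_runnable_tasks_model(void)
    {
        tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */

        while (tasks_run_queue)
            tasks_run_queue--;   /* each dequeued task leaves the queue */
    }

    int main(void)
    {
        tasks_run_queue = 42;
        process_runnable_tasks_model();

        /* the live counter is drained, but reporting still shows 42 */
        printf("live=%u reported=%u\n", tasks_run_queue, tasks_run_queue_cur);
        return 0;
    }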