MINOR: tasks/activity: report the context switch and task wakeup rates

This is particularly useful for spotting runaway tasks. The context
switch rate covers all tasklet calls (tasks and I/O handlers), while
the task wakeup rate covers only tasks picked from the run queue for
execution. High values indicate either intense traffic or a bug that
makes a task go wild.
Willy Tarreau 2019-04-24 08:10:57 +02:00
parent 69b5a7f1a3
commit 0212fadd65
3 changed files with 17 additions and 1 deletion
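As background: both new counters are freq_ctr objects, which report an event rate over the last second via update_freq_ctr()/read_freq_ctr(). The standalone C sketch below is a simplified model of how such a one-second rate counter behaves; it is not HAProxy's implementation (the real one, in proto/freq_ctr.h, is thread-safe and interpolates within the current second), and all names in it are invented for illustration.

#include <stdio.h>
#include <time.h>

/* Simplified model of a per-second frequency counter: events accumulate
 * in the bucket for the current second, and reading blends the previous
 * second's total with what has arrived so far in the current one. */
struct toy_freq_ctr {
        time_t curr_sec;        /* second covered by the current bucket */
        unsigned int curr_ctr;  /* events seen during curr_sec */
        unsigned int prev_ctr;  /* events seen during the previous second */
};

static void toy_update_freq_ctr(struct toy_freq_ctr *ctr, unsigned int inc)
{
        time_t now = time(NULL);

        if (now != ctr->curr_sec) {
                /* rotate: the current bucket becomes the previous one,
                 * or is dropped entirely if more than a second passed */
                ctr->prev_ctr = (now == ctr->curr_sec + 1) ? ctr->curr_ctr : 0;
                ctr->curr_ctr = 0;
                ctr->curr_sec = now;
        }
        ctr->curr_ctr += inc;
}

static unsigned int toy_read_freq_ctr(const struct toy_freq_ctr *ctr)
{
        /* crude estimate of events/s: last full second plus the
         * current partial one (HAProxy scales prev_ctr by the
         * remaining fraction of the current second instead) */
        return ctr->prev_ctr + ctr->curr_ctr;
}

int main(void)
{
        struct toy_freq_ctr rate = { 0, 0, 0 };

        for (int i = 0; i < 1000; i++)
                toy_update_freq_ctr(&rate, 1);

        printf("approx events/s: %u\n", toy_read_freq_ctr(&rate));
        return 0;
}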


@@ -50,6 +50,8 @@ struct activity {
 	/* one cache line */
 	struct freq_ctr cpust_1s;         // avg amount of half-ms stolen over last second
 	struct freq_ctr_period cpust_15s; // avg amount of half-ms stolen over last 15s
+	struct freq_ctr ctxsw_rate;       // context switching rate over last second
+	struct freq_ctr tasks_rate;       // task wakeup rate over last second
 	unsigned int avg_loop_us;         // average run time per loop over last 1024 runs
 	unsigned int accepted;            // accepted incoming connections
 	unsigned int accq_pushed;         // accept queue connections pushed


@@ -1087,6 +1087,8 @@ static int cli_io_handler_show_activity(struct appctx *appctx)
 	chunk_appendf(&trash, "\nstream:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].stream);
 	chunk_appendf(&trash, "\nempty_rq:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].empty_rq);
 	chunk_appendf(&trash, "\nlong_rq:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].long_rq);
+	chunk_appendf(&trash, "\nctxsw_rate:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].ctxsw_rate));
+	chunk_appendf(&trash, "\ntasks_rate:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].tasks_rate));
 	chunk_appendf(&trash, "\ncpust_ms_tot:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].cpust_total/2);
 	chunk_appendf(&trash, "\ncpust_ms_1s:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].cpust_1s)/2);
 	chunk_appendf(&trash, "\ncpust_ms_15s:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr_period(&activity[thr].cpust_15s, 15000)/2);
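For illustration, here is what the two new lines of "show activity" could look like on a hypothetical 4-thread instance, one column per thread. The output labels follow the format strings above; the values and the stats socket path are made up:

$ echo "show activity" | socat stdio /var/run/haproxy.sock
...
ctxsw_rate: 10342 9876 11210 10057
tasks_rate: 1203 1187 1254 1199
...

Note that ctxsw_rate should normally be at least as high as tasks_rate, since it also counts I/O handler tasklet calls.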


@@ -20,10 +20,11 @@
 #include <eb32sctree.h>
 #include <eb32tree.h>
 
+#include <proto/fd.h>
+#include <proto/freq_ctr.h>
 #include <proto/proxy.h>
 #include <proto/stream.h>
 #include <proto/task.h>
-#include <proto/fd.h>
 
 DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
 DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
@@ -278,6 +279,8 @@ void process_runnable_tasks()
 	struct eb32sc_node *lrq = NULL; // next local run queue entry
 	struct eb32sc_node *grq = NULL; // next global run queue entry
 	struct task *t;
+	int to_process;
+	int wakeups;
 	int max_processed;
 
 	if (!(active_tasks_mask & tid_bit)) {
@@ -292,6 +295,9 @@ void process_runnable_tasks()
 
 	if (likely(niced_tasks))
 		max_processed = (max_processed + 3) / 4;
 
+	to_process = max_processed;
+	wakeups = 0;
+
 	/* Note: the grq lock is always held when grq is not null */
 	while (task_per_thread[tid].task_list_size < max_processed) {
@@ -344,6 +350,7 @@ void process_runnable_tasks()
 
 		/* And add it to the local task list */
 		task_insert_into_tasklet_list(t);
+		wakeups++;
 	}
 
 	/* release the rqueue lock */
@@ -419,6 +426,11 @@ void process_runnable_tasks()
 		_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
 		activity[tid].long_rq++;
 	}
+
+	if (wakeups)
+		update_freq_ctr(&activity[tid].tasks_rate, wakeups);
+	if (to_process - max_processed)
+		update_freq_ctr(&activity[tid].ctxsw_rate, to_process - max_processed);
 }
 
 /*
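Taken together, the accounting pattern is: snapshot the processing budget on entry (to_process = max_processed), let the processing loop decrement max_processed once per tasklet call, then feed the deltas into the counters once per invocation, so to_process - max_processed becomes the context switch count while wakeups counts only tasks pulled off the run queue. The minimal self-contained sketch below mimics that pattern with invented names (plain integers stand in for the freq_ctr updates):

#include <stdio.h>

/* Toy stand-ins for the per-thread rate counters; in HAProxy these
 * are freq_ctr objects fed through update_freq_ctr(). */
static unsigned int ctxsw_events, wakeup_events;

/* One scheduler pass: 'budget' plays the role of max_processed. */
static void toy_runnable_pass(int budget, int woken, int calls)
{
        int to_process = budget;  /* snapshot before any work */
        int wakeups = 0;

        /* pretend 'woken' tasks were pulled from the run queue ... */
        wakeups += woken;
        /* ... and 'calls' tasklet calls in total (tasks + I/O handlers)
         * each consumed one unit of budget */
        budget -= calls;

        if (wakeups)
                wakeup_events += wakeups;                 /* -> tasks_rate */
        if (to_process - budget)
                ctxsw_events += to_process - budget;      /* -> ctxsw_rate */
}

int main(void)
{
        toy_runnable_pass(200, 3, 10);
        printf("ctxsw=%u wakeups=%u\n", ctxsw_events, wakeup_events);
        return 0;
}

Running this prints "ctxsw=10 wakeups=3", illustrating why the context switch rate exceeds the wakeup rate whenever I/O handlers run alongside tasks.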