MINOR: activity: report the number of times poll() reports I/O

The "show activity" output mentions a number of indicators to explain
wake up reasons but doesn't have the number of times poll() sees some
I/O. And given that multiple events can happen simultaneously, it's
not always possible to deduce this metric by subtracting.

This patch adds a new "poll_io" counter that allows one to see how
often poll() returns with at least one active FD. This should help
detect stuck events and measure various ratios of poll sub-metrics.
This commit is contained in:
Willy Tarreau 2020-06-17 20:25:18 +02:00
parent c208a54ab2
commit e545153c50
7 changed files with 17 additions and 3 deletions

View File

@@ -40,6 +40,7 @@ struct activity {
unsigned int loops; // complete loops in run_poll_loop()
unsigned int wake_tasks; // active tasks prevented poll() from sleeping
unsigned int wake_signal; // pending signal prevented poll() from sleeping
unsigned int poll_io; // number of times poll() reported I/O events
unsigned int poll_exp; // number of times poll() sees an expired timeout (includes wake_*)
unsigned int poll_drop; // poller dropped a dead FD from the update list
unsigned int poll_dead; // poller woke up with a dead FD
@@ -51,7 +52,7 @@ struct activity {
unsigned int empty_rq; // calls to process_runnable_tasks() with nothing for the thread
unsigned int long_rq; // process_runnable_tasks() left with tasks in the run queue
unsigned int cpust_total; // sum of half-ms stolen per thread
/* two unused entries left before end of first cache line */
/* one unused entry left before end of first cache line */
ALWAYS_ALIGN(64);
struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second

View File

@@ -1129,6 +1129,7 @@ static int cli_io_handler_show_activity(struct appctx *appctx)
chunk_appendf(&trash, "loops:"); SHOW_TOT(thr, activity[thr].loops);
chunk_appendf(&trash, "wake_tasks:"); SHOW_TOT(thr, activity[thr].wake_tasks);
chunk_appendf(&trash, "wake_signal:"); SHOW_TOT(thr, activity[thr].wake_signal);
chunk_appendf(&trash, "poll_io:"); SHOW_TOT(thr, activity[thr].poll_io);
chunk_appendf(&trash, "poll_exp:"); SHOW_TOT(thr, activity[thr].poll_exp);
chunk_appendf(&trash, "poll_drop:"); SHOW_TOT(thr, activity[thr].poll_drop);
chunk_appendf(&trash, "poll_dead:"); SHOW_TOT(thr, activity[thr].poll_dead);

View File

@@ -180,8 +180,10 @@ static void _do_poll(struct poller *p, int exp, int wake)
status = epoll_wait(epoll_fd[tid], epoll_events, global.tune.maxpollevents, timeout);
tv_update_date(timeout, status);
if (status)
if (status) {
activity[tid].poll_io++;
break;
}
if (timeout || !wait_time)
break;
if (signal_queue_len || wake)

View File

@@ -207,6 +207,9 @@ static void _do_poll(struct poller *p, int exp, int wake)
thread_harmless_end();
if (nevlist > 0)
activity[tid].poll_io++;
for (i = 0; i < nevlist; i++) {
unsigned int n = 0;
int events, rebind_events;

View File

@@ -161,8 +161,10 @@ static void _do_poll(struct poller *p, int exp, int wake)
&timeout_ts); // const struct timespec *timeout
tv_update_date(timeout, status);
if (status)
if (status) {
activity[tid].poll_io++;
break;
}
if (timeout || !wait_time)
break;
if (signal_queue_len || wake)

View File

@@ -211,6 +211,9 @@ static void _do_poll(struct poller *p, int exp, int wake)
thread_harmless_end();
if (status > 0)
activity[tid].poll_io++;
for (count = 0; status > 0 && count < nbfd; count++) {
unsigned int n;
int e = poll_events[count].revents;

View File

@@ -188,6 +188,8 @@ static void _do_poll(struct poller *p, int exp, int wake)
if (status <= 0)
return;
activity[tid].poll_io++;
for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
if ((((int *)(tmp_evts[DIR_RD]))[fds] | ((int *)(tmp_evts[DIR_WR]))[fds]) == 0)
continue;