MINOR: time: add report_idle() to report process-wide idle time
Before threads were introduced in 1.8, idle_pct used to be a global
variable indicating the overall process idle time. Threads made it
thread-local, meaning that its reporting in the stats made little
sense, though this was not easy to spot. In 2.0, the idle_pct variable
moved to the struct thread_info via commit 81036f273 ("MINOR: time:
move the cpu, mono, and idle time to thread_info"). That made it more
obvious that idle_pct was per-thread, and also made it possible to
measure it more accurately. But no further effort was made in that
direction.
This patch introduces a new report_idle() function that accurately
averages the per-thread idle time over all running threads (i.e. it
should remain valid even if some threads are paused or stopped), and
makes use of it in the stats / "show info" reports. Since the per-thread
idle_pct values are now read from other threads, the store in
measure_idle() and the loads in report_idle() are performed atomically.
Sending traffic over only two connections of an 8-thread process
would previously show this erratic CPU usage pattern:
$ while :; do socat /tmp/sock1 - <<< "show info"|grep ^Idle;sleep 0.1;done
Idle_pct: 30
Idle_pct: 35
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 35
Idle_pct: 33
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Idle_pct: 100
Now it shows this more accurate measurement:
$ while :; do socat /tmp/sock1 - <<< "show info"|grep ^Idle;sleep 0.1;done
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
Idle_pct: 83
This is not technically a bug, but the lack of precision definitely
affects some users who rely on the idle_pct measurement. This should at
least be backported to 2.4, and possibly to some older releases
depending on user demand.
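
The following is a minimal standalone sketch, not part of the patch: the
thread table, the running-thread mask and report_idle_sketch() below are
simplified stand-ins for ha_thread_info, all_threads_mask and report_idle(),
used only to show why the averaged value settles around 83 when two of the
eight threads are busy (around 33% idle) and the other six are fully idle:

#include <stdio.h>

#define MAX_THREADS 8

/* simplified stand-ins for HAProxy's per-thread info and running-thread mask */
struct thread_info { unsigned int idle_pct; };
static struct thread_info ha_thread_info[MAX_THREADS];
static unsigned long all_threads_mask = 0xffUL; /* 8 running threads */

/* average idle_pct over running threads, mirroring what report_idle() does */
static unsigned int report_idle_sketch(void)
{
	unsigned int total = 0, rthr = 0, thr;

	for (thr = 0; thr < MAX_THREADS; thr++) {
		if (!(all_threads_mask & (1UL << thr)))
			continue;
		total += ha_thread_info[thr].idle_pct;
		rthr++;
	}
	return rthr ? total / rthr : 0;
}

int main(void)
{
	unsigned int thr;

	/* two busy threads around 33% idle, six idle threads at 100% */
	for (thr = 0; thr < MAX_THREADS; thr++)
		ha_thread_info[thr].idle_pct = (thr < 2) ? 33 : 100;

	printf("Idle_pct: %u\n", report_idle_sketch()); /* (2*33 + 6*100) / 8 = 83 */
	return 0;
}

Because the average covers all running threads, the reported value no longer
depends on which thread happens to serve the "show info" request, hence the
steady 83 in the second capture above.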
parent e365aa28d4
commit fe456c581f
@@ -577,10 +577,26 @@ static inline void measure_idle()
 	if (samp_time < 500000)
 		return;
 
-	ti->idle_pct = (100ULL * idle_time + samp_time / 2) / samp_time;
+	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
 	idle_time = samp_time = 0;
 }
 
+/* report the average CPU idle percentage over all running threads, between 0 and 100 */
+static inline uint report_idle()
+{
+	uint total = 0;
+	uint rthr = 0;
+	uint thr;
+
+	for (thr = 0; thr < MAX_THREADS; thr++) {
+		if (!(all_threads_mask & (1UL << thr)))
+			continue;
+		total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
+		rthr++;
+	}
+	return rthr ? total / rthr : 0;
+}
+
 /* Collect date and time information before calling poll(). This will be used
  * to count the run time of the past loop and the sleep time of the next poll.
  */
@@ -3400,7 +3400,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *u
 	             actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
 	             bps >= 1000000000UL ? (bps / 1000000000.0) : bps >= 1000000UL ? (bps / 1000000.0) : (bps / 1000.0),
 	             bps >= 1000000000UL ? 'G' : bps >= 1000000UL ? 'M' : 'k',
-	             total_run_queues(), total_allocated_tasks(), ti->idle_pct
+	             total_run_queues(), total_allocated_tasks(), report_idle()
 	             );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -4433,7 +4433,7 @@ int stats_fill_info(struct field *info, int len, uint flags)
 #endif
 	info[INF_TASKS] = mkf_u32(0, total_allocated_tasks());
 	info[INF_RUN_QUEUE] = mkf_u32(0, total_run_queues());
-	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, ti->idle_pct);
+	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, report_idle());
 	info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
 		info[INF_DESCRIPTION] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.desc);