REORG: sched: move idle time calculation from time.h to task.h

time.h is a horrible place for the activity calculation; it only ended
up there for historical reasons, because the functions happened to be
there. We already have most of the related parts in sched.{c,h}, and
these functions are an exception in the middle, forcing time.h to
include thread-related definitions and to access the before_poll /
after_poll and idle_pct values.

Let's move these 3 functions to task.h with the other scheduler ones,
renamed with a "sched_" prefix instead of the historical "tv_" one,
which made no sense anymore: measure_idle(), tv_entering_poll() and
tv_leaving_poll() become sched_measure_idle(), sched_entering_poll()
and sched_leaving_poll().
Author: Willy Tarreau
Date:   2021-09-30 17:53:22 +02:00
Parent: 6136989a22
Commit: 6dfab112e1
7 changed files with 65 additions and 59 deletions
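For reference, the call sequence a poller uses after this patch is the one
visible in the poller hunks below: the poll() call is bracketed by the
relocated task.h helpers instead of the old tv_* ones from time.h. A minimal
sketch of that sequence (the surrounding variables are the poller's own and
are shown here only for illustration, not as a complete poller):

	#include <haproxy/task.h>   /* sched_entering_poll(), sched_leaving_poll() */
	#include <haproxy/time.h>   /* tv_update_date() */

	/* inside the poller's _do_poll() */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	sched_entering_poll();                 /* records <before_poll> via gettimeofday() */
	activity_count_runtime();
	status = poll(poll_events, nbfd, wait_time);
	tv_update_date(wait_time, status);     /* refresh the global <date> */
	sched_leaving_poll(wait_time, status); /* update idle_pct and prev cpu/mono time */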


@@ -614,6 +614,56 @@ static inline void task_schedule(struct task *task, int when)
	}
}

/* Update the idle time value twice a second, to be called after
 * tv_update_date() when called after poll(), and currently called only by
 * sched_leaving_poll() below. It relies on <before_poll> to be updated to
 * the system time before calling poll().
 */
static inline void sched_measure_idle()
{
	/* Let's compute the idle to work ratio. We worked between after_poll
	 * and before_poll, and slept between before_poll and date. The idle_pct
	 * is updated at most twice every second. Note that the current second
	 * rarely changes so we avoid a multiply when not needed.
	 */
	int delta;

	if ((delta = date.tv_sec - before_poll.tv_sec))
		delta *= 1000000;
	idle_time += delta + (date.tv_usec - before_poll.tv_usec);

	if ((delta = date.tv_sec - after_poll.tv_sec))
		delta *= 1000000;
	samp_time += delta + (date.tv_usec - after_poll.tv_usec);

	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
	if (samp_time < 500000)
		return;

	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
	idle_time = samp_time = 0;
}

/* Collect date and time information after leaving poll(). <timeout> must be
 * set to the maximum sleep time passed to poll (in milliseconds), and
 * <interrupted> must be zero if the poller reached the timeout or non-zero
 * otherwise, which generally is provided by the poller's return value.
 */
static inline void sched_leaving_poll(int timeout, int interrupted)
{
	sched_measure_idle();
	ti->prev_cpu_time = now_cpu_time();
	ti->prev_mono_time = now_mono_time();
}

/* Collect date and time information before calling poll(). This will be used
 * to count the run time of the past loop and the sleep time of the next poll.
 */
static inline void sched_entering_poll()
{
	gettimeofday(&before_poll, NULL);
}
/* This function register a new signal. "lua" is the current lua
* execution context. It contains a pointer to the associated task.
* "link" is a list head attached to an other task that must be wake


@@ -552,55 +552,6 @@ static inline uint64_t now_cpu_time_thread(const struct thread_info *thr)
#endif
}

/* Update the idle time value twice a second, to be called after
 * tv_update_date() when called after poll(). It relies on <before_poll> to be
 * updated to the system time before calling poll().
 */
static inline void measure_idle()
{
	/* Let's compute the idle to work ratio. We worked between after_poll
	 * and before_poll, and slept between before_poll and date. The idle_pct
	 * is updated at most twice every second. Note that the current second
	 * rarely changes so we avoid a multiply when not needed.
	 */
	int delta;

	if ((delta = date.tv_sec - before_poll.tv_sec))
		delta *= 1000000;
	idle_time += delta + (date.tv_usec - before_poll.tv_usec);

	if ((delta = date.tv_sec - after_poll.tv_sec))
		delta *= 1000000;
	samp_time += delta + (date.tv_usec - after_poll.tv_usec);

	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
	if (samp_time < 500000)
		return;

	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
	idle_time = samp_time = 0;
}

/* Collect date and time information before calling poll(). This will be used
 * to count the run time of the past loop and the sleep time of the next poll.
 */
static inline void tv_entering_poll()
{
	gettimeofday(&before_poll, NULL);
}

/* Collect date and time information after leaving poll(). <timeout> must be
 * set to the maximum sleep time passed to poll (in milliseconds), and
 * <interrupted> must be zero if the poller reached the timeout or non-zero
 * otherwise, which generally is provided by the poller's return value.
 */
static inline void tv_leaving_poll(int timeout, int interrupted)
{
	measure_idle();
	ti->prev_cpu_time = now_cpu_time();
	ti->prev_mono_time = now_mono_time();
}
#endif /* _HAPROXY_TIME_H */
/*


@@ -20,6 +20,7 @@
#include <haproxy/global.h>
#include <haproxy/signal.h>
#include <haproxy/ticks.h>
#include <haproxy/task.h>
#include <haproxy/time.h>
#include <haproxy/tools.h>
@@ -188,7 +189,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
/* now let's wait for polled events */
wait_time = wake ? 0 : compute_poll_timeout(exp);
tv_entering_poll();
sched_entering_poll();
activity_count_runtime();
do {
int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
@@ -208,7 +209,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break;
} while (1);
tv_leaving_poll(wait_time, status);
sched_leaving_poll(wait_time, status);
thread_harmless_end();
thread_idle_end();


@@ -23,6 +23,7 @@
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/signal.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
@@ -158,7 +159,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
* Determine how long to wait for events to materialise on the port.
*/
wait_time = wake ? 0 : compute_poll_timeout(exp);
tv_entering_poll();
sched_entering_poll();
activity_count_runtime();
do {
@@ -202,7 +203,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break;
} while(1);
tv_leaving_poll(wait_time, nevlist);
sched_leaving_poll(wait_time, nevlist);
thread_harmless_end();
thread_idle_end();


@@ -22,6 +22,7 @@
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/signal.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
@@ -145,7 +146,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
/* now let's wait for events */
wait_time = wake ? 0 : compute_poll_timeout(exp);
fd = global.tune.maxpollevents;
tv_entering_poll();
sched_entering_poll();
activity_count_runtime();
do {
@@ -174,7 +175,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break;
} while (1);
tv_leaving_poll(wait_time, status);
sched_leaving_poll(wait_time, status);
thread_harmless_end();
thread_idle_end();


@@ -21,6 +21,7 @@
#include <haproxy/api.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
@@ -201,11 +202,11 @@ static void _do_poll(struct poller *p, int exp, int wake)
/* now let's wait for events */
wait_time = wake ? 0 : compute_poll_timeout(exp);
tv_entering_poll();
sched_entering_poll();
activity_count_runtime();
status = poll(poll_events, nbfd, wait_time);
tv_update_date(wait_time, status);
tv_leaving_poll(wait_time, status);
sched_leaving_poll(wait_time, status);
thread_harmless_end();
thread_idle_end();


@@ -18,6 +18,7 @@
#include <haproxy/api.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/task.h>
#include <haproxy/ticks.h>
#include <haproxy/time.h>
@@ -172,7 +173,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
delta_ms = wake ? 0 : compute_poll_timeout(exp);
delta.tv_sec = (delta_ms / 1000);
delta.tv_usec = (delta_ms % 1000) * 1000;
tv_entering_poll();
sched_entering_poll();
activity_count_runtime();
status = select(maxfd,
readnotnull ? tmp_evts[DIR_RD] : NULL,
@@ -180,7 +181,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
NULL,
&delta);
tv_update_date(delta_ms, status);
tv_leaving_poll(delta_ms, status);
sched_leaving_poll(delta_ms, status);
thread_harmless_end();
thread_idle_end();