REORG: task: uninline the loop time measurement code
It's pointless to inline this: it's called exactly once per poll loop, and it depends on time.h, which is quite deep. Let's move that to task.c along with sched_report_idle().
commit d8b325c748
parent 8de90c71b3
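The change follows the usual uninlining pattern: the header keeps only a prototype, and the single definition (together with its heavy include) moves into the .c file. A minimal sketch of that pattern with hypothetical names (foo.h/foo.c and loop_hook() are placeholders, not the haproxy code):

/* foo.h -- before: the body lives in the header, so every compilation
 * unit that includes foo.h must also pull in the body's dependencies. */
static inline void loop_hook(void)
{
	/* ... body that needs <sys/time.h> ... */
}

/* foo.h -- after: only the prototype remains; the heavy include can go. */
void loop_hook(void);

/* foo.c -- after: one out-of-line definition. For a function called
 * once per poll loop, losing the inlining costs nothing measurable,
 * and the dependency is confined to a single file. */
#include <sys/time.h>

void loop_hook(void)
{
	/* ... body that needs <sys/time.h> ... */
}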
include/haproxy/task.h

@@ -37,7 +37,6 @@
 #include <haproxy/task-t.h>
 #include <haproxy/thread.h>
 #include <haproxy/ticks.h>
-#include <haproxy/time.h>
 
 
 /* Principle of the wait queue.
@@ -117,6 +116,8 @@ void __task_wakeup(struct task *t);
 void __task_queue(struct task *task, struct eb_root *wq);
 
 uint sched_report_idle();
+void sched_leaving_poll(int timeout, int interrupted);
+void sched_entering_poll();
 
 unsigned int run_tasks_from_lists(unsigned int budgets[]);
 
@@ -619,79 +620,6 @@ static inline void task_schedule(struct task *task, int when)
 	}
 }
 
-/* Update the idle time value twice a second, to be called after
- * tv_update_date() when called after poll(), and currently called only by
- * sched_leaving_poll() below. It relies on <before_poll> to be updated to
- * the system time before calling poll().
- */
-static inline void sched_measure_idle()
-{
-	/* Let's compute the idle to work ratio. We worked between after_poll
-	 * and before_poll, and slept between before_poll and date. The idle_pct
-	 * is updated at most twice every second. Note that the current second
-	 * rarely changes so we avoid a multiply when not needed.
-	 */
-	int delta;
-
-	if ((delta = date.tv_sec - before_poll.tv_sec))
-		delta *= 1000000;
-	idle_time += delta + (date.tv_usec - before_poll.tv_usec);
-
-	if ((delta = date.tv_sec - after_poll.tv_sec))
-		delta *= 1000000;
-	samp_time += delta + (date.tv_usec - after_poll.tv_usec);
-
-	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
-	if (samp_time < 500000)
-		return;
-
-	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
-	idle_time = samp_time = 0;
-}
-
-/* Collect date and time information after leaving poll(). <timeout> must be
- * set to the maximum sleep time passed to poll (in milliseconds), and
- * <interrupted> must be zero if the poller reached the timeout or non-zero
- * otherwise, which generally is provided by the poller's return value.
- */
-static inline void sched_leaving_poll(int timeout, int interrupted)
-{
-	sched_measure_idle();
-	ti->prev_cpu_time = now_cpu_time();
-	ti->prev_mono_time = now_mono_time();
-}
-
-/* Collect date and time information before calling poll(). This will be used
- * to count the run time of the past loop and the sleep time of the next poll.
- * It also compares the elasped and cpu times during the activity period to
- * estimate the amount of stolen time, which is reported if higher than half
- * a millisecond.
- */
-static inline void sched_entering_poll()
-{
-	uint64_t new_mono_time;
-	uint64_t new_cpu_time;
-	int64_t stolen;
-
-	gettimeofday(&before_poll, NULL);
-
-	new_cpu_time = now_cpu_time();
-	new_mono_time = now_mono_time();
-
-	if (ti->prev_cpu_time && ti->prev_mono_time) {
-		new_cpu_time -= ti->prev_cpu_time;
-		new_mono_time -= ti->prev_mono_time;
-		stolen = new_mono_time - new_cpu_time;
-		if (unlikely(stolen >= 500000)) {
-			stolen /= 500000;
-			/* more than half a millisecond difference might
-			 * indicate an undesired preemption.
-			 */
-			report_stolen_time(stolen);
-		}
-	}
-}
-
 /* This function register a new signal. "lua" is the current lua
  * execution context. It contains a pointer to the associated task.
  * "link" is a list head attached to an other task that must be wake
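For readers following the arithmetic in sched_measure_idle(): idle_time accumulates the microseconds spent sleeping in poll() (before_poll to date), samp_time the total microseconds of the sample window (after_poll to date), and once the window reaches half a second (500000us) the idle percentage is published. The standalone sketch below replays the final computation with made-up counter values (the 460000/700000 figures are hypothetical, not from haproxy):

#include <stdio.h>

int main(void)
{
	/* Hypothetical sample window: out of 700000us of wall time
	 * (samp_time), the thread slept 460000us in poll() (idle_time). */
	unsigned long long idle_time = 460000;
	unsigned long long samp_time = 700000;

	/* Same expression as sched_measure_idle(): scale to percent in
	 * 64 bits first, then add samp_time/2 so the integer division
	 * rounds to nearest instead of truncating. */
	unsigned int idle_pct = (100ULL * idle_time + samp_time / 2) / samp_time;

	/* (100*460000 + 350000) / 700000 = 66, i.e. 65.71% rounded to
	 * nearest; plain truncating division would have printed 65. */
	printf("idle_pct = %u%%\n", idle_pct);
	return 0;
}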
src/task.c

@@ -22,6 +22,7 @@
 #include <haproxy/list.h>
 #include <haproxy/pool.h>
 #include <haproxy/task.h>
+#include <haproxy/time.h>
 #include <haproxy/tools.h>
 
 extern struct task *process_stream(struct task *t, void *context, unsigned int state);
@@ -880,6 +881,79 @@ uint sched_report_idle()
 	return rthr ? total / rthr : 0;
 }
 
+/* Update the idle time value twice a second, to be called after
+ * tv_update_date() when called after poll(), and currently called only by
+ * sched_leaving_poll() below. It relies on <before_poll> to be updated to
+ * the system time before calling poll().
+ */
+static inline void sched_measure_idle()
+{
+	/* Let's compute the idle to work ratio. We worked between after_poll
+	 * and before_poll, and slept between before_poll and date. The idle_pct
+	 * is updated at most twice every second. Note that the current second
+	 * rarely changes so we avoid a multiply when not needed.
+	 */
+	int delta;
+
+	if ((delta = date.tv_sec - before_poll.tv_sec))
+		delta *= 1000000;
+	idle_time += delta + (date.tv_usec - before_poll.tv_usec);
+
+	if ((delta = date.tv_sec - after_poll.tv_sec))
+		delta *= 1000000;
+	samp_time += delta + (date.tv_usec - after_poll.tv_usec);
+
+	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
+	if (samp_time < 500000)
+		return;
+
+	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
+	idle_time = samp_time = 0;
+}
+
+/* Collect date and time information after leaving poll(). <timeout> must be
+ * set to the maximum sleep time passed to poll (in milliseconds), and
+ * <interrupted> must be zero if the poller reached the timeout or non-zero
+ * otherwise, which generally is provided by the poller's return value.
+ */
+void sched_leaving_poll(int timeout, int interrupted)
+{
+	sched_measure_idle();
+	ti->prev_cpu_time = now_cpu_time();
+	ti->prev_mono_time = now_mono_time();
+}
+
+/* Collect date and time information before calling poll(). This will be used
+ * to count the run time of the past loop and the sleep time of the next poll.
+ * It also compares the elasped and cpu times during the activity period to
+ * estimate the amount of stolen time, which is reported if higher than half
+ * a millisecond.
+ */
+void sched_entering_poll()
+{
+	uint64_t new_mono_time;
+	uint64_t new_cpu_time;
+	int64_t stolen;
+
+	gettimeofday(&before_poll, NULL);
+
+	new_cpu_time = now_cpu_time();
+	new_mono_time = now_mono_time();
+
+	if (ti->prev_cpu_time && ti->prev_mono_time) {
+		new_cpu_time -= ti->prev_cpu_time;
+		new_mono_time -= ti->prev_mono_time;
+		stolen = new_mono_time - new_cpu_time;
+		if (unlikely(stolen >= 500000)) {
+			stolen /= 500000;
+			/* more than half a millisecond difference might
+			 * indicate an undesired preemption.
+			 */
+			report_stolen_time(stolen);
+		}
+	}
+}
+
 /*
  * Delete every tasks before running the master polling loop
  */
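For intuition on the stolen-time check in sched_entering_poll(): it compares how much wall-clock (monotonic) time elapsed against how much CPU time the thread actually consumed over the activity period; a sizable gap means the thread was runnable but not running. A minimal standalone replay of that computation with hypothetical numbers (the half-millisecond threshold of 500000 suggests nanosecond units for now_mono_time()/now_cpu_time()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical measurements for one activity period, in ns. */
	uint64_t new_mono_time = 3000000; /* 3.0 ms of wall-clock time */
	uint64_t new_cpu_time  = 1200000; /* 1.2 ms actually spent on CPU */

	/* The gap is time the thread wanted to run but didn't, e.g. it
	 * was preempted by the hypervisor or another process. */
	int64_t stolen = (int64_t)(new_mono_time - new_cpu_time); /* 1.8 ms */

	if (stolen >= 500000) {
		/* Same scaling as sched_entering_poll(): report in units
		 * of half a millisecond; 1800000/500000 = 3 (integer
		 * division truncates). */
		stolen /= 500000;
		printf("stolen: %lld half-ms units\n", (long long)stolen);
	}
	return 0;
}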