/*
 * General time-keeping code and variables
 *
 * Copyright 2000-2021 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <sys/time.h>
#include <signal.h>
#include <time.h>

#ifdef USE_THREAD
#include <pthread.h>
#endif

#include <haproxy/api.h>
#include <haproxy/activity.h>
#include <haproxy/clock.h>
#include <haproxy/signal-t.h>
#include <haproxy/time.h>
#include <haproxy/tinfo-t.h>
#include <haproxy/tools.h>

struct timeval start_date; /* the process's start date in wall-clock time */
struct timeval ready_date; /* date when the process was considered ready */
ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */

THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */

THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */

static THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
static THREAD_LOCAL struct timeval after_poll;  /* system date after leaving poll() */
static THREAD_LOCAL unsigned int samp_time; /* total elapsed time over current sample */
static THREAD_LOCAL unsigned int idle_time; /* total idle time over current sample */
static THREAD_LOCAL unsigned int iso_time_sec; /* last iso time value for this thread */
static THREAD_LOCAL char iso_time_str[34]; /* ISO time representation of gettimeofday() */

#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
static clockid_t per_thread_clock_id[MAX_THREADS];
#endif

/* returns the system's monotonic time in nanoseconds if supported, otherwise zero */
uint64_t now_mono_time(void)
{
	uint64_t ret = 0;
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_MONOTONIC_CLOCK)
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
#endif
	return ret;
}

/* Returns the system's monotonic time in nanoseconds.
 * Uses the coarse clock source if supported (for fast but
 * less precise queries with limited resource usage).
 * Falls back to now_mono_time() if the coarse source is not supported,
 * which may itself return 0 if not supported either.
 */
uint64_t now_mono_time_fast(void)
{
#if defined(CLOCK_MONOTONIC_COARSE)
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
	return (ts.tv_sec * 1000000000ULL + ts.tv_nsec);
#else
	/* fallback to regular mono time,
	 * returns 0 if not supported
	 */
	return now_mono_time();
#endif
}

/* returns the current thread's cumulated CPU time in nanoseconds if supported, otherwise zero */
uint64_t now_cpu_time(void)
{
	uint64_t ret = 0;
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
	struct timespec ts;
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
#endif
	return ret;
}

/* Returns the current thread's cumulated CPU time in nanoseconds.
 *
 * The thread-local value is cached, so the call is less precise but also less
 * expensive if heavily used.
 * We use the mono time as a cache expiration hint since now_cpu_time() is
 * known to be much more expensive than now_mono_time_fast() on systems
 * supporting the COARSE clock source.
 *
 * Returns 0 if either now_mono_time_fast() or now_cpu_time() is not
 * supported.
 */
uint64_t now_cpu_time_fast(void)
{
	static THREAD_LOCAL uint64_t mono_cache = 0;
	static THREAD_LOCAL uint64_t cpu_cache = 0;
	uint64_t mono_cur;

	mono_cur = now_mono_time_fast();
	if (unlikely(mono_cur != mono_cache)) {
		/* global mono clock was updated: local cache is outdated */
		cpu_cache = now_cpu_time();
		mono_cache = mono_cur;
	}
	return cpu_cache;
}

/* returns another thread's cumulated CPU time in nanoseconds if supported, otherwise zero */
uint64_t now_cpu_time_thread(int thr)
{
	uint64_t ret = 0;
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
	struct timespec ts;
	clock_gettime(per_thread_clock_id[thr], &ts);
	ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
#endif
	return ret;
}

/* set the clock source for the local thread */
void clock_set_local_source(void)
{
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
#ifdef USE_THREAD
	pthread_getcpuclockid(pthread_self(), &per_thread_clock_id[tid]);
#else
	per_thread_clock_id[tid] = CLOCK_THREAD_CPUTIME_ID;
#endif
#endif
}

/* registers a timer <tmr> of type timer_t delivering signal <sig> with value
 * <val>. It tries on the current thread's clock ID first and falls back to
 * CLOCK_REALTIME. Returns non-zero on success, zero on failure.
 */
int clock_setup_signal_timer(void *tmr, int sig, int val)
{
	int ret = 0;

#if defined(USE_RT) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
	struct sigevent sev = { };
	timer_t *timer = tmr;
	sigset_t set;

	/* unblock the WDTSIG signal we intend to use */
	sigemptyset(&set);
	sigaddset(&set, WDTSIG);
	ha_sigmask(SIG_UNBLOCK, &set, NULL);

	/* this timer will signal WDTSIG when it fires, with tid in the si_int
	 * field (important since any thread will receive the signal).
	 */
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = sig;
	sev.sigev_value.sival_int = val;
	if (timer_create(per_thread_clock_id[tid], &sev, timer) != -1 ||
	    timer_create(CLOCK_REALTIME, &sev, timer) != -1)
		ret = 1;
#endif
	return ret;
}

/* clock_update_date: sets <date> to system time, and sets <now_ns> to something
 * as close as possible to real time, following a monotonic function. The main
 * principle consists in detecting backwards and forwards time jumps and
 * adjusting an offset to correct them. This function should be called once
 * after each poll, and never farther apart than MAX_DELAY_MS*2. The poll's
 * timeout should be passed in <max_wait>, and the return value in
 * <interrupted> (a non-zero value means that we have not expired the timeout).
 *
 * clock_init_process_date() must have been called once first, and
 * clock_init_thread_date() must also have been called once for each thread.
 *
 * An offset is used to adjust the current time (date), to figure a monotonic
 * local time (now_ns). The offset is not critical, as it is only updated after
 * a clock jump is detected. From this point all threads will apply it to their
 * locally measured time, and will then agree around a common monotonic
 * global_now_ns value that serves to further refine their local time. Both
 * now_ns and global_now_ns are 64-bit integers counting nanoseconds since a
 * vague reference (it starts roughly 20s before the next wrap-around of the
 * millisecond counter after boot). The offset is also an integral number of
 * nanoseconds, but it's signed so that the clock can be adjusted in the two
 * directions.
 */
void clock_update_local_date(int max_wait, int interrupted)
{
	struct timeval min_deadline, max_deadline;

	gettimeofday(&date, NULL);

	/* compute the minimum and maximum local date we may have reached based
	 * on our past date and the associated timeout. There are three possible
	 * extremities:
	 *    - the new date cannot be older than before_poll
	 *    - if not interrupted, the new date cannot be older than
	 *      before_poll+max_wait
	 *    - in any case the new date cannot be newer than
	 *      before_poll+max_wait+some margin (100ms used here).
	 * In case of violation, we'll ignore the current date and instead
	 * restart from the last date we knew.
	 */
	_tv_ms_add(&min_deadline, &before_poll, max_wait);
	_tv_ms_add(&max_deadline, &before_poll, max_wait + 100);

	if (unlikely(__tv_islt(&date, &before_poll)                    || // big jump backwards
	             (!interrupted && __tv_islt(&date, &min_deadline)) || // small jump backwards
	             __tv_islt(&max_deadline, &date))) {                  // big jump forwards
		if (!interrupted)
			now_ns += ms_to_ns(max_wait);
	} else {
		/* The date is still within expectations. Let's apply the
		 * now_offset to the system date. Note: the offset is a single
		 * signed 64-bit (llong) value.
		 */
		now_ns = tv_to_ns(&date) + HA_ATOMIC_LOAD(&now_offset);
	}
	now_ms = ns_to_ms(now_ns);
}
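
/* Propagates the locally refreshed date to the process-wide one: global_now_ns
 * and global_now_ms only move forward, the local now_ns/now_ms catch up with
 * them if they were behind, and now_offset is refreshed accordingly.
 */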
void clock_update_global_date()
{
	ullong old_now_ns;
	uint old_now_ms;

	/* now that we have bounded the local time, let's check if it's
	 * realistic regarding the global date, which only moves forward,
	 * otherwise catch up.
	 */
	old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
	old_now_ms = global_now_ms;

	do {
		if (now_ns < old_now_ns)
			now_ns = old_now_ns;

		/* now <now_ns> is expected to be the most accurate date,
		 * equal to <global_now_ns> or newer. Updating the global
		 * date too often causes extreme contention and is not
		 * needed: it's only used to help threads run at the
		 * same date in case of local drift, and the global date,
		 * which changes, is only used by freq counters (a choice
		 * which is debatable by the way since it changes under us).
		 * Tests have seen that the contention can be reduced from
		 * 37% in this function to almost 0% when keeping clocks
		 * synchronized no better than 32 microseconds, so that's
		 * what we're doing here.
		 */
		now_ms = ns_to_ms(now_ns);
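
		/* both dates fall within the same 2^15 ns (~32.7 us) window:
		 * no need to refresh the global date
		 */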
		if (!((now_ns ^ old_now_ns) & ~0x7FFFULL))
			return;

		/* let's try to update the global_now_ns (both in nanoseconds
		 * and ms forms) or loop again.
		 */
	} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
	          (now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
	         __ha_cpu_relax());

	/* <now_ns> and <now_ms> are now updated to the last value of
	 * global_now_ns and global_now_ms, which were also monotonically
	 * updated. We can compute the latest offset, we don't care who writes
	 * it last, the variations will not break the monotonic property.
	 */
	HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}

/* must be called once at boot to initialize some global variables */
void clock_init_process_date(void)
{
	now_offset = 0;
	gettimeofday(&date, NULL);
	after_poll = before_poll = date;
	now_ns = global_now_ns = tv_to_ns(&date);
	global_now_ms = ns_to_ms(now_ns);

	/* force time to wrap 20s after boot: we first compute the time offset
	 * that once applied to the wall-clock date will make the local time
	 * wrap BOOT_TIME_WRAP_SEC seconds after boot. This offset is applied
	 * to the process-wide time, and will be used to recompute the local
	 * time, both of which will match and continue from this shifted date.
	 */
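	/* seconds remaining before global_now_ms wraps, minus BOOT_TIME_WRAP_SEC, converted to ns */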
	now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
	global_now_ns += now_offset;
	now_ns = global_now_ns;
	now_ms = global_now_ms = ns_to_ms(now_ns);

	th_ctx->idle_pct = 100;
	clock_update_date(0, 1);
}
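
/* Recomputes now_offset from the current now_ns and wall-clock <date> so that
 * applying the offset to the system time yields the current local time again.
 */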
void clock_adjust_now_offset(void)
{
	HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}

/* must be called once per thread to initialize their thread-local variables.
 * Note that other threads might also be initializing and running in parallel.
 */
void clock_init_thread_date(void)
{
	gettimeofday(&date, NULL);
	after_poll = before_poll = date;
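
	/* adopt the current process-wide date so that this thread starts in
	 * sync with the other threads
	 */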
	now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
	th_ctx->idle_pct = 100;
	th_ctx->prev_cpu_time = now_cpu_time();
	clock_update_date(0, 1);
}

/* report the average CPU idle percentage over all running threads, between 0 and 100 */
uint clock_report_idle(void)
{
	uint total = 0;
	uint rthr = 0;
	uint thr;

	for (thr = 0; thr < MAX_THREADS; thr++) {
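		/* skip slots with no thread group or whose thread is not enabled */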
		if (!ha_thread_info[thr].tg ||
		    !(ha_thread_info[thr].tg->threads_enabled & ha_thread_info[thr].ltid_bit))
			continue;
		total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
		rthr++;
	}
	return rthr ? total / rthr : 0;
}

/* Update the idle time value twice a second, to be called after
 * clock_update_date() when called after poll(), and currently called only by
 * clock_leaving_poll() below. It relies on <before_poll> to be updated to
 * the system time before calling poll().
 */
static inline void clock_measure_idle(void)
{
	/* Let's compute the idle to work ratio. We worked between after_poll
	 * and before_poll, and slept between before_poll and date. The idle_pct
	 * is updated at most twice every second. Note that the current second
	 * rarely changes so we avoid a multiply when not needed.
	 */
	int delta;

	if ((delta = date.tv_sec - before_poll.tv_sec))
		delta *= 1000000;
	idle_time += delta + (date.tv_usec - before_poll.tv_usec);

	if ((delta = date.tv_sec - after_poll.tv_sec))
		delta *= 1000000;
	samp_time += delta + (date.tv_usec - after_poll.tv_usec);

	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
	if (samp_time < 500000)
		return;

	HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
	idle_time = samp_time = 0;
}

/* Collect date and time information after leaving poll(). <timeout> must be
 * set to the maximum sleep time passed to poll (in milliseconds), and
 * <interrupted> must be zero if the poller reached the timeout or non-zero
 * otherwise, which generally is provided by the poller's return value.
 */
void clock_leaving_poll(int timeout, int interrupted)
{
	clock_measure_idle();
	th_ctx->prev_cpu_time = now_cpu_time();
	th_ctx->prev_mono_time = now_mono_time();
}

/* Collect date and time information before calling poll(). This will be used
 * to count the run time of the past loop and the sleep time of the next poll.
 * It also compares the elapsed and cpu times during the activity period to
 * estimate the amount of stolen time, which is reported if higher than half
 * a millisecond.
 */
void clock_entering_poll(void)
{
	uint64_t new_mono_time;
	uint64_t new_cpu_time;
	uint32_t run_time;
	int64_t stolen;

	gettimeofday(&before_poll, NULL);
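
	/* time spent outside poll(), i.e. the run time of the previous loop
	 * iteration, in microseconds
	 */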
	run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec);

	new_cpu_time = now_cpu_time();
	new_mono_time = now_mono_time();

	if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) {
		new_cpu_time -= th_ctx->prev_cpu_time;
		new_mono_time -= th_ctx->prev_mono_time;
		stolen = new_mono_time - new_cpu_time;
		if (unlikely(stolen >= 500000)) {
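			/* convert the stolen time from ns to 0.5 ms units before reporting it */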
			stolen /= 500000;
			/* more than half a millisecond difference might
			 * indicate an undesired preemption.
			 */
			report_stolen_time(stolen);
		}
	}

	/* update the average runtime */
	activity_count_runtime(run_time);
}

/* returns the current date as returned by gettimeofday() in ISO+microsecond
 * format. It uses a thread-local static variable that the reader can consume
 * for as long as it wants until next call. Thus, do not call it from a signal
 * handler. If <pad> is non-0, a trailing space will be added. It will always
 * return exactly 32 or 33 characters (depending on padding) and will always be
 * zero-terminated, thus it will always fit into a 34 bytes buffer.
 * This also always includes the local timezone (in +/-HH:mm format).
 */
char *timeofday_as_iso_us(int pad)
{
	struct timeval new_date;
	struct tm tm;
	const char *offset;
	char c;

	gettimeofday(&new_date, NULL);
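	/* the seconds part of the string is only re-rendered when the current
	 * second changes (or on the first call)
	 */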
	if (new_date.tv_sec != iso_time_sec || !new_date.tv_sec) {
		get_localtime(new_date.tv_sec, &tm);
		offset = get_gmt_offset(new_date.tv_sec, &tm);
		if (unlikely(strftime(iso_time_str, sizeof(iso_time_str), "%Y-%m-%dT%H:%M:%S.000000+00:00", &tm) != 32))
			strlcpy2(iso_time_str, "YYYY-mm-ddTHH:MM:SS.000000-00:00", sizeof(iso_time_str)); // make the failure visible but respect format.
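		/* overwrite the "+00:00" placeholder with the real GMT offset
		 * (sign, hours and minutes), keeping the ':' separator
		 */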
		iso_time_str[26] = offset[0];
		iso_time_str[27] = offset[1];
		iso_time_str[28] = offset[2];
		iso_time_str[30] = offset[3];
		iso_time_str[31] = offset[4];
		iso_time_sec = new_date.tv_sec;
	}

	/* utoa_pad adds a trailing 0 so we save the char for restore */
	c = iso_time_str[26];
	utoa_pad(new_date.tv_usec, iso_time_str + 20, 7);
	iso_time_str[26] = c;
	if (pad) {
		iso_time_str[32] = ' ';
		iso_time_str[33] = 0;
	}
	return iso_time_str;
}