MEDIUM: clock: replace timeval "now" with integer "now_ns"

This puts an end to the occasional confusion between the "now" date,
which is internal, monotonic and not synchronized with the system's
date, and "date", which is the system's date and not necessarily
monotonic. Variable "now" was removed and replaced with a 64-bit
integer "now_ns" which is a counter of nanoseconds. It wraps every
585 years, so if all goes well (i.e. if humanity does not need
haproxy anymore in 500 years), it will simply never wrap. This implies
that now_ns is never zero, so the zero value can reliably be used as
"not set yet" for a timestamp if needed. This will also simplify date
checks, where it becomes possible again to do "date1 < date2".
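
As a minimal illustration (not part of the patch; ns_to_ms() and
ns_to_sec() below are simplified stand-ins for the real helpers),
timestamp handling now boils down to plain 64-bit integer arithmetic:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the ns_to_ms()/ns_to_sec() helpers */
static inline uint64_t ns_to_ms(uint64_t ns)  { return ns / 1000000ULL; }
static inline uint64_t ns_to_sec(uint64_t ns) { return ns / 1000000000ULL; }

int main(void)
{
        uint64_t now_ns    = 1682666175ULL * 1000000000ULL; /* made-up date, in ns */
        uint64_t accept_ts = 0;                /* 0 reliably means "not set yet" */

        if (!accept_ts)
                accept_ts = now_ns;            /* first event: record its timestamp */

        now_ns += 250ULL * 1000000ULL;         /* pretend 250ms elapsed */

        /* plain integer comparison and subtraction, no timeval handling */
        if (accept_ts && accept_ts < now_ns)
                printf("t_idle=%llums date=%llus\n",
                       (unsigned long long)ns_to_ms(now_ns - accept_ts),
                       (unsigned long long)ns_to_sec(now_ns));
        return 0;
}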

All occurrences of "tv_to_ns(&now)" were simply replaced by "now_ns".
Due to the interdependencies between now, global_now and now_offset,
all 3 had to be turned to nanoseconds at once. It's not a problem since
all of them were only used in 3 functions in clock.c, but it makes the
patch look bigger than it really is.

The clock_update_local_date() and clock_update_global_date() functions
are now much simpler as there is no longer any need to perform
conversions nor to round the timeval up or down.

The deliberate wrapping still happens: the internal offset is preset at
startup so that the 32-bit now_ms continues to wrap 20 seconds after
boot.
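
Purely for illustration (a reduced sketch; BOOT_TIME_WRAP_SEC and
sec_to_ns() are stand-ins for the real definitions), the offset is
derived from the time remaining before the 32-bit millisecond counter
wraps:

#include <stdint.h>
#include <stdio.h>

#define BOOT_TIME_WRAP_SEC 20U

static inline uint64_t sec_to_ns(uint32_t s) { return (uint64_t)s * 1000000000ULL; }

int main(void)
{
        /* pretend the 32-bit millisecond counter currently reads this */
        uint32_t global_now_ms = 123456789U;

        /* seconds remaining before the 32-bit ms counter wraps */
        uint32_t sec_to_wrap = (uint32_t)(0U - global_now_ms) / 1000U;

        /* preset the offset so that, once applied to the local date,
         * the ms counter will wrap about 20 seconds from now.
         */
        uint64_t now_offset = sec_to_ns(sec_to_wrap - BOOT_TIME_WRAP_SEC);

        printf("now_offset=%lluns -> now_ms wraps in ~%us\n",
               (unsigned long long)now_offset, BOOT_TIME_WRAP_SEC);
        return 0;
}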

The start_time used to calculate the uptime is still a timeval for now;
it is simply converted to nanoseconds where needed. One open question
concerns global_now_ms, which is only used for the freq counters. It's
unclear whether there's more value in keeping two variables that need
to be synchronized sequentially like today, or in just using
global_now_ns divided by 1 million. Both approaches will work equally
well on modern systems; the difference might show on smaller ones.
Better not change anything for now.

One benefit of the new approach is that we now have an internal date
with nanosecond resolution and microsecond precision, which can be
useful to extend some measurements given that timestamps also have
this resolution.
Willy Tarreau 2023-04-28 09:16:15 +02:00
parent eed5da1037
commit 69530f59ae
31 changed files with 131 additions and 159 deletions

View File

@ -65,7 +65,7 @@ static inline int be_usable_srv(struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
be->be_counters.last_sess = ns_to_sec(tv_to_ns(&now));
be->be_counters.last_sess = ns_to_sec(now_ns);
}
/* This function returns non-zero if the designated server will be

View File

@ -27,9 +27,9 @@
extern struct timeval start_date; /* the process's start date in wall-clock time */
extern struct timeval start_time; /* the process's start date in internal monotonic time */
extern volatile ullong global_now; /* common monotonic date between all threads (32:32) */
extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval now; /* internal monotonic date derived from real clock */
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
uint64_t now_cpu_time_thread(int thr);

View File

@ -101,7 +101,7 @@ static inline struct stconn *qc_attach_sc(struct qcs *qcs, struct buffer *buf)
se_expect_no_data(qcs->sd);
/* TODO duplicated from mux_h2 */
sess->t_idle = ns_to_ms(tv_to_ns(&now) - sess->accept_ts) - sess->t_handshake;
sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;
if (!sc_new_from_endp(qcs->sd, sess, buf))
return NULL;
@ -116,7 +116,7 @@ static inline struct stconn *qc_attach_sc(struct qcs *qcs, struct buffer *buf)
/* TODO duplicated from mux_h2 */
sess->accept_date = date;
sess->accept_ts = tv_to_ns(&now);
sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = 0;

View File

@ -183,7 +183,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
s->counters.last_sess = ns_to_sec(tv_to_ns(&now));
s->counters.last_sess = ns_to_sec(now_ns);
}
/* returns the current server throttle rate between 0 and 100% */

View File

@ -1087,7 +1087,7 @@ static int cli_io_handler_show_activity(struct appctx *appctx)
} while (0)
/* retrieve uptime */
up = tv_to_ns(&now) - tv_to_ns(&start_time);
up = now_ns - tv_to_ns(&start_time);
up_sec = ns_to_sec(up);
up_usec = (up / 1000U) % 1000000U;

View File

@ -64,7 +64,7 @@
int be_lastsession(const struct proxy *be)
{
if (be->be_counters.last_sess)
return ns_to_sec(tv_to_ns(&now)) - be->be_counters.last_sess;
return ns_to_sec(now_ns) - be->be_counters.last_sess;
return -1;
}
@ -2024,7 +2024,7 @@ void back_try_conn_req(struct stream *s)
sc_shutdown(sc);
sc->flags |= SC_FL_ERROR;
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
@ -2060,7 +2060,7 @@ void back_try_conn_req(struct stream *s)
if (unlikely(!(s->flags & SF_ASSIGNED)))
sc->state = SC_ST_REQ;
else {
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
sc->state = SC_ST_ASS;
}
DBG_TRACE_STATE("dequeue connection request", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
@ -2072,7 +2072,7 @@ void back_try_conn_req(struct stream *s)
/* ... and timeout expired */
s->conn_exp = TICK_ETERNITY;
s->flags &= ~SF_CONN_EXP;
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
@ -2094,7 +2094,7 @@ void back_try_conn_req(struct stream *s)
/* Connection remains in queue, check if we have to abort it */
if (back_may_abort_req(req, s)) {
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
@ -2219,7 +2219,7 @@ void back_handle_st_req(struct stream *s)
}
/* The server is assigned */
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
sc->state = SC_ST_ASS;
be_set_sess_last(s->be);
DBG_TRACE_STATE("connection request assigned to a server", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
@ -2441,8 +2441,8 @@ void back_handle_st_rdy(struct stream *s)
BUG_ON(!sc_appctx(s->scb));
if (!s->logs.request_ts)
s->logs.request_ts = tv_to_ns(&now);
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.request_ts = now_ns;
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
be_set_sess_last(s->be);
}
@ -2505,7 +2505,7 @@ void back_handle_st_rdy(struct stream *s)
*/
void set_backend_down(struct proxy *be)
{
be->last_change = ns_to_sec(tv_to_ns(&now));
be->last_change = ns_to_sec(now_ns);
_HA_ATOMIC_INC(&be->down_trans);
if (!(global.mode & MODE_STARTING)) {
@ -2578,10 +2578,10 @@ no_cookie:
}
int be_downtime(struct proxy *px) {
if (px->lbprm.tot_weight && px->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative time
if (px->lbprm.tot_weight && px->last_change < ns_to_sec(now_ns)) // ignore negative time
return px->down_time;
return ns_to_sec(tv_to_ns(&now)) - px->last_change + px->down_time;
return ns_to_sec(now_ns) - px->last_change + px->down_time;
}
/*

View File

@ -660,7 +660,7 @@ static struct peer *cfg_peers_add_peer(struct peers *peers,
peers->remote = p;
p->conf.file = strdup(file);
p->conf.line = linenum;
p->last_change = ns_to_sec(tv_to_ns(&now));
p->last_change = ns_to_sec(now_ns);
p->xprt = xprt_get(XPRT_RAW);
p->sock_init_arg = NULL;
HA_SPIN_INIT(&p->lock);
@ -838,7 +838,7 @@ int cfg_parse_peers(const char *file, int linenum, char **args, int kwm)
cfg_peers = curpeers;
curpeers->conf.file = strdup(file);
curpeers->conf.line = linenum;
curpeers->last_change = ns_to_sec(tv_to_ns(&now));
curpeers->last_change = ns_to_sec(now_ns);
curpeers->id = strdup(args[1]);
curpeers->disabled = 0;
}

View File

@ -471,7 +471,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
if (status == HCHK_STATUS_START) {
check->result = CHK_RES_UNKNOWN; /* no result yet */
check->desc[0] = '\0';
check->start = tv_to_ns(&now);
check->start = now_ns;
return;
}
@ -492,7 +492,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
check->duration = -1;
else if (check->start) {
/* set_server_check_status() may be called more than once */
check->duration = ns_to_ms(tv_to_ns(&now) - check->start);
check->duration = ns_to_ms(now_ns - check->start);
check->start = 0;
}
@ -1029,9 +1029,9 @@ int httpchk_build_status_header(struct server *s, struct buffer *buf)
s->queue.length);
if ((s->cur_state == SRV_ST_STARTING) &&
ns_to_sec(tv_to_ns(&now)) < s->last_change + s->slowstart &&
ns_to_sec(tv_to_ns(&now)) >= s->last_change) {
ratio = MAX(1, 100 * (ns_to_sec(tv_to_ns(&now)) - s->last_change) / s->slowstart);
ns_to_sec(now_ns) < s->last_change + s->slowstart &&
ns_to_sec(now_ns) >= s->last_change) {
ratio = MAX(1, 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart);
chunk_appendf(buf, "; throttle=%d%%", ratio);
}
@ -1499,7 +1499,7 @@ int start_check_task(struct check *check, int mininter,
/* check this every ms */
t->expire = tick_add(now_ms, MS_TO_TICKS(mininter * srvpos / nbcheck));
check->start = tv_to_ns(&now);
check->start = now_ns;
task_queue(t);
return 1;

View File

@ -454,7 +454,7 @@ static struct proxy *cli_alloc_fe(const char *name, const char *file, int line)
init_new_proxy(fe);
fe->next = proxies_list;
proxies_list = fe;
fe->last_change = ns_to_sec(tv_to_ns(&now));
fe->last_change = ns_to_sec(now_ns);
fe->id = strdup("GLOBAL");
fe->cap = PR_CAP_FE|PR_CAP_INT;
fe->maxconn = 10; /* default to 10 concurrent connections */
@ -2626,7 +2626,7 @@ read_again:
/* If there is data available for analysis, log the end of the idle time. */
if (c_data(req) && s->logs.t_idle == -1)
s->logs.t_idle = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts) - s->logs.t_handshake;
s->logs.t_idle = ns_to_ms(now_ns - s->logs.accept_ts) - s->logs.t_handshake;
to_forward = pcli_parse_request(s, req, &errmsg, &next_pid);
if (to_forward > 0) {
@ -2762,7 +2762,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
sess_change_server(s, NULL);
}
s->logs.t_close = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
stream_process_counters(s);
/* don't count other requests' data */
@ -2784,7 +2784,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
stream_update_time_stats(s);
s->logs.accept_date = date; /* user-visible date for logging */
s->logs.accept_ts = tv_to_ns(&now); /* corrected date for internal use */
s->logs.accept_ts = now_ns; /* corrected date for internal use */
s->logs.t_handshake = 0; /* There are no handshake in keep alive connection. */
s->logs.t_idle = -1;
s->logs.request_ts = 0;

View File

@ -28,13 +28,13 @@
struct timeval start_date; /* the process's start date in wall-clock time */
struct timeval start_time; /* the process's start date in internal monotonic time */
volatile ullong global_now; /* common monotonic date between all threads (32:32) */
volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
THREAD_ALIGNED(64) static ullong now_offset; /* global offset between system time and global time */
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
THREAD_LOCAL struct timeval now; /* internal monotonic date derived from real clock */
THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
static THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
@ -174,8 +174,8 @@ int clock_setup_signal_timer(void *tmr, int sig, int val)
return ret;
}
/* clock_update_date: sets <date> to system time, and sets <now> to something as
* close as possible to real time, following a monotonic function. The main
/* clock_update_date: sets <date> to system time, and sets <now_ns> to something
* as close as possible to real time, following a monotonic function. The main
* principle consists in detecting backwards and forwards time jumps and adjust
* an offset to correct them. This function should be called once after each
* poll, and never farther apart than MAX_DELAY_MS*2. The poll's timeout should
@ -186,19 +186,19 @@ int clock_setup_signal_timer(void *tmr, int sig, int val)
* clock_init_thread_date() must also have been called once for each thread.
*
* An offset is used to adjust the current time (date), to figure a monotonic
* local time (now). The offset is not critical, as it is only updated after a
* clock jump is detected. From this point all threads will apply it to their
* local time (now_ns). The offset is not critical, as it is only updated after
* a clock jump is detected. From this point all threads will apply it to their
* locally measured time, and will then agree around a common monotonic
* global_now value that serves to further refine their local time. As it is
* not possible to atomically update a timeval, both global_now and the
* now_offset values are instead stored as 64-bit integers made of two 32 bit
* values for the tv_sec and tv_usec parts. The offset is made of two signed
* ints so that the clock can be adjusted in the two directions.
* global_now_ns value that serves to further refine their local time. Both
* now_ns and global_now_ns are 64-bit integers counting nanoseconds since a
* vague reference (it starts roughly 20s before the next wrap-around of the
* millisecond counter after boot). The offset is also an integral number of
* nanoseconds, but it's signed so that the clock can be adjusted in the two
* directions.
*/
void clock_update_local_date(int max_wait, int interrupted)
{
struct timeval min_deadline, max_deadline;
ullong ofs;
gettimeofday(&date, NULL);
@ -216,56 +216,39 @@ void clock_update_local_date(int max_wait, int interrupted)
_tv_ms_add(&min_deadline, &before_poll, max_wait);
_tv_ms_add(&max_deadline, &before_poll, max_wait + 100);
ofs = HA_ATOMIC_LOAD(&now_offset);
if (unlikely(__tv_islt(&date, &before_poll) || // big jump backwards
(!interrupted && __tv_islt(&date, &min_deadline)) || // small jump backwards
__tv_islt(&max_deadline, &date))) { // big jump forwards
if (!interrupted)
_tv_ms_add(&now, &now, max_wait);
now_ns += ms_to_ns(max_wait);
} else {
/* The date is still within expectations. Let's apply the
* now_offset to the system date. Note: ofs if made of two
* independent signed ints.
*/
now.tv_sec = date.tv_sec + (int)(ofs >> 32); // note: may be positive or negative
now.tv_usec = date.tv_usec + (int)ofs; // note: may be positive or negative
if ((int)now.tv_usec < 0) {
now.tv_usec += 1000000;
now.tv_sec -= 1;
} else if (now.tv_usec >= 1000000) {
now.tv_usec -= 1000000;
now.tv_sec += 1;
}
now_ns = tv_to_ns(&date) + HA_ATOMIC_LOAD(&now_offset);
}
now_ms = __tv_to_ms(&now);
now_ms = ns_to_ms(now_ns);
}
void clock_update_global_date()
{
struct timeval tmp_now;
ullong old_now_ns;
uint old_now_ms;
ullong old_now;
ullong new_now;
ullong ofs_new;
uint sec_ofs, usec_ofs;
/* now that we have bounded the local time, let's check if it's
* realistic regarding the global date, which only moves forward,
* otherwise catch up.
*/
old_now = global_now;
old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
old_now_ms = global_now_ms;
do {
tmp_now.tv_sec = (unsigned int)(old_now >> 32);
tmp_now.tv_usec = old_now & 0xFFFFFFFFU;
if (now_ns < old_now_ns)
now_ns = old_now_ns;
if (__tv_islt(&now, &tmp_now))
now = tmp_now;
/* now <now> is expected to be the most accurate date,
* equal to <global_now> or newer. Updating the global
/* now <now_ns> is expected to be the most accurate date,
* equal to <global_now_ns> or newer. Updating the global
* date too often causes extreme contention and is not
* needed: it's only used to help threads run at the
* same date in case of local drift, and the global date,
@ -276,34 +259,24 @@ void clock_update_global_date()
* synchronized no better than 32 microseconds, so that's
* what we're doing here.
*/
now_ms = ns_to_ms(now_ns);
new_now = ((ullong)now.tv_sec << 32) + (uint)now.tv_usec;
now_ms = __tv_to_ms(&now);
if (!((new_now ^ old_now) & ~0x1FULL))
if (!((now_ns ^ old_now_ns) & ~0x7FFFULL))
return;
/* let's try to update the global <now> (both in timeval
/* let's try to update the global_now_ns (both in nanoseconds
* and ms forms) or loop again.
*/
} while ((!_HA_ATOMIC_CAS(&global_now, &old_now, new_now) ||
} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
__ha_cpu_relax());
/* <now> and <now_ms> are now updated to the last value of global_now
* and global_now_ms, which were also monotonically updated. We can
* compute the latest offset, we don't care who writes it last, the
* variations will not break the monotonic property.
/* <now_ns> and <now_ms> are now updated to the last value of
* global_now_ns and global_now_ms, which were also monotonically
* updated. We can compute the latest offset, we don't care who writes
* it last, the variations will not break the monotonic property.
*/
sec_ofs = now.tv_sec - date.tv_sec;
usec_ofs = now.tv_usec - date.tv_usec;
if ((int)usec_ofs < 0) {
usec_ofs += 1000000;
sec_ofs -= 1;
}
ofs_new = ((ullong)sec_ofs << 32) + usec_ofs;
HA_ATOMIC_STORE(&now_offset, ofs_new);
HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}
/* must be called once at boot to initialize some global variables */
@ -311,9 +284,9 @@ void clock_init_process_date(void)
{
now_offset = 0;
gettimeofday(&date, NULL);
now = after_poll = before_poll = date;
global_now = ((ullong)date.tv_sec << 32) + (uint)date.tv_usec;
global_now_ms = now.tv_sec * 1000 + now.tv_usec / 1000;
after_poll = before_poll = date;
now_ns = global_now_ns = tv_to_ns(&date);
global_now_ms = ns_to_ms(now_ns);
/* force time to wrap 20s after boot: we first compute the time offset
* that once applied to the wall-clock date will make the local time
@ -321,8 +294,10 @@ void clock_init_process_date(void)
* and will be used to recompute the local time, both of which will
* match and continue from this shifted date.
*/
now_offset = (uint64_t)(-(global_now_ms / 1000U) - BOOT_TIME_WRAP_SEC) << 32;
global_now += now_offset;
now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
global_now_ns += now_offset;
now_ns = global_now_ns;
now_ms = global_now_ms = ns_to_ms(now_ns);
th_ctx->idle_pct = 100;
clock_update_date(0, 1);
@ -333,14 +308,10 @@ void clock_init_process_date(void)
*/
void clock_init_thread_date(void)
{
ullong old_now;
gettimeofday(&date, NULL);
after_poll = before_poll = date;
old_now = _HA_ATOMIC_LOAD(&global_now);
now.tv_sec = old_now >> 32;
now.tv_usec = (uint)old_now;
now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
th_ctx->idle_pct = 100;
th_ctx->prev_cpu_time = now_cpu_time();
clock_update_date(0, 1);

View File

@ -264,9 +264,9 @@ static inline void
spoe_update_stat_time(ullong *since, long *t)
{
if (*t == -1)
*t = ns_to_ms(tv_to_ns(&now) - *since);
*t = ns_to_ms(now_ns - *since);
else
*t += ns_to_ms(tv_to_ns(&now) - *since);
*t += ns_to_ms(now_ns - *since);
*since = 0;
}
@ -1621,7 +1621,7 @@ spoe_handle_sending_frame_appctx(struct appctx *appctx, int *skip)
LIST_APPEND(&SPOE_APPCTX(appctx)->waiting_queue, &ctx->list);
}
_HA_ATOMIC_INC(&agent->counters.nb_waiting);
ctx->stats.wait_ts = tv_to_ns(&now);
ctx->stats.wait_ts = now_ns;
SPOE_APPCTX(appctx)->frag_ctx.ctx = NULL;
SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
SPOE_APPCTX(appctx)->frag_ctx.curfid = 0;
@ -1678,7 +1678,7 @@ spoe_handle_receiving_frame_appctx(struct appctx *appctx, int *skip)
LIST_INIT(&ctx->list);
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
spoe_update_stat_time(&ctx->stats.wait_ts, &ctx->stats.t_waiting);
ctx->stats.response_ts = tv_to_ns(&now);
ctx->stats.response_ts = now_ns;
if (ctx->spoe_appctx) {
ctx->spoe_appctx->cur_fpa--;
ctx->spoe_appctx = NULL;
@ -2130,7 +2130,7 @@ spoe_queue_context(struct spoe_context *ctx)
* it. */
_HA_ATOMIC_INC(&agent->counters.nb_sending);
spoe_update_stat_time(&ctx->stats.request_ts, &ctx->stats.t_request);
ctx->stats.queue_ts = tv_to_ns(&now);
ctx->stats.queue_ts = now_ns;
if (ctx->spoe_appctx)
return 1;
LIST_APPEND(&agent->rt[tid].sending_queue, &ctx->list);
@ -2600,8 +2600,8 @@ spoe_start_processing(struct spoe_agent *agent, struct spoe_context *ctx, int di
return 0;
agent->rt[tid].processing++;
ctx->stats.start_ts = tv_to_ns(&now);
ctx->stats.request_ts = tv_to_ns(&now);
ctx->stats.start_ts = now_ns;
ctx->stats.request_ts = now_ns;
ctx->stats.t_request = -1;
ctx->stats.t_queue = -1;
ctx->stats.t_waiting = -1;
@ -2712,7 +2712,7 @@ spoe_process_messages(struct stream *s, struct spoe_context *ctx,
if (ctx->state == SPOE_CTX_ST_ENCODING_MSGS) {
if (ctx->stats.request_ts == 0)
ctx->stats.request_ts = tv_to_ns(&now);
ctx->stats.request_ts = now_ns;
if (!spoe_acquire_buffer(&ctx->buffer, &ctx->buffer_wait))
goto out;
ret = spoe_encode_messages(s, ctx, messages, dir, type);
@ -3019,7 +3019,7 @@ spoe_init(struct proxy *px, struct flt_conf *fconf)
/* conf->agent_fe was already initialized during the config
* parsing. Finish initialization. */
conf->agent_fe.last_change = ns_to_sec(tv_to_ns(&now));
conf->agent_fe.last_change = ns_to_sec(now_ns);
conf->agent_fe.cap = PR_CAP_FE;
conf->agent_fe.mode = PR_MODE_TCP;
conf->agent_fe.maxconn = 0;

View File

@ -1509,7 +1509,7 @@ static void init_early(int argc, char **argv)
tzset();
clock_init_process_date();
start_date = date;
start_time = now;
start_time = NS_TO_TV(now_ns);
pid = getpid();
/* Set local host name and adjust some environment variables.

View File

@ -8295,7 +8295,7 @@ __LJMP static int hlua_txn_done(lua_State *L)
finst = ((htxn->dir == SMP_OPT_DIR_REQ) ? SF_FINST_R : SF_FINST_H);
if (htxn->dir == SMP_OPT_DIR_REQ) {
/* let's log the request time */
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
}

View File

@ -305,7 +305,8 @@ int hlua_now(lua_State *L)
*/
struct timeval tv;
tv_remain(&start_time, &now, &tv);
tv = NS_TO_TV(now_ns);
tv_remain(&start_time, &tv, &tv); // tv = now_ns - start_time
tv_add(&tv, &tv, &start_date);
lua_newtable(L);

View File

@ -1284,7 +1284,7 @@ static enum act_return http_action_auth(struct act_rule *rule, struct proxy *px,
goto fail;
/* Note: Only eval on the request */
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
@ -2279,7 +2279,7 @@ static enum act_return http_action_return(struct act_rule *rule, struct proxy *p
if (rule->from == ACT_F_HTTP_REQ) {
/* let's log the request time */
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */

View File

@ -439,7 +439,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
if (!s->target && http_stats_check_uri(s, txn, px)) {
s->target = &http_stats_applet.obj_type;
if (unlikely(!sc_applet_create(s->scb, objt_applet(s->target)))) {
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_RESOURCE;
goto return_int_err;
@ -562,7 +562,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
if (s->be->cookie_name || sess->fe->capture_name)
http_manage_client_side_cookies(s, req);
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (s->flags & SF_BE_ASSIGNED)
@ -716,7 +716,7 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
req->analyse_exp = TICK_ETERNITY;
req->analysers &= ~an_bit;
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
/* OK let's go on with the BODY now */
DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
return 1;
@ -782,7 +782,7 @@ int http_process_tarpit(struct stream *s, struct channel *req, int an_bit)
* It will not cause trouble to the logs because we can exclude
* the tarpitted connections by filtering on the 'PT' status flags.
*/
s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
http_reply_and_close(s, txn->status, (!(s->scf->flags & SC_FL_ERROR) ? http_error_message(s) : NULL));
http_set_term_flags(s);
@ -824,7 +824,7 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit
http_end:
/* The situation will not evolve, so let's give up on the analysis. */
s->logs.request_ts = tv_to_ns(&now); /* update the request timer to reflect full request */
s->logs.request_ts = now_ns; /* update the request timer to reflect full request */
req->analysers &= ~an_bit;
req->analyse_exp = TICK_ETERNITY;
DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
@ -1585,7 +1585,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
end:
/* we want to have the response time before we start processing it */
s->logs.t_data = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_data = ns_to_ms(now_ns - s->logs.accept_ts);
/* end of job, return OK */
rep->analysers &= ~an_bit;
@ -2442,7 +2442,7 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
if (rule->flags & REDIRECT_FLAG_FROM_REQ) {
/* let's log the request time */
s->logs.request_ts = tv_to_ns(&now);
s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */

View File

@ -2018,7 +2018,7 @@ int sess_build_logline(struct session *sess, struct stream *s, char *dst, size_t
tmp_strm_log.t_queue = -1;
tmp_strm_log.t_connect = -1;
tmp_strm_log.t_data = -1;
tmp_strm_log.t_close = ns_to_ms(tv_to_ns(&now) - sess->accept_ts);
tmp_strm_log.t_close = ns_to_ms(now_ns - sess->accept_ts);
tmp_strm_log.bytes_in = 0;
tmp_strm_log.bytes_out = 0;
tmp_strm_log.prx_queue_pos = 0;
@ -3760,7 +3760,7 @@ int cfg_parse_log_forward(const char *file, int linenum, char **args, int kwm)
px->conf.file = strdup(file);
px->conf.line = linenum;
px->mode = PR_MODE_SYSLOG;
px->last_change = ns_to_sec(tv_to_ns(&now));
px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_FE;
px->maxconn = 10;
px->timeout.client = TICK_ETERNITY;

View File

@ -145,7 +145,7 @@ int init_email_alert(struct mailers *mls, struct proxy *p, char **err)
/* check this in one ms */
t->expire = TICK_ETERNITY;
check->start = tv_to_ns(&now);
check->start = now_ns;
task_queue(t);
}

View File

@ -3017,7 +3017,7 @@ static int h1_process(struct h1c * h1c)
}
if (h1s->sess->t_idle == -1)
h1s->sess->t_idle = ns_to_ms(tv_to_ns(&now) - h1s->sess->accept_ts) - h1s->sess->t_handshake;
h1s->sess->t_idle = ns_to_ms(now_ns - h1s->sess->accept_ts) - h1s->sess->t_handshake;
/* Get the stream rxbuf */
buf = h1_get_buf(h1c, &h1s->rxbuf);
@ -3427,7 +3427,7 @@ static void h1_detach(struct sedesc *sd)
h1c = h1s->h1c;
sess->accept_date = date;
sess->accept_ts = tv_to_ns(&now);
sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = -1;

View File

@ -1573,7 +1573,7 @@ static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *in
* request) and the idle time, which is the delay since the previous
* request. We can set the value now, it will be copied by stream_new().
*/
sess->t_idle = ns_to_ms(tv_to_ns(&now) - sess->accept_ts) - sess->t_handshake;
sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;
if (!sc_new_from_endp(h2s->sd, sess, input))
goto out_close;
@ -1586,7 +1586,7 @@ static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *in
* right now.
*/
sess->accept_date = date;
sess->accept_ts = tv_to_ns(&now);
sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = 0;

View File

@ -79,7 +79,7 @@ int mworker_ext_launch_all()
continue;
}
child->timestamp = ns_to_sec(tv_to_ns(&now));
child->timestamp = ns_to_sec(now_ns);
ret = fork();
if (ret < 0) {

View File

@ -3247,7 +3247,7 @@ static void peer_session_forceshutdown(struct peer *peer)
/* Pre-configures a peers frontend to accept incoming connections */
void peers_setup_frontend(struct proxy *fe)
{
fe->last_change = ns_to_sec(tv_to_ns(&now));
fe->last_change = ns_to_sec(now_ns);
fe->cap = PR_CAP_FE | PR_CAP_BE;
fe->mode = PR_MODE_PEERS;
fe->maxconn = 0;

View File

@ -1650,7 +1650,7 @@ struct proxy *alloc_new_proxy(const char *name, unsigned int cap, char **errmsg)
}
init_new_proxy(curproxy);
curproxy->last_change = ns_to_sec(tv_to_ns(&now));
curproxy->last_change = ns_to_sec(now_ns);
curproxy->id = strdup(name);
curproxy->cap = cap;
@ -2839,7 +2839,7 @@ static int dump_servers_state(struct stconn *sc)
dump_server_addr(&srv->check.addr, srv_check_addr);
dump_server_addr(&srv->agent.addr, srv_agent_addr);
srv_time_since_last_change = ns_to_sec(tv_to_ns(&now)) - srv->last_change;
srv_time_since_last_change = ns_to_sec(now_ns) - srv->last_change;
bk_f_forced_id = px->options & PR_O_FORCED_ID ? 1 : 0;
srv_f_forced_id = srv->flags & SRV_F_FORCED_ID ? 1 : 0;

View File

@ -114,10 +114,10 @@ unsigned int srv_dynamic_maxconn(const struct server *s)
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
if ((s->cur_state == SRV_ST_STARTING) &&
ns_to_sec(tv_to_ns(&now)) < s->last_change + s->slowstart &&
ns_to_sec(tv_to_ns(&now)) >= s->last_change) {
ns_to_sec(now_ns) < s->last_change + s->slowstart &&
ns_to_sec(now_ns) >= s->last_change) {
unsigned int ratio;
ratio = 100 * (ns_to_sec(tv_to_ns(&now)) - s->last_change) / s->slowstart;
ratio = 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart;
max = MAX(1, max * ratio / 100);
}
return max;

View File

@ -3234,7 +3234,7 @@ int check_action_do_resolve(struct act_rule *rule, struct proxy *px, char **err)
void resolvers_setup_proxy(struct proxy *px)
{
px->last_change = ns_to_sec(tv_to_ns(&now));
px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_FE | PR_CAP_BE;
px->maxconn = 0;
px->conn_retries = 1;

View File

@ -138,16 +138,16 @@ const char *srv_op_st_chg_cause(enum srv_op_st_chg_cause cause)
int srv_downtime(const struct server *s)
{
if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(tv_to_ns(&now))) // ignore negative time
if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(now_ns)) // ignore negative time
return s->down_time;
return ns_to_sec(tv_to_ns(&now)) - s->last_change + s->down_time;
return ns_to_sec(now_ns) - s->last_change + s->down_time;
}
int srv_lastsession(const struct server *s)
{
if (s->counters.last_sess)
return ns_to_sec(tv_to_ns(&now)) - s->counters.last_sess;
return ns_to_sec(now_ns) - s->counters.last_sess;
return -1;
}
@ -1867,7 +1867,7 @@ void server_recalc_eweight(struct server *sv, int must_update)
struct proxy *px = sv->proxy;
unsigned w;
if (ns_to_sec(tv_to_ns(&now)) < sv->last_change || ns_to_sec(tv_to_ns(&now)) >= sv->last_change + sv->slowstart) {
if (ns_to_sec(now_ns) < sv->last_change || ns_to_sec(now_ns) >= sv->last_change + sv->slowstart) {
/* go to full throttle if the slowstart interval is reached */
if (sv->next_state == SRV_ST_STARTING)
sv->next_state = SRV_ST_RUNNING;
@ -1877,7 +1877,7 @@ void server_recalc_eweight(struct server *sv, int must_update)
* It must also start immediately, at least at the minimal step when leaving maintenance.
*/
if ((sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
w = (px->lbprm.wdiv * (ns_to_sec(tv_to_ns(&now)) - sv->last_change) + sv->slowstart) / sv->slowstart;
w = (px->lbprm.wdiv * (ns_to_sec(now_ns) - sv->last_change) + sv->slowstart) / sv->slowstart;
else
w = px->lbprm.wdiv;
@ -2358,7 +2358,7 @@ struct server *new_server(struct proxy *proxy)
event_hdl_sub_list_init(&srv->e_subs);
srv->next_state = SRV_ST_RUNNING; /* early server setup */
srv->last_change = ns_to_sec(tv_to_ns(&now));
srv->last_change = ns_to_sec(now_ns);
srv->check.obj_type = OBJ_TYPE_CHECK;
srv->check.status = HCHK_STATUS_INI;
@ -4685,7 +4685,7 @@ static int init_srv_slowstart(struct server *srv)
if (srv->next_state == SRV_ST_STARTING) {
task_schedule(srv->warmup,
tick_add(now_ms,
MS_TO_TICKS(MAX(1000, (ns_to_sec(tv_to_ns(&now)) - srv->last_change)) / 20)));
MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - srv->last_change)) / 20)));
}
}
@ -5752,14 +5752,14 @@ static void srv_update_status(struct server *s, int type, int cause)
if (srv_prev_state != s->cur_state) {
if (srv_prev_state == SRV_ST_STOPPED) {
/* server was down and no longer is */
if (s->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative times
s->down_time += ns_to_sec(tv_to_ns(&now)) - s->last_change;
if (s->last_change < ns_to_sec(now_ns)) // ignore negative times
s->down_time += ns_to_sec(now_ns) - s->last_change;
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
s->counters.down_trans++;
}
s->last_change = ns_to_sec(tv_to_ns(&now));
s->last_change = ns_to_sec(now_ns);
}
/* check if backend stats must be updated due to the server state change */
@ -5769,9 +5769,9 @@ static void srv_update_status(struct server *s, int type, int cause)
/* backend was down and is back up again:
* no helper function, updating last_change and backend downtime stats
*/
if (s->proxy->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative times
s->proxy->down_time += ns_to_sec(tv_to_ns(&now)) - s->proxy->last_change;
s->proxy->last_change = ns_to_sec(tv_to_ns(&now));
if (s->proxy->last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - s->proxy->last_change;
s->proxy->last_change = ns_to_sec(now_ns);
}
}

View File

@ -321,7 +321,7 @@ static void srv_state_srv_update(struct server *srv, int version, char **params)
srv_adm_set_drain(srv);
}
srv->last_change = ns_to_sec(tv_to_ns(&now)) - srv_last_time_change;
srv->last_change = ns_to_sec(now_ns) - srv_last_time_change;
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;

View File

@ -45,7 +45,7 @@ struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type
sess->fe = fe;
sess->origin = origin;
sess->accept_date = date; /* user-visible date for logging */
sess->accept_ts = tv_to_ns(&now); /* corrected date for internal use */
sess->accept_ts = now_ns; /* corrected date for internal use */
sess->stkctr = NULL;
if (pool_head_stk_ctr) {
sess->stkctr = pool_alloc(pool_head_stk_ctr);
@ -432,7 +432,7 @@ int conn_complete_session(struct connection *conn)
{
struct session *sess = conn->owner;
sess->t_handshake = ns_to_ms(tv_to_ns(&now) - sess->accept_ts);
sess->t_handshake = ns_to_ms(now_ns - sess->accept_ts);
if (conn->flags & CO_FL_ERROR)
goto fail;

View File

@ -289,7 +289,7 @@ static int cli_parse_show_events(char **args, char *payload, struct appctx *appc
/* Pre-configures a ring proxy to emit connections */
void sink_setup_proxy(struct proxy *px)
{
px->last_change = ns_to_sec(tv_to_ns(&now));
px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_BE;
px->maxconn = 0;
px->conn_retries = 1;

View File

@ -2345,7 +2345,7 @@ int stats_fill_sv_stats(struct proxy *px, struct server *sv, int flags,
metric = mkf_str(FO_STATUS, fld_status);
break;
case ST_F_LASTCHG:
metric = mkf_u32(FN_AGE, ns_to_sec(tv_to_ns(&now)) - sv->last_change);
metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - sv->last_change);
break;
case ST_F_WEIGHT:
metric = mkf_u32(FN_AVG, (sv->cur_eweight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv);
@ -2800,7 +2800,7 @@ int stats_fill_be_stats(struct proxy *px, int flags, struct field *stats, int le
metric = mkf_u64(FN_COUNTER, px->down_trans);
break;
case ST_F_LASTCHG:
metric = mkf_u32(FN_AGE, ns_to_sec(tv_to_ns(&now)) - px->last_change);
metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - px->last_change);
break;
case ST_F_DOWNTIME:
if (px->srv)
@ -3555,7 +3555,7 @@ static void stats_dump_html_info(struct stconn *sc, struct uri_auth *uri)
{
struct appctx *appctx = __sc_appctx(sc);
struct show_stat_ctx *ctx = appctx->svcctx;
unsigned int up = (ns_to_sec(tv_to_ns(&now)) - start_time.tv_sec);
unsigned int up = (ns_to_sec(now_ns) - start_time.tv_sec);
char scope_txt[STAT_SCOPE_TXT_MAXLEN + sizeof STAT_SCOPE_PATTERN];
const char *scope_ptr = stats_scope_ptr(appctx, sc);
unsigned long long bps;
@ -4628,7 +4628,7 @@ int stats_fill_info(struct field *info, int len, uint flags)
}
glob_out_b32 *= 32; // values are 32-byte units
up = tv_to_ns(&now) - tv_to_ns(&start_time);
up = now_ns - tv_to_ns(&start_time);
up_sec = ns_to_sec(up);
up_usec = (up / 1000U) % 1000000U;

View File

@ -902,7 +902,7 @@ static void back_establish(struct stream *s)
/* First, centralize the timers information, and clear any irrelevant
* timeout.
*/
s->logs.t_connect = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_connect = ns_to_ms(now_ns - s->logs.accept_ts);
s->conn_exp = TICK_ETERNITY;
s->flags &= ~SF_CONN_EXP;
@ -2595,7 +2595,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
}
if (!(s->flags & SF_IGNORE)) {
s->logs.t_close = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
stream_process_counters(s);
@ -3372,7 +3372,7 @@ static int stats_dump_full_strm_to_buffer(struct stconn *sc, struct stream *strm
chunk_appendf(&trash,
" age=%s)\n",
human_time(ns_to_sec(tv_to_ns(&now)) - strm->logs.accept_date.tv_sec, 1));
human_time(ns_to_sec(now_ns) - strm->logs.accept_date.tv_sec, 1));
if (strm->txn)
chunk_appendf(&trash,
@ -3699,7 +3699,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
chunk_appendf(&trash,
" ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
curr_strm->task->state, curr_strm->stream_epoch,
human_time(ns_to_sec(tv_to_ns(&now)) - ns_to_sec(curr_strm->logs.accept_ts), 1),
human_time(ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.accept_ts), 1),
curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
(unsigned long long)curr_strm->cpu_time, (unsigned long long)curr_strm->lat_time);