REORG: thread/clock: move the clock parts of thread_info to thread_ctx

The "thread_info" name was initially chosen when this structure was
expected to hold all info about threads, but since we now have a
separate per-thread context, there is no point keeping some of these
elements in the thread_info struct.

As such, this patch moves prev_cpu_time, prev_mono_time and idle_pct
into the thread context (struct thread_ctx), next to the scheduler
parts. Instead of accessing them via "ti->" we now access them via
"th_ctx->", which makes more sense as they are totally dynamic, and
this will be required for future evolutions. Room is not a problem for
now: the structure still has 84 bytes available at the end.
Author: Willy Tarreau
Date:   2021-09-30 18:28:49 +02:00
Parent: 1a9c922b53
Commit: 45c38e22bf
6 changed files, 17 insertions(+), 17 deletions(-)


@@ -42,9 +42,6 @@ enum {
  * disabled, it contains the same info for the single running thread.
  */
 struct thread_info {
-	uint64_t prev_cpu_time; /* previous per thread CPU time */
-	uint64_t prev_mono_time; /* previous system wide monotonic time */
-	unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
 	unsigned int flags; /* thread info flags, TI_FL_* */
 
 #ifdef CONFIG_HAP_POOLS
@@ -82,6 +79,9 @@ struct thread_ctx {
 	struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
 	unsigned int rq_total; /* total size of the run queue, prio_tree + tasklets */
 	int tasks_in_list; /* Number of tasks in the per-thread tasklets list */
+	uint64_t prev_cpu_time; /* previous per thread CPU time */
+	uint64_t prev_mono_time; /* previous system wide monotonic time */
+	uint idle_pct; /* idle to total ratio over last sample (percent) */
 
 	ALWAYS_ALIGN(128);
 };
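For readers tracing the access change through the hunks below: HAProxy keeps
one slot per thread in global arrays and caches the current thread's slots in
thread-local pointers, so once a field moves from one structure to the other,
"ti->" simply becomes "th_ctx->" and "ha_thread_info[thr]" becomes
"ha_thread_ctx[thr]". A minimal sketch of that pattern, with simplified types
rather than the real declarations:

    #define MAX_THREADS 64

    struct thread_info { unsigned int flags;    /* mostly static per-thread info */ };
    struct thread_ctx  { unsigned int idle_pct; /* dynamic per-thread state */ };

    /* one slot per thread, indexed by thread id */
    struct thread_info ha_thread_info[MAX_THREADS];
    struct thread_ctx  ha_thread_ctx[MAX_THREADS];

    /* thread-local shortcuts to the current thread's slots */
    static __thread struct thread_info *ti;
    static __thread struct thread_ctx  *th_ctx;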


@@ -247,7 +247,7 @@ void clock_init_process_date(void)
 	now = after_poll = before_poll = date;
 	global_now = ((ullong)date.tv_sec << 32) + (uint)date.tv_usec;
 	global_now_ms = now.tv_sec * 1000 + now.tv_usec / 1000;
-	ti->idle_pct = 100;
+	th_ctx->idle_pct = 100;
 	clock_update_date(0, 1);
 }
 
@@ -264,7 +264,7 @@ void clock_init_thread_date(void)
 	old_now = _HA_ATOMIC_LOAD(&global_now);
 	now.tv_sec = old_now >> 32;
 	now.tv_usec = (uint)old_now;
-	ti->idle_pct = 100;
+	th_ctx->idle_pct = 100;
 	clock_update_date(0, 1);
 }
 
@@ -278,7 +278,7 @@ uint clock_report_idle(void)
 	for (thr = 0; thr < MAX_THREADS; thr++) {
 		if (!(all_threads_mask & (1UL << thr)))
 			continue;
-		total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
+		total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
 		rthr++;
 	}
 	return rthr ? total / rthr : 0;
@@ -310,7 +310,7 @@ static inline void clock_measure_idle(void)
 	if (samp_time < 500000)
 		return;
 
-	HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
+	HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
 	idle_time = samp_time = 0;
 }
 
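A side note on the arithmetic this hunk leaves intact: adding samp_time / 2
before dividing rounds the idle percentage to the nearest integer instead of
truncating it. A standalone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t idle_time = 303000; /* time spent idle during the sample */
        uint64_t samp_time = 500000; /* total duration of the sample */

        /* 100 * 303000 / 500000 = 60.6: truncating yields 60, rounding yields 61 */
        unsigned int truncated = 100ULL * idle_time / samp_time;
        unsigned int rounded   = (100ULL * idle_time + samp_time / 2) / samp_time;

        printf("truncated=%u%% rounded=%u%%\n", truncated, rounded);
        return 0;
    }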
@@ -322,8 +322,8 @@ static inline void clock_measure_idle(void)
 void clock_leaving_poll(int timeout, int interrupted)
 {
 	clock_measure_idle();
-	ti->prev_cpu_time = now_cpu_time();
-	ti->prev_mono_time = now_mono_time();
+	th_ctx->prev_cpu_time = now_cpu_time();
+	th_ctx->prev_mono_time = now_mono_time();
 }
 
 /* Collect date and time information before calling poll(). This will be used
@@ -346,9 +346,9 @@ void clock_entering_poll(void)
 	new_cpu_time = now_cpu_time();
 	new_mono_time = now_mono_time();
 
-	if (ti->prev_cpu_time && ti->prev_mono_time) {
-		new_cpu_time -= ti->prev_cpu_time;
-		new_mono_time -= ti->prev_mono_time;
+	if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) {
+		new_cpu_time -= th_ctx->prev_cpu_time;
+		new_mono_time -= th_ctx->prev_mono_time;
 		stolen = new_mono_time - new_cpu_time;
 		if (unlikely(stolen >= 500000)) {
 			stolen /= 500000;
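What this hunk computes, in short: across the interval between two polls,
wall-clock time that elapsed without the thread consuming CPU is treated as
"stolen" (the thread was runnable but off-CPU, e.g. preempted by the
hypervisor). A minimal sketch of the estimate, assuming the clocks return
nanoseconds, as the 500000 threshold (half a millisecond) suggests:

    #include <stdint.h>

    /* returns stolen time quantized in 0.5 ms units, 0 when below the noise floor */
    static uint64_t stolen_units(uint64_t mono_delta_ns, uint64_t cpu_delta_ns)
    {
        uint64_t stolen = mono_delta_ns - cpu_delta_ns;

        if (stolen < 500000)
            return 0;
        return stolen / 500000;
    }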


@@ -360,7 +360,7 @@ static int rfc195x_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out
 
 	/* Verify compression rate limiting and CPU usage */
 	if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
-	    (ti->idle_pct < compress_min_idle)) { /* idle */
+	    (th_ctx->idle_pct < compress_min_idle)) { /* idle */
 		if (comp_ctx->cur_lvl > 0)
 			strm->level = --comp_ctx->cur_lvl;
 	}
@@ -618,7 +618,7 @@ static int deflate_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out
 
 	/* compression limit */
 	if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
-	    (ti->idle_pct < compress_min_idle)) { /* idle */
+	    (th_ctx->idle_pct < compress_min_idle)) { /* idle */
 		/* decrease level */
 		if (comp_ctx->cur_lvl > 0) {
 			comp_ctx->cur_lvl--;
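Both compression hunks apply the same throttling rule, now reading the idle
ratio from th_ctx: back off the compression level when the global output rate
limit is exceeded or when the thread's measured idle ratio drops below the
configured minimum. A condensed restatement with hypothetical, simplified
parameters standing in for global.comp_rate_lim, comp_bps_out and
compress_min_idle:

    /* returns nonzero when the compression level should be lowered */
    static int comp_must_back_off(unsigned int out_bps, unsigned int rate_lim,
                                  unsigned int idle_pct, unsigned int min_idle)
    {
        return (rate_lim > 0 && out_bps > rate_lim) /* rate limit exceeded */
            || idle_pct < min_idle;                 /* CPU already too busy */
    }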


@@ -150,7 +150,7 @@ void ha_backtrace_to_stderr()
 void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 {
 	unsigned long thr_bit = 1UL << thr;
-	unsigned long long p = ha_thread_info[thr].prev_cpu_time;
+	unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
 	unsigned long long n = now_cpu_time_thread(thr);
 	int stuck = !!(ha_thread_info[thr].flags & TI_FL_STUCK);
 


@@ -564,7 +564,7 @@ select_compression_response_header(struct comp_state *st, struct stream *s, stru
 		goto fail;
 
 	/* limit cpu usage */
-	if (ti->idle_pct < compress_min_idle)
+	if (th_ctx->idle_pct < compress_min_idle)
 		goto fail;
 
 	/* initialize compression */


@@ -71,7 +71,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg)
 		if (thr < 0 || thr >= global.nbthread)
 			break;
 
-		p = ha_thread_info[thr].prev_cpu_time;
+		p = ha_thread_ctx[thr].prev_cpu_time;
 		n = now_cpu_time_thread(thr);
 
 		/* not yet reached the deadline of 1 sec */
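For context on why the watchdog reads prev_cpu_time: it measures how much CPU
time the thread has burned since the last recorded mark and only proceeds with
the "stuck" diagnosis once a one-second deadline has passed. A minimal sketch
of that check, assuming nanosecond CPU clocks as the "1 sec" comment above
implies:

    #include <stdint.h>

    /* returns nonzero once the thread consumed >= 1 sec of CPU since mark p */
    static int wdt_past_deadline(uint64_t p, uint64_t n)
    {
        return (n - p) >= 1000000000ULL;
    }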