MINOR: sched: add TASK_F_WANTS_TIME to make the scheduler update the call date
Currently, tasks being profiled have th_ctx->sched_call_date set to the current monotonic time with nanosecond resolution, but there is no other way to obtain such a date even though the scheduler is capable of providing it. Let's declare a new task flag, TASK_F_WANTS_TIME, that makes the scheduler take the time just before calling the handler. This way, a task that needs nanosecond resolution on its call date can be called with an up-to-date date without having to abuse now_mono_time() itself when that is not otherwise needed.

In addition, if CLOCK_MONOTONIC is not supported (now_mono_time() always returns 0), the date is set to the most recently known now_ns, which is guaranteed to be atomic and is only updated once per poll loop. The date can be conveniently retrieved with the new task_mono_time() helper. This can be useful, e.g. for pacing.

The code was slightly adjusted so as to merge the parts common to the profiling case and this new one.
commit c5052bad8a
parent 12969c1b17
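For illustration, a minimal sketch of how a caller could opt in. It assumes a plain task created with task_new_here(), that the persistent flag may be set directly before the task's first wakeup, and a hypothetical handler name (my_paced_send):

    #include <haproxy/api.h>
    #include <haproxy/task.h>

    /* hypothetical handler, defined elsewhere, that wants an accurate call date */
    struct task *my_paced_send(struct task *t, void *context, unsigned int state);

    /* Create a task whose call date is refreshed by the scheduler on each call,
     * without enabling task profiling.
     */
    static struct task *create_paced_task(void *ctx)
    {
            struct task *t = task_new_here();

            if (!t)
                    return NULL;
            t->process = my_paced_send;
            t->context = ctx;
            t->state  |= TASK_F_WANTS_TIME;  /* persistent flag, set before the first wakeup */
            task_wakeup(t, TASK_WOKEN_INIT);
            return t;
    }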
include/haproxy/task-t.h

@@ -60,11 +60,13 @@
 #define TASK_F_USR1       0x00010000  /* preserved user flag 1, application-specific, def:0 */
 #define TASK_F_UEVT1      0x00020000  /* one-shot user event type 1, application specific, def:0 */
 #define TASK_F_UEVT2      0x00040000  /* one-shot user event type 2, application specific, def:0 */
-/* unused: 0x80000..0x80000000 */
+#define TASK_F_WANTS_TIME 0x00080000  /* task/tasklet wants th_ctx->sched_call_date to be set */
+/* unused: 0x100000..0x80000000 */
 
 /* These flags are persistent across scheduler calls */
 #define TASK_PERSISTENT (TASK_SELF_WAKING | TASK_KILLED | \
-                         TASK_HEAVY | TASK_F_TASKLET | TASK_F_USR1)
+                         TASK_HEAVY | TASK_F_TASKLET | TASK_F_USR1 | \
+                         TASK_F_WANTS_TIME)
 
 /* This function is used to report state in debugging tools. Please reflect
  * below any single-bit flag addition above in the same order via the
include/haproxy/task.h

@@ -193,6 +193,12 @@ static inline int thread_has_tasks(void)
                 (int)!MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list));
 }
 
+/* returns the most recent known date of the task's call from the scheduler */
+static inline uint64_t task_mono_time(void)
+{
+        return th_ctx->sched_call_date;
+}
+
 /* puts the task <t> in run queue with reason flags <f>, and returns <t> */
 /* This will put the task in the local runqueue if the task is only runnable
  * by the current thread, in the global runqueue otherwies. With DEBUG_TASK,
src/task.c (19 changed lines)

@@ -567,17 +567,24 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
         t->calls++;
 
         th_ctx->sched_wake_date = t->wake_date;
-        if (th_ctx->sched_wake_date) {
-                uint32_t now_ns = now_mono_time();
-                uint32_t lat = now_ns - th_ctx->sched_wake_date;
-                t->wake_date = 0;
-                th_ctx->sched_call_date = now_ns;
+        if (th_ctx->sched_wake_date || (t->state & TASK_F_WANTS_TIME)) {
+                /* take the most accurate clock we have, either
+                 * mono_time() or last now_ns (monotonic but only
+                 * incremented once per poll loop).
+                 */
+                th_ctx->sched_call_date = now_mono_time();
+                if (unlikely(!th_ctx->sched_call_date))
+                        th_ctx->sched_call_date = now_ns;
+        }
+
+        if (th_ctx->sched_wake_date) {
+                t->wake_date = 0;
                 profile_entry = sched_activity_entry(sched_activity, t->process, t->caller);
                 th_ctx->sched_profile_entry = profile_entry;
-                HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
+                HA_ATOMIC_ADD(&profile_entry->lat_time, (uint32_t)(th_ctx->sched_call_date - th_ctx->sched_wake_date));
                 HA_ATOMIC_INC(&profile_entry->calls);
         }
 
         __ha_barrier_store();
 
         th_ctx->current = t;
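For illustration, this is how a handler could then use task_mono_time() for pacing. The pacer structure, its fields, the handler name and the millisecond rounding are hypothetical; task_mono_time(), now_ms, tick_add(), MS_TO_TICKS() and task_wakeup() are existing API, and the sketch relies on the scheduler requeuing a returned task according to t->expire:

    #include <haproxy/api.h>
    #include <haproxy/clock.h>
    #include <haproxy/task.h>
    #include <haproxy/tick.h>

    /* hypothetical per-task pacing state, passed as the task's context */
    struct pacer {
            uint64_t next_send_ns;   /* date of the next allowed emission */
            uint64_t interval_ns;    /* spacing between two emissions */
    };

    struct task *my_paced_send(struct task *t, void *context, unsigned int state)
    {
            struct pacer *p = context;
            uint64_t now = task_mono_time(); /* ns; last known now_ns if CLOCK_MONOTONIC is unsupported */

            if (now < p->next_send_ns) {
                    /* too early: sleep for the remaining budget, rounded up to the
                     * next millisecond; the scheduler requeues us based on t->expire.
                     */
                    t->expire = tick_add(now_ms, MS_TO_TICKS((p->next_send_ns - now) / 1000000ULL + 1));
                    return t;
            }

            /* emit one unit of work here, then book the next slot and run again */
            p->next_send_ns = now + p->interval_ns;
            task_wakeup(t, TASK_WOKEN_OTHER);
            return t;
    }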