Mirror of http://git.haproxy.org/git/haproxy.git/, synced 2025-02-03 12:03:02 +00:00
MEDIUM: task: remove TASK_SHARED_WQ and only use t->tid
TASK_SHARED_WQ was set upon task creation and never changed afterwards. Thus if a task was created to run anywhere (e.g. a check or a Lua task), all its timers would always pass through the shared timers queue with a lock. Now we know that tid<0 indicates a shared task, so we can use that to decide whether or not to use the shared queue. The task might be migrated using task_set_affinity(), but it is always dequeued first, so the check will still be valid.

Not only does this remove a flag that's difficult to keep synchronized with the thread ID, it should also significantly lower the load on systems with many checks. A quick test with 5000 servers and fast checks that were saturating the CPU shows that the check rate increased by 20% (hence the CPU usage dropped by 17%). It's worth noting that run_task_lists() almost no longer appears in perf top now.
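For illustration only, here is a minimal, self-contained C sketch of the rule the patch adopts: the sign of the owner thread ID alone decides whether the shared, lock-protected wait queue or the thread's private one is used, replacing the old TASK_SHARED_WQ flag. This is not HAProxy's actual code; the struct, enum, and helper names below are invented for the example.

#include <stdio.h>

/* Hypothetical sketch of the tid-based rule: tid < 0 means the task may run
 * on any thread, so its timer must go to the shared wait queue (protected by
 * a lock); tid >= 0 means it is bound to one thread and can use that thread's
 * private, lock-free wait queue.
 */
struct demo_task {
        int tid;              /* owner thread id, or -1 for "any thread" */
        unsigned int expire;  /* expiration tick (unused in this sketch)  */
};

enum wq_kind { WQ_SHARED_LOCKED, WQ_LOCAL_LOCKFREE };

static enum wq_kind pick_wait_queue(const struct demo_task *t)
{
        /* before the patch this tested a TASK_SHARED_WQ state flag;
         * after it, the thread id carries the same information
         */
        return (t->tid < 0) ? WQ_SHARED_LOCKED : WQ_LOCAL_LOCKFREE;
}

int main(void)
{
        struct demo_task check_task = { .tid = -1 };  /* may run anywhere  */
        struct demo_task conn_task  = { .tid =  2 };  /* bound to thread 2 */

        printf("check task -> %s queue\n",
               pick_wait_queue(&check_task) == WQ_SHARED_LOCKED ? "shared (locked)" : "local");
        printf("conn task  -> %s queue\n",
               pick_wait_queue(&conn_task) == WQ_SHARED_LOCKED ? "shared (locked)" : "local");
        return 0;
}

In HAProxy itself this same decision shows up in task_unlink_wq(), task_queue(), task_schedule() and __task_queue() in the hunks below.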
parent 3b7a19c2a6
commit 159e3acf5d
@@ -283,7 +283,6 @@ void show_task_state(unsigned int f)
         SHOW_FLAG(f, TASK_IN_LIST);
         SHOW_FLAG(f, TASK_KILLED);
         SHOW_FLAG(f, TASK_SELF_WAKING);
-        SHOW_FLAG(f, TASK_SHARED_WQ);
         SHOW_FLAG(f, TASK_QUEUED);
         SHOW_FLAG(f, TASK_GLOBAL);
         SHOW_FLAG(f, TASK_RUNNING);
@@ -34,8 +34,7 @@
 #define TASK_RUNNING      0x00000001  /* the task is currently running */
 #define TASK_GLOBAL       0x00000002  /* The task is currently in the global runqueue */
 #define TASK_QUEUED       0x00000004  /* The task has been (re-)added to the run queue */
-#define TASK_SHARED_WQ    0x00000008  /* The task's expiration may be updated by other
-                                       * threads, must be set before first queue/wakeup */
+/* unused 0x00000008 */
 #define TASK_SELF_WAKING  0x00000010  /* task/tasklet found waking itself */
 #define TASK_KILLED       0x00000020  /* task/tasklet killed, may now be freed */
 #define TASK_IN_LIST      0x00000040  /* tasklet is in a tasklet list */
@@ -59,7 +58,7 @@
 /* unused: 0x20000..0x80000000 */
 
 /* These flags are persistent across scheduler calls */
-#define TASK_PERSISTENT (TASK_SHARED_WQ | TASK_SELF_WAKING | TASK_KILLED | \
+#define TASK_PERSISTENT (TASK_SELF_WAKING | TASK_KILLED | \
                          TASK_HEAVY | TASK_F_TASKLET | TASK_F_USR1)
 
 struct notification {
@@ -278,8 +278,8 @@ static inline struct task *task_unlink_wq(struct task *t)
         unsigned long locked;
 
         if (likely(task_in_wq(t))) {
-                locked = t->state & TASK_SHARED_WQ;
-                BUG_ON(!locked && t->tid != tid);
+                locked = t->tid < 0;
+                BUG_ON(t->tid >= 0 && t->tid != tid);
                 if (locked)
                         HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                 __task_unlink_wq(t);
@@ -310,7 +310,7 @@ static inline void task_queue(struct task *task)
                 return;
 
 #ifdef USE_THREAD
-        if (task->state & TASK_SHARED_WQ) {
+        if (task->tid < 0) {
                 HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                 if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                         __task_queue(task, &timers);
@@ -318,7 +318,7 @@ static inline void task_queue(struct task *task)
         } else
 #endif
         {
-                BUG_ON(task->tid != tid); // should have TASK_SHARED_WQ
+                BUG_ON(task->tid != tid);
                 if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
                         __task_queue(task, &th_ctx->timers);
         }
@@ -547,8 +547,6 @@ static inline struct task *task_init(struct task *t, int tid)
                 tid = 0;
 #endif
         t->tid = tid;
-        if (tid < 0)
-                t->state |= TASK_SHARED_WQ;
         t->nice = 0;
         t->calls = 0;
         t->call_date = 0;
@@ -709,7 +707,7 @@ static inline void task_schedule(struct task *task, int when)
                 return;
 
 #ifdef USE_THREAD
-        if (task->state & TASK_SHARED_WQ) {
+        if (task->tid < 0) {
                 /* FIXME: is it really needed to lock the WQ during the check ? */
                 HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
                 if (task_in_wq(task))
@@ -302,8 +302,8 @@ void __task_wakeup(struct task *t)
 void __task_queue(struct task *task, struct eb_root *wq)
 {
 #ifdef USE_THREAD
-        BUG_ON((wq == &timers && !(task->state & TASK_SHARED_WQ)) ||
-               (wq == &th_ctx->timers && (task->state & TASK_SHARED_WQ)) ||
+        BUG_ON((wq == &timers && task->tid >= 0) ||
+               (wq == &th_ctx->timers && task->tid < 0) ||
                (wq != &timers && wq != &th_ctx->timers));
 #endif
         /* if this happens the process is doomed anyway, so better catch it now