BUILD: task: use list_to_mt_list() instead of casting list to mt_list
There were a few casts of list* to mt_list* that were upsetting some old compilers (not sure about the effect on others). We had created list_to_mt_list() purposely for this, let's use it instead of applying this cast.
parent f3d5c4b032
commit cc5cd5b8d8
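For context, here is a minimal sketch of the pattern this commit changes, together with one plausible shape for the helper. The struct layouts and the body of list_to_mt_list() below are illustrative assumptions only; HAProxy's real types and helper live in its own headers and may well be defined differently (for example as a macro). The point is simply that the conversion from struct list * to struct mt_list * goes through one well-identified helper instead of ad-hoc pointer casts scattered through the code.

/* Illustrative sketch only -- not HAProxy's actual definitions.
 * Both node types are assumed to be two-pointer structures with the
 * same layout, which is what makes the reinterpretation safe in practice.
 */
struct list    { struct list *n, *p; };
struct mt_list { struct mt_list *next, *prev; };

static inline struct mt_list *list_to_mt_list(struct list *l)
{
	/* convert through a union rather than a bare (struct mt_list *)
	 * cast, the kind of expression the commit says upset old compilers */
	union { struct list *l; struct mt_list *m; } u = { .l = l };
	return u.m;
}

/* Call-site pattern before and after the patch:
 *
 *   MT_LIST_DELETE((struct mt_list *)&t->list);   // before: raw cast
 *   MT_LIST_DELETE(list_to_mt_list(&t->list));    // after: helper
 */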
@@ -400,7 +400,7 @@ static inline void _tasklet_wakeup_on(struct tasklet *tl, int thr, const char *f
  */
 static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
 {
-	if (MT_LIST_DELETE((struct mt_list *)&t->list)) {
+	if (MT_LIST_DELETE(list_to_mt_list(&t->list))) {
 		_HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
 		_HA_ATOMIC_DEC(&ha_thread_ctx[t->tid >= 0 ? t->tid : tid].rq_total);
 	}
@@ -556,7 +556,7 @@ static inline void task_destroy(struct task *t)
 /* Should only be called by the thread responsible for the tasklet */
 static inline void tasklet_free(struct tasklet *tl)
 {
-	if (MT_LIST_DELETE((struct mt_list *)&tl->list))
+	if (MT_LIST_DELETE(list_to_mt_list(&tl->list)))
 		_HA_ATOMIC_DEC(&ha_thread_ctx[tl->tid >= 0 ? tl->tid : tid].rq_total);
 
 #ifdef DEBUG_TASK
@@ -89,7 +89,7 @@ void task_kill(struct task *t)
 
 		/* Beware: tasks that have never run don't have their ->list empty yet! */
 		MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
-			       (struct mt_list *)&((struct tasklet *)t)->list);
+			       list_to_mt_list(&((struct tasklet *)t)->list));
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].tasks_in_list);
 		if (sleeping_thread_mask & (1UL << thr)) {
@@ -128,7 +128,7 @@ void tasklet_kill(struct tasklet *t)
 	if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_IN_LIST | TASK_KILLED)) {
 		thr = t->tid > 0 ? t->tid: tid;
 		MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
-			       (struct mt_list *)&t->list);
+			       list_to_mt_list(&t->list));
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
 		if (sleeping_thread_mask & (1UL << thr)) {
 			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
@@ -172,7 +172,7 @@ void __tasklet_wakeup_on(struct tasklet *tl, int thr)
 		_HA_ATOMIC_INC(&th_ctx->rq_total);
 	} else {
 		/* this tasklet runs on a specific thread. */
-		MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
+		MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list, list_to_mt_list(&tl->list));
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
 		if (sleeping_thread_mask & (1UL << thr)) {
 			_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));