MINOR: fd/threads: make _GET_NEXT()/_GET_PREV() use the volatile attribute

These macros are used either between atomic ops, which make the volatile
access implicit, or with an explicit volatile cast. However, not having the
qualifier in the macro itself leaves traps in the code, because certain
loop paths cannot be used safely without risking an infinite loop if one
isn't careful enough.

Let's place the volatile attribute inside the macros and remove the
explicit casts that are no longer needed. It was verified that the output
executable remains exactly the same, byte for byte.
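
To illustrate the kind of trap mentioned above, here is a minimal,
hypothetical sketch (simplified types, not the actual fdtab layout; the
names GET_NEXT_PLAIN, GET_NEXT_VOLATILE and wait_until_unlocked are made
up for the illustration): without a volatile access the compiler may load
the field once and keep it in a register, so a retry loop waiting for
another thread to release the -2 "locked" marker can spin forever.

    /* Hypothetical sketch, not HAProxy code: simplified entry and table. */
    struct fdlist_entry { int next; int prev; };
    extern struct fdlist_entry fdtab[];

    /* Plain access: the load may be hoisted out of a loop. */
    #define GET_NEXT_PLAIN(fd)    (fdtab[(fd)].next)
    /* Volatile access (the same idea the patch bakes into _GET_NEXT()):
     * forces a fresh load from memory on every expansion. */
    #define GET_NEXT_VOLATILE(fd) (*(volatile int *)&fdtab[(fd)].next)

    static void wait_until_unlocked(int fd)
    {
        /* With GET_NEXT_PLAIN() this loop may compile to an infinite loop;
         * with GET_NEXT_VOLATILE() the value is re-read on each iteration. */
        while (GET_NEXT_VOLATILE(fd) == -2)
            ;
    }
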
Willy Tarreau 2019-12-20 07:20:00 +01:00
parent 54907bb848
commit 337fb719ee
1 changed file with 4 additions and 4 deletions

@@ -115,8 +115,8 @@ int poller_wr_pipe[MAX_THREADS]; // Pipe to wake the threads
 volatile int ha_used_fds = 0; // Number of FD we're currently using
-#define _GET_NEXT(fd, off) ((struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->next
-#define _GET_PREV(fd, off) ((struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->prev
+#define _GET_NEXT(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->next
+#define _GET_PREV(fd, off) ((volatile struct fdlist_entry *)(void *)((char *)(&fdtab[fd]) + off))->prev
 /* adds fd <fd> to fd list <list> if it was not yet in it */
 void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
 {
@@ -207,7 +207,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
 #else
 lock_self_next:
-	next = ({ volatile int *next = &_GET_NEXT(fd, off); *next; });
+	next = _GET_NEXT(fd, off);
 	if (next == -2)
 		goto lock_self_next;
 	if (next <= -3)
@@ -215,7 +215,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
 	if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
 		goto lock_self_next;
 lock_self_prev:
-	prev = ({ volatile int *prev = &_GET_PREV(fd, off); *prev; });
+	prev = _GET_PREV(fd, off);
 	if (prev == -2)
 		goto lock_self_prev;
 	if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
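
For reference, a sketch of what the simplified read at lock_self_next now
expands to once the volatile qualifier lives inside the macro (whitespace
rearranged, taken from the new _GET_NEXT() definition above):

	/* expansion sketch of:  next = _GET_NEXT(fd, off);  */
	next = ((volatile struct fdlist_entry *)(void *)
	        ((char *)(&fdtab[fd]) + off))->next;

The volatile cast in the macro forces every retry through the goto to
re-read ->next from memory, which is why the explicit statement-expression
with the volatile int pointer could be dropped at the call sites.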