MINOR: threads: remove the previous synchronization point

It's not needed anymore as it is fully covered by the new rendez-vous
point. This also removes the pipe and its polling.
This commit is contained in:
Willy Tarreau 2018-08-02 11:01:05 +02:00
parent 85c459d7e8
commit 647c70b681
4 changed files with 0 additions and 161 deletions

View File

@ -94,14 +94,6 @@ enum { tid = 0 };
#define HA_BARRIER() do { } while (0)
#define THREAD_SYNC_INIT() do { /* do nothing */ } while(0)
#define THREAD_SYNC_ENABLE() do { /* do nothing */ } while(0)
#define THREAD_WANT_SYNC() do { /* do nothing */ } while(0)
#define THREAD_ENTER_SYNC() do { /* do nothing */ } while(0)
#define THREAD_EXIT_SYNC() do { /* do nothing */ } while(0)
#define THREAD_NO_SYNC() ({ 0; })
#define THREAD_NEED_SYNC() ({ 1; })
#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
@ -278,21 +270,6 @@ static inline unsigned long thread_isolated()
#define HA_BARRIER() pl_barrier()
#define THREAD_SYNC_INIT() thread_sync_init()
#define THREAD_SYNC_ENABLE() thread_sync_enable()
#define THREAD_WANT_SYNC() thread_want_sync()
#define THREAD_ENTER_SYNC() thread_enter_sync()
#define THREAD_EXIT_SYNC() thread_exit_sync()
#define THREAD_NO_SYNC() thread_no_sync()
#define THREAD_NEED_SYNC() thread_need_sync()
int thread_sync_init();
void thread_sync_enable(void);
void thread_want_sync(void);
void thread_enter_sync(void);
void thread_exit_sync(void);
int thread_no_sync(void);
int thread_need_sync(void);
void thread_harmless_till_end();
void thread_isolate();
void thread_release();
@ -368,7 +345,6 @@ static inline unsigned long thread_isolated()
/* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
enum lock_label {
THREAD_SYNC_LOCK = 0,
FD_LOCK,
TASK_RQ_LOCK,
TASK_WQ_LOCK,
@ -485,7 +461,6 @@ struct ha_rwlock {
static inline const char *lock_label(enum lock_label label)
{
switch (label) {
case THREAD_SYNC_LOCK: return "THREAD_SYNC";
case FD_LOCK: return "FD";
case TASK_RQ_LOCK: return "TASK_RQ";
case TASK_WQ_LOCK: return "TASK_WQ";
@ -974,8 +949,6 @@ static inline void __ha_compiler_barrier(void)
__asm __volatile("" ::: "memory");
}
/* Dummy I/O handler used by the sync pipe.*/
void thread_sync_io_handler(int fd);
int parse_nbthread(const char *arg, char **err);
#endif /* _COMMON_HATHREADS_H */

View File

@ -893,7 +893,6 @@ static int cli_io_handler_show_fd(struct appctx *appctx)
(fdt.iocb == conn_fd_handler) ? "conn_fd_handler" :
(fdt.iocb == dgram_fd_handler) ? "dgram_fd_handler" :
(fdt.iocb == listener_accept) ? "listener_accept" :
(fdt.iocb == thread_sync_io_handler) ? "thread_sync_io_handler" :
(fdt.iocb == poller_pipe_io_handler) ? "poller_pipe_io_handler" :
"unknown");

View File

@ -2461,7 +2461,6 @@ static void *run_thread_poll_loop(void *data)
}
protocol_enable_all();
THREAD_SYNC_ENABLE();
run_poll_loop();
list_for_each_entry(ptdf, &per_thread_deinit_list, list)
@ -3014,8 +3013,6 @@ int main(int argc, char **argv)
int i;
sigset_t blocked_sig, old_sig;
THREAD_SYNC_INIT();
/* Init tids array */
for (i = 0; i < global.nbthread; i++)
tids[i] = i;

View File

@ -20,16 +20,8 @@
#include <proto/fd.h>
/* Dummy I/O handler used by the sync pipe.*/
void thread_sync_io_handler(int fd)
{
}
#ifdef USE_THREAD
static HA_SPINLOCK_T sync_lock;
static int threads_sync_pipe[2];
static unsigned long threads_want_sync = 0;
volatile unsigned long threads_want_rdv_mask = 0;
volatile unsigned long threads_harmless_mask = 0;
volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default
@ -41,127 +33,6 @@ THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
struct lock_stat lock_stats[LOCK_LABELS];
#endif
/* Initializes the sync point. It creates a pipe used by threads to wake up all
* others when a sync is requested. It also initializes the mask of all created
* threads. It returns 0 on success and -1 if an error occurred.
*/
int thread_sync_init()
{
int rfd;
if (pipe(threads_sync_pipe) < 0)
return -1;
rfd = threads_sync_pipe[0];
fcntl(rfd, F_SETFL, O_NONBLOCK);
fd_insert(rfd, thread_sync_io_handler, thread_sync_io_handler, MAX_THREADS_MASK);
return 0;
}
/* Enables the sync point. */
void thread_sync_enable(void)
{
fd_want_recv(threads_sync_pipe[0]);
}
/* Called when a thread wants to pass into the sync point. It subscribes the
 * current thread to the set of threads waiting for sync by updating a
 * bit-field. If this is the first one, it wakes up all other threads by
 * writing on the sync pipe.
 */
void thread_want_sync()
{
if (all_threads_mask & (all_threads_mask - 1)) {
if (threads_want_sync & tid_bit)
return;
if (HA_ATOMIC_OR(&threads_want_sync, tid_bit) == tid_bit)
shut_your_big_mouth_gcc(write(threads_sync_pipe[1], "S", 1));
}
else {
threads_want_sync = 1;
}
}
/* Returns 1 if no thread has requested a sync. Otherwise, it returns 0. */
int thread_no_sync()
{
return (threads_want_sync == 0UL);
}
/* Returns 1 if the current thread has requested a sync. Otherwise, it returns
* 0.
*/
int thread_need_sync()
{
return ((threads_want_sync & tid_bit) != 0UL);
}
/* Thread barrier. Synchronizes all threads at the barrier referenced by
* <barrier>. The calling thread shall block until all other threads have called
* thread_sync_barrier specifying the same barrier.
*
 * If you need to use several barriers at different points, you need to use a
* different <barrier> for each point.
*/
static inline void thread_sync_barrier(volatile unsigned long *barrier)
{
unsigned long old = all_threads_mask;
HA_ATOMIC_CAS(barrier, &old, 0);
HA_ATOMIC_OR(barrier, tid_bit);
/* Note below: we need to wait for all threads to join here, but in
* case several threads are scheduled on the same CPU, busy polling
* will instead degrade the performance, forcing other threads to
* wait longer (typically in epoll_wait()). Let's use sched_yield()
* when available instead.
*/
while ((*barrier & all_threads_mask) != all_threads_mask) {
#if _POSIX_PRIORITY_SCHEDULING
sched_yield();
#else
pl_cpu_relax();
#endif
}
}
/* Enter into the sync point and lock it if the current thread has requested a
* sync. */
void thread_enter_sync()
{
static volatile unsigned long barrier = 0;
if (!(all_threads_mask & (all_threads_mask - 1)))
return;
thread_sync_barrier(&barrier);
if (threads_want_sync & tid_bit)
HA_SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
}
/* Exit from the sync point and unlock it if it was previously locked. If the
* current thread is the last one to have requested a sync, the sync pipe is
* flushed.
*/
void thread_exit_sync()
{
static volatile unsigned long barrier = 0;
if (!(all_threads_mask & (all_threads_mask - 1)))
return;
if (threads_want_sync & tid_bit)
HA_SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);
if (HA_ATOMIC_AND(&threads_want_sync, ~tid_bit) == 0) {
char c;
shut_your_big_mouth_gcc(read(threads_sync_pipe[0], &c, 1));
fd_done_recv(threads_sync_pipe[0]);
}
thread_sync_barrier(&barrier);
}
/* Marks the thread as harmless until the last thread using the rendez-vous
* point quits. Given that we can wait for a long time, sched_yield() is used
* when available to offer the CPU resources to competing threads if needed.
@ -228,7 +99,6 @@ void thread_release()
__attribute__((constructor))
static void __hathreads_init(void)
{
HA_SPIN_INIT(&sync_lock);
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
memset(lock_stats, 0, sizeof(lock_stats));
#endif