/*
 * functions about threads.
 *
 * Copyright (C) 2017 Christopher Fauet - cfaulet@haproxy.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define _GNU_SOURCE

#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>

#ifdef USE_CPU_AFFINITY
#include <sched.h>
#endif

#ifdef __FreeBSD__
#include <sys/cpuset.h>
#endif

#include <haproxy/cfgparse.h>
#include <haproxy/fd.h>
#include <haproxy/global.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>

struct thread_info ha_thread_info[MAX_THREADS] = { };
THREAD_LOCAL struct thread_info *ti = &ha_thread_info[0];

#ifdef USE_THREAD

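/* Note: each started thread is identified by its index <tid> and by a single
 * bit <tid_bit> equal to (1UL << tid). The masks below aggregate one such bit
 * per thread: all_threads_mask covers every started thread, and the other
 * masks track which threads currently request a rendez-vous, are harmless, or
 * are waiting for the synchronous release.
 */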
volatile unsigned long threads_want_rdv_mask = 0;
volatile unsigned long threads_harmless_mask = 0;
volatile unsigned long threads_sync_mask = 0;
volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default
THREAD_LOCAL unsigned int tid = 0;
THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
int thread_cpus_enabled_at_boot = 1;

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
struct lock_stat lock_stats[LOCK_LABELS];
#endif

/* Marks the thread as harmless until the last thread using the rendez-vous
 * point quits. Given that we can wait for a long time, sched_yield() is used
 * when available to offer the CPU resources to competing threads if needed.
 */
void thread_harmless_till_end()
{
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	while (threads_want_rdv_mask & all_threads_mask) {
		ha_thread_relax();
	}
}

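/* Illustrative note (not from this file): thread_harmless_till_end() is meant
 * for code paths where the calling thread may have to wait for a long time
 * without touching any shared state, so that a thread requesting isolation
 * does not remain blocked on it.
 */
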
/* Isolates the current thread: request the ability to work while all other
 * threads are harmless. Only returns once all of them are harmless, with the
 * current thread's bit in threads_harmless_mask cleared. Needs to be completed
 * using thread_release().
 */
void thread_isolate()
{
	unsigned long old;

	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = threads_harmless_mask;
		else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}
	/* one thread gets released at a time here, with its harmless bit off.
	 * The loss of this bit makes the other ones continue to spin while the
	 * thread is working alone.
	 */
}

/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes.
 */
void thread_release()
{
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}
}

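/* Usage sketch (illustrative only, not code used by this file): a thread that
 * needs to modify data shared with all other threads typically brackets the
 * critical section with the pair of calls defined above:
 *
 *	thread_isolate();
 *	// ... all other threads are now harmless; walk/modify shared data ...
 *	thread_release();
 */
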
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes. The difference with thread_release() is that this one
 * will not leave the function before others are notified to do the same, so it
 * guarantees that the current thread will not pass through a subsequent call
 * to thread_isolate() before others finish.
 */
void thread_sync_release()
{
	_HA_ATOMIC_OR(&threads_sync_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}

	/* the current thread is not harmless anymore, thread_isolate()
	 * is forced to wait till all waiters finish.
	 */
	_HA_ATOMIC_AND(&threads_sync_mask, ~tid_bit);
	while (threads_sync_mask & all_threads_mask)
		ha_thread_relax();
}

/* send signal <sig> to thread <thr> */
void ha_tkill(unsigned int thr, int sig)
{
	pthread_kill(ha_thread_info[thr].pthread, sig);
}

/* send signal <sig> to all threads. The calling thread is signaled last in
 * order to allow all threads to synchronize in the handler.
 */
void ha_tkillall(int sig)
{
	unsigned int thr;

	for (thr = 0; thr < global.nbthread; thr++) {
		if (!(all_threads_mask & (1UL << thr)))
			continue;
		if (thr == tid)
			continue;
		pthread_kill(ha_thread_info[thr].pthread, sig);
	}
	raise(sig);
}

/* these calls are used as callbacks at init time */
void ha_spin_init(HA_SPINLOCK_T *l)
{
	HA_SPIN_INIT(l);
}

/* these calls are used as callbacks at init time */
void ha_rwlock_init(HA_RWLOCK_T *l)
{
	HA_RWLOCK_INIT(l);
}

/* returns the number of CPUs the current process is enabled to run on */
static int thread_cpus_enabled()
{
	int ret = 1;

#ifdef USE_CPU_AFFINITY
#if defined(__linux__) && defined(CPU_COUNT)
	cpu_set_t mask;

	if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
		ret = CPU_COUNT(&mask);
#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
	cpuset_t cpuset;
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	                       sizeof(cpuset), &cpuset) == 0)
		ret = CPU_COUNT(&cpuset);
#endif
#endif
	ret = MAX(ret, 1);
	ret = MIN(ret, MAX_THREADS);
	return ret;
}

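/* Note: the value computed above is stored in thread_cpus_enabled_at_boot by
 * __thread_init() below and is reported as the default thread count in the
 * build options string.
 */
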
__attribute__((constructor))
static void __thread_init(void)
{
	char *ptr = NULL;

	if (MAX_THREADS < 1 || MAX_THREADS > LONGBITS) {
		ha_alert("MAX_THREADS value must be between 1 and %d inclusive; "
		         "HAProxy was built with value %d, please fix it and rebuild.\n",
		         LONGBITS, MAX_THREADS);
		exit(1);
	}

#if defined(__GNUC__) && (__GNUC__ >= 3) && defined(__GNU_LIBRARY__) && !defined(__clang__)
	/* make sure libgcc_s is already loaded, because pthread_exit() may
	 * need it on exit after the chroot! _Unwind_Find_FDE() is defined
	 * there since gcc 3.0, has no side effect, doesn't take any argument
	 * and seems to be present on all supported platforms.
	 */
	{
		extern void _Unwind_Find_FDE(void);
		_Unwind_Find_FDE();
	}
#endif

	thread_cpus_enabled_at_boot = thread_cpus_enabled();

	memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
	          MAX_THREADS, thread_cpus_enabled_at_boot);
	hap_register_build_opts(ptr, 1);

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	memset(lock_stats, 0, sizeof(lock_stats));
#endif
}

#else

REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");

#endif // USE_THREAD


/* Parse the number of threads in argument <arg>, returns it and adjusts a few
 * internal variables accordingly, or fails and returns zero with an error
 * reason in <err>. May be called multiple times while parsing.
 */
int parse_nbthread(const char *arg, char **err)
{
	long nbthread;
	char *errptr;

	nbthread = strtol(arg, &errptr, 10);
	if (!*arg || *errptr) {
		memprintf(err, "passed a missing or unparsable integer value in '%s'", arg);
		return 0;
	}

#ifndef USE_THREAD
	if (nbthread != 1) {
		memprintf(err, "specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD");
		return 0;
	}
#else
	if (nbthread < 1 || nbthread > MAX_THREADS) {
		memprintf(err, "value must be between 1 and %d (was %ld)", MAX_THREADS, nbthread);
		return 0;
	}

	all_threads_mask = nbits(nbthread);
#endif
	return nbthread;
}