/*
 * functions about threads.
 *
 * Copyright (C) 2017 Christopher Fauet - cfaulet@haproxy.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

2019-01-26 13:27:06 +00:00
|
|
|
#define _GNU_SOURCE
|
2017-10-19 09:59:15 +00:00
|
|
|
#include <unistd.h>
|
2018-07-30 08:34:35 +00:00
|
|
|
#include <stdlib.h>
|
2017-10-19 09:59:15 +00:00
|
|
|
#include <fcntl.h>
|
|
|
|
|
2019-01-26 13:27:06 +00:00
|
|
|
#ifdef USE_CPU_AFFINITY
|
|
|
|
#include <sched.h>
|
|
|
|
#endif
|
|
|
|
|
2019-04-10 22:06:47 +00:00
|
|
|
#ifdef __FreeBSD__
|
|
|
|
#include <sys/cpuset.h>
|
|
|
|
#endif
|
|
|
|
|
2017-11-03 22:39:25 +00:00
|
|
|
#include <common/cfgparse.h>
|
2017-10-12 14:09:09 +00:00
|
|
|
#include <common/hathreads.h>
|
2017-10-19 09:59:15 +00:00
|
|
|
#include <common/standard.h>
|
2018-11-26 09:19:54 +00:00
|
|
|
#include <types/global.h>
|
2017-10-19 09:59:15 +00:00
|
|
|
#include <proto/fd.h>
|
2017-10-12 14:09:09 +00:00
|
|
|
|
2019-09-13 04:03:12 +00:00
|
|
|
/* Per-thread bookkeeping, one slot per possible thread; zero-initialized. */
struct thread_info ha_thread_info[MAX_THREADS] = { };

/* Each thread's pointer to its own ha_thread_info slot; points to thread 0's
 * slot by default so single-threaded code paths work unchanged.
 */
THREAD_LOCAL struct thread_info *ti = &ha_thread_info[0];
|
2017-10-12 14:09:09 +00:00
|
|
|
|
|
|
|
#ifdef USE_THREAD
|
|
|
|
|
2018-08-02 08:16:17 +00:00
|
|
|
/* Bit masks driving the inter-thread rendez-vous point (one bit per thread,
 * see thread_isolate()/thread_release() below):
 * - threads_want_rdv_mask: threads requesting exclusive access
 * - threads_harmless_mask: threads guaranteed not to touch shared state
 * - threads_sync_mask: threads waiting for each other in thread_sync_release()
 */
volatile unsigned long threads_want_rdv_mask = 0;
volatile unsigned long threads_harmless_mask = 0;
volatile unsigned long threads_sync_mask = 0;
volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default

/* current thread's index (0-based) and its bit in the masks above */
THREAD_LOCAL unsigned int tid = 0;
THREAD_LOCAL unsigned long tid_bit = (1UL << 0);

/* number of CPUs the process could run on at boot, see thread_cpus_enabled() */
int thread_cpus_enabled_at_boot = 1;


#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
/* per-label lock contention statistics, only compiled in debug builds */
struct lock_stat lock_stats[LOCK_LABELS];
#endif
|
|
|
|
|
2018-08-02 08:16:17 +00:00
|
|
|
/* Marks the thread as harmless until the last thread using the rendez-vous
 * point quits. Given that we can wait for a long time, sched_yield() is used
 * when available to offer the CPU resources to competing threads if needed.
 */
void thread_harmless_till_end()
{
	/* advertise ourselves as harmless so isolated threads may proceed */
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);

	/* spin until no active thread requests the rendez-vous point anymore;
	 * note that the harmless bit is left set on return (the caller is
	 * presumably about to terminate — see "till end" in the name).
	 */
	while (threads_want_rdv_mask & all_threads_mask) {
		ha_thread_relax();
	}
}
|
|
|
|
|
|
|
|
/* Isolates the current thread : request the ability to work while all other
 * threads are harmless. Only returns once all of them are harmless, with the
 * current thread's bit in threads_harmless_mask cleared. Needs to be completed
 * using thread_release().
 */
void thread_isolate()
{
	unsigned long old;

	/* first mark ourselves harmless, then advertise the rendez-vous
	 * request; the store barrier orders the two writes so that other
	 * threads never observe our request without our harmless bit set.
	 */
	_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);

	/* wait for all threads to become harmless */
	old = threads_harmless_mask;
	while (1) {
		if (unlikely((old & all_threads_mask) != all_threads_mask))
			old = threads_harmless_mask;
		/* all threads are harmless: try to atomically clear our own
		 * harmless bit; a successful CAS means we won the isolation.
		 */
		else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
			break;

		ha_thread_relax();
	}
	/* one thread gets released at a time here, with its harmless bit off.
	 * The loss of this bit makes the other one continue to spin while the
	 * thread is working alone.
	 */
}
|
|
|
|
|
|
|
|
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes.
 */
void thread_release()
{
	/* drop our rendez-vous request first */
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	/* while other threads still request the rendez-vous point, declare
	 * ourselves harmless so they can proceed, then take the bit back once
	 * nobody requests it anymore.
	 */
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		/* NOTE(review): HA_ATOMIC_AND here vs _HA_ATOMIC_AND above —
		 * presumably the barrier-including variant is intended when
		 * clearing the harmless bit; confirm this is deliberate.
		 */
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}
}
|
2017-10-19 09:59:15 +00:00
|
|
|
|
2019-06-09 10:20:02 +00:00
|
|
|
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask and by marking this thread as harmless until the
 * last worker finishes. The difference with thread_release() is that this one
 * will not leave the function before others are notified to do the same, so it
 * guarantees that the current thread will not pass through a subsequent call
 * to thread_isolate() before others finish.
 */
void thread_sync_release()
{
	/* register in the sync mask before dropping the rendez-vous request;
	 * the store barrier orders the two writes.
	 */
	_HA_ATOMIC_OR(&threads_sync_mask, tid_bit);
	__ha_barrier_atomic_store();
	_HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);

	/* same dance as thread_release(): stay harmless while others still
	 * request the rendez-vous point, then reclaim our harmless bit.
	 */
	while (threads_want_rdv_mask & all_threads_mask) {
		_HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
		while (threads_want_rdv_mask & all_threads_mask)
			ha_thread_relax();
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
	}

	/* the current thread is not harmless anymore, thread_isolate()
	 * is forced to wait till all waiters finish.
	 */
	_HA_ATOMIC_AND(&threads_sync_mask, ~tid_bit);
	while (threads_sync_mask & all_threads_mask)
		ha_thread_relax();
}
|
|
|
|
|
2019-05-22 06:43:34 +00:00
|
|
|
/* send signal <sig> to thread <thr>. <thr> is the 0-based thread index into
 * ha_thread_info; no bounds check is performed, the caller must pass a valid
 * index.
 */
void ha_tkill(unsigned int thr, int sig)
{
	pthread_kill(ha_thread_info[thr].pthread, sig);
}
|
|
|
|
|
|
|
|
/* send signal <sig> to all threads. The calling thread is signaled last in
|
|
|
|
* order to allow all threads to synchronize in the handler.
|
|
|
|
*/
|
|
|
|
void ha_tkillall(int sig)
|
|
|
|
{
|
|
|
|
unsigned int thr;
|
|
|
|
|
|
|
|
for (thr = 0; thr < global.nbthread; thr++) {
|
|
|
|
if (!(all_threads_mask & (1UL << thr)))
|
|
|
|
continue;
|
|
|
|
if (thr == tid)
|
|
|
|
continue;
|
2019-09-13 04:03:12 +00:00
|
|
|
pthread_kill(ha_thread_info[thr].pthread, sig);
|
2019-05-22 06:43:34 +00:00
|
|
|
}
|
|
|
|
raise(sig);
|
|
|
|
}
|
|
|
|
|
2018-11-25 18:28:23 +00:00
|
|
|
/* these calls are used as callbacks at init time: initializes the spinlock
 * pointed to by <l> via the HA_SPIN_INIT macro.
 */
void ha_spin_init(HA_SPINLOCK_T *l)
{
	HA_SPIN_INIT(l);
}
|
|
|
|
|
|
|
|
/* these calls are used as callbacks at init time: initializes the rwlock
 * pointed to by <l> via the HA_RWLOCK_INIT macro.
 */
void ha_rwlock_init(HA_RWLOCK_T *l)
{
	HA_RWLOCK_INIT(l);
}
|
|
|
|
|
2019-01-26 13:27:06 +00:00
|
|
|
/* returns the number of CPUs the current process is enabled to run on */
|
|
|
|
static int thread_cpus_enabled()
|
|
|
|
{
|
|
|
|
int ret = 1;
|
|
|
|
|
|
|
|
#ifdef USE_CPU_AFFINITY
|
|
|
|
#if defined(__linux__) && defined(CPU_COUNT)
|
|
|
|
cpu_set_t mask;
|
|
|
|
|
|
|
|
if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
|
|
|
|
ret = CPU_COUNT(&mask);
|
2019-04-10 22:06:47 +00:00
|
|
|
#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
|
|
|
|
cpuset_t cpuset;
|
|
|
|
if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
|
|
|
|
sizeof(cpuset), &cpuset) == 0)
|
|
|
|
ret = CPU_COUNT(&cpuset);
|
2019-01-26 13:27:06 +00:00
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
ret = MAX(ret, 1);
|
|
|
|
ret = MIN(ret, MAX_THREADS);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-10-12 14:09:09 +00:00
|
|
|
__attribute__((constructor))
|
|
|
|
static void __hathreads_init(void)
|
|
|
|
{
|
2019-01-26 12:35:03 +00:00
|
|
|
char *ptr = NULL;
|
|
|
|
|
|
|
|
if (MAX_THREADS < 1 || MAX_THREADS > LONGBITS) {
|
|
|
|
ha_alert("MAX_THREADS value must be between 1 and %d inclusive; "
|
|
|
|
"HAProxy was built with value %d, please fix it and rebuild.\n",
|
|
|
|
LONGBITS, MAX_THREADS);
|
|
|
|
exit(1);
|
|
|
|
}
|
2019-01-26 13:27:06 +00:00
|
|
|
|
|
|
|
thread_cpus_enabled_at_boot = thread_cpus_enabled();
|
|
|
|
|
|
|
|
memprintf(&ptr, "Built with multi-threading support (MAX_THREADS=%d, default=%d).",
|
|
|
|
MAX_THREADS, thread_cpus_enabled_at_boot);
|
2019-01-26 12:35:03 +00:00
|
|
|
hap_register_build_opts(ptr, 1);
|
|
|
|
|
2017-10-12 14:09:09 +00:00
|
|
|
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
|
|
|
|
memset(lock_stats, 0, sizeof(lock_stats));
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2018-12-15 15:48:14 +00:00
|
|
|
#else

/* threads disabled at build time: only advertise it in the build options */
REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");

#endif // USE_THREAD
|
|
|
|
|
|
|
|
|
|
|
|
/* Parse the number of threads in argument <arg>, returns it and adjusts a few
 * internal variables accordingly, or fails and returns zero with an error
 * reason in <err>. May be called multiple times while parsing.
 */
int parse_nbthread(const char *arg, char **err)
{
	char *end;
	long value = strtol(arg, &end, 10);

	/* reject empty strings as well as trailing garbage after the digits */
	if (!*arg || *end) {
		memprintf(err, "passed a missing or unparsable integer value in '%s'", arg);
		return 0;
	}

#ifndef USE_THREAD
	if (value != 1) {
		memprintf(err, "specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD");
		return 0;
	}
#else
	if (value < 1 || value > MAX_THREADS) {
		memprintf(err, "value must be between 1 and %d (was %ld)", MAX_THREADS, value);
		return 0;
	}

	/* enable one mask bit per configured thread */
	all_threads_mask = nbits(value);
#endif
	return value;
}
|