1
0
mirror of http://git.haproxy.org/git/haproxy.git/ synced 2025-04-04 23:29:42 +00:00

MAJOR: ssl: Change default locks on ssl session cache.

Previously, pthread process-shared locks were used by default
if USE_SYSCALL_FUTEX is not specified.

This patch implements an OS-independent kind of lock:
an active spinlock is used if USE_SYSCALL_FUTEX is not specified.

The old behavior is still available if USE_PTHREAD_PSHARED=1.
This commit is contained in:
Emeric Brun 2014-05-07 23:11:42 +02:00 committed by Willy Tarreau
parent caa19cc867
commit cd1a526a90
2 changed files with 72 additions and 26 deletions

View File

@ -17,6 +17,7 @@
# USE_PCRE_JIT : enable JIT for faster regex on libpcre >= 8.32 # USE_PCRE_JIT : enable JIT for faster regex on libpcre >= 8.32
# USE_POLL : enable poll(). Automatic. # USE_POLL : enable poll(). Automatic.
# USE_PRIVATE_CACHE : disable shared memory cache of ssl sessions. # USE_PRIVATE_CACHE : disable shared memory cache of ssl sessions.
# USE_PTHREAD_PSHARED : enable pthread process shared mutex on sslcache.
# USE_REGPARM : enable regparm optimization. Recommended on x86. # USE_REGPARM : enable regparm optimization. Recommended on x86.
# USE_STATIC_PCRE : enable static libpcre. Recommended. # USE_STATIC_PCRE : enable static libpcre. Recommended.
# USE_TPROXY : enable transparent proxy. Automatic. # USE_TPROXY : enable transparent proxy. Automatic.
@ -536,10 +537,13 @@ OPTIONS_OBJS += src/ssl_sock.o src/shctx.o
ifneq ($(USE_PRIVATE_CACHE),) ifneq ($(USE_PRIVATE_CACHE),)
OPTIONS_CFLAGS += -DUSE_PRIVATE_CACHE OPTIONS_CFLAGS += -DUSE_PRIVATE_CACHE
else else
ifneq ($(USE_PTHREAD_PSHARED),)
OPTIONS_CFLAGS += -DUSE_PTHREAD_PSHARED
OPTIONS_LDFLAGS += -lpthread
else
ifneq ($(USE_FUTEX),) ifneq ($(USE_FUTEX),)
OPTIONS_CFLAGS += -DUSE_SYSCALL_FUTEX OPTIONS_CFLAGS += -DUSE_SYSCALL_FUTEX
else endif
OPTIONS_LDFLAGS += -lpthread
endif endif
endif endif
endif endif

View File

@ -13,6 +13,9 @@
#include <sys/mman.h> #include <sys/mman.h>
#ifndef USE_PRIVATE_CACHE #ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
#include <pthread.h>
#else
#ifdef USE_SYSCALL_FUTEX #ifdef USE_SYSCALL_FUTEX
#include <unistd.h> #include <unistd.h>
#ifndef u32 #ifndef u32
@ -20,9 +23,8 @@
#endif #endif
#include <linux/futex.h> #include <linux/futex.h>
#include <sys/syscall.h> #include <sys/syscall.h>
#else /* USE_SYSCALL_FUTEX */ #endif
#include <pthread.h> #endif
#endif /* USE_SYSCALL_FUTEX */
#endif #endif
#include <arpa/inet.h> #include <arpa/inet.h>
#include "ebmbtree.h" #include "ebmbtree.h"
@ -60,10 +62,10 @@ struct shared_block {
struct shared_context { struct shared_context {
#ifndef USE_PRIVATE_CACHE #ifndef USE_PRIVATE_CACHE
#ifdef USE_SYSCALL_FUTEX #ifdef USE_PTHREAD_PSHARED
unsigned int waiters;
#else /* USE_SYSCALL_FUTEX */
pthread_mutex_t mutex; pthread_mutex_t mutex;
#else
unsigned int waiters;
#endif #endif
#endif #endif
struct shsess_packet_hdr upd; struct shsess_packet_hdr upd;
@ -75,17 +77,63 @@ struct shared_context {
/* Static shared context */ /* Static shared context */
static struct shared_context *shctx = NULL; static struct shared_context *shctx = NULL;
#ifndef USE_PRIVATE_CACHE
static int use_shared_mem = 0;
#endif
/* Lock functions */ /* Lock functions */
#ifdef USE_PRIVATE_CACHE
#if defined (USE_PRIVATE_CACHE)
#define shared_context_lock() #define shared_context_lock()
#define shared_context_unlock() #define shared_context_unlock()
#elif defined (USE_PTHREAD_PSHARED)
static int use_shared_mem = 0;
#define shared_context_lock() if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock() if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
#else #else
static int use_shared_mem = 0;
#ifdef USE_SYSCALL_FUTEX #ifdef USE_SYSCALL_FUTEX
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
syscall(SYS_futex, uaddr, FUTEX_WAIT, value, NULL, 0, 0);
}
static inline void _shared_context_awakelocker(unsigned int *uaddr)
{
syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, 0, 0);
}
#else /* internal spin lock */
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline void relax()
{
__asm volatile("rep;nop\n" ::: "memory");
}
#else /* if no x86_64 or i586 arch: use less optimized but generic asm */
static inline void relax()
{
__asm volatile("" ::: "memory");
}
#endif
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
int i;
for (i = 0; i < *count; i++) {
relax();
relax();
}
*count = *count << 1;
}
#define _shared_context_awakelocker(a)
#endif
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__) #if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline unsigned int xchg(unsigned int *ptr, unsigned int x) static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{ {
@ -139,6 +187,7 @@ static inline unsigned char atomic_dec(unsigned int *ptr)
static inline void _shared_context_lock(void) static inline void _shared_context_lock(void)
{ {
unsigned int x; unsigned int x;
unsigned int count = 4;
x = cmpxchg(&shctx->waiters, 0, 1); x = cmpxchg(&shctx->waiters, 0, 1);
if (x) { if (x) {
@ -146,7 +195,7 @@ static inline void _shared_context_lock(void)
x = xchg(&shctx->waiters, 2); x = xchg(&shctx->waiters, 2);
while (x) { while (x) {
syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0); _shared_context_wait4lock(&count, &shctx->waiters, 2);
x = xchg(&shctx->waiters, 2); x = xchg(&shctx->waiters, 2);
} }
} }
@ -156,7 +205,7 @@ static inline void _shared_context_unlock(void)
{ {
if (atomic_dec(&shctx->waiters)) { if (atomic_dec(&shctx->waiters)) {
shctx->waiters = 0; shctx->waiters = 0;
syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0); _shared_context_awakelocker(&shctx->waiters);
} }
} }
@ -164,13 +213,6 @@ static inline void _shared_context_unlock(void)
#define shared_context_unlock() if (use_shared_mem) _shared_context_unlock() #define shared_context_unlock() if (use_shared_mem) _shared_context_unlock()
#else /* USE_SYSCALL_FUTEX */
#define shared_context_lock() if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock() if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
#endif
#endif #endif
/* List Macros */ /* List Macros */
@ -508,9 +550,9 @@ int shared_context_init(int size, int shared)
{ {
int i; int i;
#ifndef USE_PRIVATE_CACHE #ifndef USE_PRIVATE_CACHE
#ifndef USE_SYSCALL_FUTEX #ifdef USE_PTHREAD_PSHARED
pthread_mutexattr_t attr; pthread_mutexattr_t attr;
#endif /* USE_SYSCALL_FUTEX */ #endif
#endif #endif
struct shared_block *prev,*cur; struct shared_block *prev,*cur;
int maptype = MAP_PRIVATE; int maptype = MAP_PRIVATE;
@ -537,9 +579,7 @@ int shared_context_init(int size, int shared)
#ifndef USE_PRIVATE_CACHE #ifndef USE_PRIVATE_CACHE
if (maptype == MAP_SHARED) { if (maptype == MAP_SHARED) {
#ifdef USE_SYSCALL_FUTEX #ifdef USE_PTHREAD_PSHARED
shctx->waiters = 0;
#else
if (pthread_mutexattr_init(&attr)) { if (pthread_mutexattr_init(&attr)) {
munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block))); munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block)));
shctx = NULL; shctx = NULL;
@ -559,6 +599,8 @@ int shared_context_init(int size, int shared)
shctx = NULL; shctx = NULL;
return SHCTX_E_INIT_LOCK; return SHCTX_E_INIT_LOCK;
} }
#else
shctx->waiters = 0;
#endif #endif
use_shared_mem = 1; use_shared_mem = 1;
} }