From cd1a526a90f1f39a8740e667df88dc88a06959a3 Mon Sep 17 00:00:00 2001
From: Emeric Brun
Date: Wed, 7 May 2014 23:11:42 +0200
Subject: [PATCH] MAJOR: ssl: Change default locks on ssl session cache.

Previously, pthread process-shared locks were used by default if
USE_SYSCALL_FUTEX was not specified.

This patch implements an OS-independent kind of lock: an active
spinlock is used if USE_SYSCALL_FUTEX is not specified.

The old behavior is still available if USE_PTHREAD_PSHARED=1.
---
 Makefile    |  8 +++--
 src/shctx.c | 90 +++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 72 insertions(+), 26 deletions(-)

diff --git a/Makefile b/Makefile
index f95ba03ac..f722e11f8 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@
 #   USE_PCRE_JIT         : enable JIT for faster regex on libpcre >= 8.32
 #   USE_POLL             : enable poll(). Automatic.
 #   USE_PRIVATE_CACHE    : disable shared memory cache of ssl sessions.
+#   USE_PTHREAD_PSHARED  : enable pthread process shared mutex on sslcache.
 #   USE_REGPARM          : enable regparm optimization. Recommended on x86.
 #   USE_STATIC_PCRE      : enable static libpcre. Recommended.
 #   USE_TPROXY           : enable transparent proxy. Automatic.
@@ -536,10 +537,13 @@ OPTIONS_OBJS += src/ssl_sock.o src/shctx.o
 ifneq ($(USE_PRIVATE_CACHE),)
 OPTIONS_CFLAGS  += -DUSE_PRIVATE_CACHE
 else
+ifneq ($(USE_PTHREAD_PSHARED),)
+OPTIONS_CFLAGS  += -DUSE_PTHREAD_PSHARED
+OPTIONS_LDFLAGS += -lpthread
+else
 ifneq ($(USE_FUTEX),)
 OPTIONS_CFLAGS  += -DUSE_SYSCALL_FUTEX
-else
-OPTIONS_LDFLAGS += -lpthread
+endif
 endif
 endif
 endif
diff --git a/src/shctx.c b/src/shctx.c
index 86e605651..f33b7ca8e 100644
--- a/src/shctx.c
+++ b/src/shctx.c
@@ -13,6 +13,9 @@
 #include <sys/mman.h>
 
 #ifndef USE_PRIVATE_CACHE
+#ifdef USE_PTHREAD_PSHARED
+#include <pthread.h>
+#else
 #ifdef USE_SYSCALL_FUTEX
 #include <unistd.h>
 #ifndef u32
@@ -20,9 +23,8 @@
 #endif
 #include <linux/futex.h>
 #include <sys/syscall.h>
-#else /* USE_SYSCALL_FUTEX */
-#include <pthread.h>
-#endif /* USE_SYSCALL_FUTEX */
+#endif
+#endif
 #endif
 #include <arpa/inet.h>
 #include "ebmbtree.h"
@@ -60,10 +62,10 @@ struct shared_block {
 
 struct shared_context {
 #ifndef USE_PRIVATE_CACHE
-#ifdef USE_SYSCALL_FUTEX
-	unsigned int waiters;
-#else /* USE_SYSCALL_FUTEX */
+#ifdef USE_PTHREAD_PSHARED
 	pthread_mutex_t mutex;
+#else
+	unsigned int waiters;
 #endif
 #endif
 	struct shsess_packet_hdr upd;
@@ -75,17 +77,63 @@ struct shared_context {
 
 /* Static shared context */
 static struct shared_context *shctx = NULL;
-#ifndef USE_PRIVATE_CACHE
-static int use_shared_mem = 0;
-#endif
 
 /* Lock functions */
-#ifdef USE_PRIVATE_CACHE
+
+#if defined (USE_PRIVATE_CACHE)
+
 #define shared_context_lock()
 #define shared_context_unlock()
 
+#elif defined (USE_PTHREAD_PSHARED)
+static int use_shared_mem = 0;
+
+#define shared_context_lock()   if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
+#define shared_context_unlock() if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
+
 #else
+static int use_shared_mem = 0;
+
 #ifdef USE_SYSCALL_FUTEX
+static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
+{
+	syscall(SYS_futex, uaddr, FUTEX_WAIT, value, NULL, 0, 0);
+}
+
+static inline void _shared_context_awakelocker(unsigned int *uaddr)
+{
+	syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, 0, 0);
+}
+
+#else /* internal spin lock */
+
+#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
+static inline void relax()
+{
+	__asm volatile("rep;nop\n" ::: "memory");
+}
+#else /* if no x86_64 or i586 arch: use less optimized but generic asm */
+static inline void relax()
+{
+	__asm volatile("" ::: "memory");
+}
+#endif
+
+static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
+{
+	int i;
+
+	for (i = 0; i < *count; i++) {
+		relax();
+		relax();
+	}
+	*count = *count << 1;
+}
+
+#define _shared_context_awakelocker(a)
+
+#endif
+
 #if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
 static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
 {
@@ -139,6 +187,7 @@ static inline unsigned char atomic_dec(unsigned int *ptr)
 static inline void _shared_context_lock(void)
 {
 	unsigned int x;
+	unsigned int count = 4;
 
 	x = cmpxchg(&shctx->waiters, 0, 1);
 	if (x) {
@@ -146,7 +195,7 @@ static inline void _shared_context_lock(void)
 			x = xchg(&shctx->waiters, 2);
 
 		while (x) {
-			syscall(SYS_futex, &shctx->waiters, FUTEX_WAIT, 2, NULL, 0, 0);
+			_shared_context_wait4lock(&count, &shctx->waiters, 2);
 			x = xchg(&shctx->waiters, 2);
 		}
 	}
@@ -156,7 +205,7 @@ static inline void _shared_context_unlock(void)
 {
 	if (atomic_dec(&shctx->waiters)) {
 		shctx->waiters = 0;
-		syscall(SYS_futex, &shctx->waiters, FUTEX_WAKE, 1, NULL, 0, 0);
+		_shared_context_awakelocker(&shctx->waiters);
 	}
 }
 
@@ -164,13 +213,6 @@ static inline void _shared_context_unlock(void)
 
 #define shared_context_unlock() if (use_shared_mem) _shared_context_unlock()
 
-#else /* USE_SYSCALL_FUTEX */
-
-#define shared_context_lock()   if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
-
-#define shared_context_unlock() if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
-
-#endif
 #endif
 
 /* List Macros */
@@ -508,9 +550,9 @@ int shared_context_init(int size, int shared)
 {
 	int i;
 #ifndef USE_PRIVATE_CACHE
-#ifndef USE_SYSCALL_FUTEX
+#ifdef USE_PTHREAD_PSHARED
 	pthread_mutexattr_t attr;
-#endif /* USE_SYSCALL_FUTEX */
+#endif
 #endif
 	struct shared_block *prev,*cur;
 	int maptype = MAP_PRIVATE;
@@ -537,9 +579,7 @@ int shared_context_init(int size, int shared)
 
 #ifndef USE_PRIVATE_CACHE
 	if (maptype == MAP_SHARED) {
-#ifdef USE_SYSCALL_FUTEX
-		shctx->waiters = 0;
-#else
+#ifdef USE_PTHREAD_PSHARED
 		if (pthread_mutexattr_init(&attr)) {
 			munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block)));
 			shctx = NULL;
@@ -559,6 +599,8 @@ int shared_context_init(int size, int shared)
 			shctx = NULL;
 			return SHCTX_E_INIT_LOCK;
 		}
+#else
+		shctx->waiters = 0;
 #endif
 		use_shared_mem = 1;
 	}
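
Note: the new spinlock default requires no additional build option. The
previous behavior (a process-shared pthread mutex) can still be selected at
build time with USE_PTHREAD_PSHARED=1, e.g.:

    make TARGET=linux2628 USE_OPENSSL=1 USE_PTHREAD_PSHARED=1

(the target name above is only an example). This defines -DUSE_PTHREAD_PSHARED
and links -lpthread as before, while USE_FUTEX=1 keeps selecting the Linux
futex-based lock (-DUSE_SYSCALL_FUTEX).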
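For reference, the sketch below shows in isolation the lock protocol that
becomes the default when neither USE_PTHREAD_PSHARED nor USE_SYSCALL_FUTEX is
set: a single lock word (0 = free, 1 = held, 2 = held with waiters) taken with
cmpxchg/xchg, where waiters spin with an exponentially growing backoff instead
of sleeping in the kernel. It is only an illustration: GCC __atomic builtins
and the names lockword/spin_lock/spin_unlock stand in for the patch's
hand-written assembly and shctx fields, and the unlock path is simplified to a
plain release store, which is equivalent here because the wake operation is a
no-op in the spin build. It compiles with any recent GCC or Clang.

/* Stand-alone illustration only; not the shipped HAProxy code. */
#include <stdio.h>

static unsigned int lockword;   /* plays the role of shctx->waiters */

static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
	return __atomic_exchange_n(ptr, x, __ATOMIC_ACQ_REL);
}

static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
	__atomic_compare_exchange_n(ptr, &old, new, 0,
	                            __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
	return old;   /* previous value of *ptr, as with the patch's helper */
}

static inline void relax(void)
{
	__asm volatile("" ::: "memory");   /* generic fallback: compiler barrier only */
}

/* Spin *count times, then double the budget for the next wait. */
static inline void wait4lock(unsigned int *count)
{
	unsigned int i;

	for (i = 0; i < *count; i++)
		relax();
	*count <<= 1;
}

static inline void spin_lock(unsigned int *w)
{
	unsigned int count = 4;      /* initial backoff, as in the patch */
	unsigned int x;

	x = cmpxchg(w, 0, 1);        /* fast path: free -> held */
	if (x) {
		if (x != 2)
			x = xchg(w, 2);      /* mark the lock as contended */
		while (x) {
			wait4lock(&count);   /* replaces futex_wait() in the spin build */
			x = xchg(w, 2);
		}
	}
}

static inline void spin_unlock(unsigned int *w)
{
	/* The futex build decrements first so it can skip the wakeup when
	 * nobody waits; with pure spinning a release store of 0 is enough. */
	__atomic_store_n(w, 0, __ATOMIC_RELEASE);
}

int main(void)
{
	spin_lock(&lockword);
	printf("held: lock word = %u\n", lockword);
	spin_unlock(&lockword);
	printf("released: lock word = %u\n", lockword);
	return 0;
}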