REORG: shctx: move lock functions and struct

Move the lock functions to proto/shctx.h and the structures to types/shctx.h
in order to simplify the ssl/shctx split.
William Lallemand 2017-10-09 14:17:39 +02:00 committed by Willy Tarreau
parent 83215a44b8
commit 24a7a75be6
3 changed files with 210 additions and 191 deletions

include/proto/shctx.h

@@ -13,23 +13,24 @@
#ifndef SHCTX_H
#define SHCTX_H
#include <types/shctx.h>
#include <openssl/ssl.h>
#include <stdint.h>
#ifndef SHSESS_BLOCK_MIN_SIZE
#define SHSESS_BLOCK_MIN_SIZE 128
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
#include <pthread.h>
#else
#ifdef USE_SYSCALL_FUTEX
#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#endif
#endif
#endif
#ifndef SHSESS_MAX_DATA_LEN
#define SHSESS_MAX_DATA_LEN 4096
#endif
#ifndef SHCTX_APPNAME
#define SHCTX_APPNAME "haproxy"
#endif
#define SHCTX_E_ALLOC_CACHE -1
#define SHCTX_E_INIT_LOCK -2
/* Allocate shared memory context.
* <size> is the number of allocated blocks into cache (default 128 bytes)
@@ -40,12 +41,149 @@
* Returns: -1 on alloc failure, <size> if it performs context alloc,
* and 0 if cache is already allocated.
*/
int shared_context_init(int size, int use_shared_memory);
int shared_context_init(int size, int shared);
/* Set shared cache callbacks on an ssl context.
* Set session cache mode to server and disable openssl internal cache.
* Shared context MUST be firstly initialized */
void shared_context_set_cache(SSL_CTX *ctx);
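
For orientation, a minimal usage sketch of the two entry points above, as they would be called from an SSL initialization path; the 1024-block size and the error handling are illustrative assumptions, not taken from this patch:

SSL_CTX *ctx = SSL_CTX_new(SSLv23_server_method());

/* Allocate the cache once, before forking; the second argument asks
 * for a process-shared memory segment. Returns <size> on success,
 * 0 if already allocated, and a negative SHCTX_E_* code on failure. */
if (shared_context_init(1024, 1) < 0)
	return -1;

/* Route session store/lookup through the shared cache and disable
 * OpenSSL's internal session cache. */
shared_context_set_cache(ctx);
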
/* Lock functions */
#if defined (USE_PRIVATE_CACHE)
#define shared_context_lock(shctx)
#define shared_context_unlock(shctx)
#elif defined (USE_PTHREAD_PSHARED)
extern int use_shared_mem;
#define shared_context_lock(shctx) if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock(shctx) if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
#else
extern int use_shared_mem;
#ifdef USE_SYSCALL_FUTEX
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
syscall(SYS_futex, uaddr, FUTEX_WAIT, value, NULL, 0, 0);
}
static inline void _shared_context_awakelocker(unsigned int *uaddr)
{
syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, 0, 0);
}
#else /* internal spin lock */
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline void relax()
{
__asm volatile("rep;nop\n" ::: "memory");
}
#else /* if no x86_64 or i586 arch: use less optimized but generic asm */
static inline void relax()
{
__asm volatile("" ::: "memory");
}
#endif
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
int i;
for (i = 0; i < *count; i++) {
relax();
relax();
}
*count = *count << 1;
}
#define _shared_context_awakelocker(a)
#endif
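
Two details in the conditional block above deserve a note. With USE_SYSCALL_FUTEX, FUTEX_WAIT puts the caller to sleep only if *uaddr still equals <value> when the kernel checks it, so a release racing with the sleep cannot be missed, and FUTEX_WAKE wakes at most one sleeper. Without futexes, _shared_context_wait4lock() degenerates into a spin with exponential backoff: each call burns *count iterations of relax() and then doubles the budget (4, 8, 16, ...), and _shared_context_awakelocker() is a no-op since nobody actually sleeps. A hedged sketch of the calling pattern, mirroring the retry loop in _shared_context_lock() below:

unsigned int count = 4;             /* initial spin/backoff budget */
while (xchg(&shctx->waiters, 2)) {  /* still held: mark it contended */
	/* futex build: sleep while the lock word reads 2;
	 * fallback build: spin <count> iterations, then double it */
	_shared_context_wait4lock(&count, &shctx->waiters, 2);
}
/* the previous value was 0: we now hold the lock, marked contended */
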
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
__asm volatile("lock xchgl %0,%1"
: "=r" (x), "+m" (*ptr)
: "0" (x)
: "memory");
return x;
}
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
unsigned int ret;
__asm volatile("lock cmpxchgl %2,%1"
: "=a" (ret), "+m" (*ptr)
: "r" (new), "0" (old)
: "memory");
return ret;
}
static inline unsigned char atomic_dec(unsigned int *ptr)
{
unsigned char ret;
__asm volatile("lock decl %0\n"
"setne %1\n"
: "+m" (*ptr), "=qm" (ret)
:
: "memory");
return ret;
}
#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
return __sync_lock_test_and_set(ptr, x);
}
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
return __sync_val_compare_and_swap(ptr, old, new);
}
static inline unsigned char atomic_dec(unsigned int *ptr)
{
return __sync_sub_and_fetch(ptr, 1) ? 1 : 0;
}
#endif
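
On x86 the three primitives above compile down to single lock-prefixed instructions; on other targets they fall back to the GCC >= 4.1 __sync builtins. The one subtle contract is atomic_dec(): it returns 1 while the counter is still non-zero after the decrement (the setne, or the ?: in the builtin variant), which is exactly what the unlock path below relies on. A standalone sanity sketch, not from the patch:

unsigned int v = 0;
unsigned int prev;

prev = cmpxchg(&v, 0, 1);  /* v: 0 -> 1, returns the old value, 0 */
prev = xchg(&v, 2);        /* v: 1 -> 2, returns the old value, 1 */
prev = atomic_dec(&v);     /* v: 2 -> 1, returns 1: still non-zero */
prev = atomic_dec(&v);     /* v: 1 -> 0, returns 0: fully released */
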
static inline void _shared_context_lock()
{
unsigned int x;
unsigned int count = 4;
x = cmpxchg(&shctx->waiters, 0, 1);
if (x) {
if (x != 2)
x = xchg(&shctx->waiters, 2);
while (x) {
_shared_context_wait4lock(&count, &shctx->waiters, 2);
x = xchg(&shctx->waiters, 2);
}
}
}
static inline void _shared_context_unlock()
{
if (atomic_dec(&shctx->waiters)) {
shctx->waiters = 0;
_shared_context_awakelocker(&shctx->waiters);
}
}
#define shared_context_lock() if (use_shared_mem) _shared_context_lock()
#define shared_context_unlock() if (use_shared_mem) _shared_context_unlock()
#endif
#endif /* SHCTX_H */
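
Taken together, the functions above implement a three-state mutex over shctx->waiters, the same construction as the classic futex mutex described in Ulrich Drepper's "Futexes Are Tricky" (the attribution is an editorial observation, not something the patch states). A condensed walk-through:

/* shctx->waiters is the lock word:
 *   0 = unlocked
 *   1 = locked, no contention observed
 *   2 = locked, at least one waiter may be asleep
 *
 * lock:   cmpxchg(&waiters, 0, 1) takes the uncontended fast path.
 *         Otherwise the thread forces the word to 2 and waits while
 *         it reads 2; when xchg(&waiters, 2) returns 0 the previous
 *         holder has released the lock and this thread owns it,
 *         already marked as contended.
 *
 * unlock: atomic_dec(&waiters) returns non-zero when the word was 2
 *         (someone may be sleeping), so the word is reset to 0 and
 *         one waiter is woken; when it was 1, the decrement alone
 *         unlocks (1 - 1 = 0) and no wake-up is needed.
 */
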

include/types/shctx.h (new file, +51)

@@ -0,0 +1,51 @@
#ifndef __TYPES_SHCTX
#define __TYPES_SHCTX
#include <openssl/ssl.h> /* shared session depend of openssl */
#ifndef SHSESS_BLOCK_MIN_SIZE
#define SHSESS_BLOCK_MIN_SIZE 128
#endif
#ifndef SHSESS_MAX_DATA_LEN
#define SHSESS_MAX_DATA_LEN 4096
#endif
#ifndef SHCTX_APPNAME
#define SHCTX_APPNAME "haproxy"
#endif
#define SHCTX_E_ALLOC_CACHE -1
#define SHCTX_E_INIT_LOCK -2
struct shared_session {
struct ebmb_node key;
unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];
unsigned char data[SHSESS_BLOCK_MIN_SIZE];
};
struct shared_block {
union {
struct shared_session session;
unsigned char data[sizeof(struct shared_session)];
} data;
short int data_len;
struct shared_block *p;
struct shared_block *n;
};
struct shared_context {
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
pthread_mutex_t mutex;
#else
unsigned int waiters;
#endif
#endif
struct shared_block active;
struct shared_block free;
};
extern struct shared_context *shctx;
#endif
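
A note on the structures: blocks are fixed-size and chained through their p/n pointers into circular doubly-linked lists, with the active and free members of shared_context serving as dummy heads (these are what the list macros further down in src/shctx.c manipulate). A hypothetical helper, not part of the patch, showing the ring discipline under that assumption:

/* Detach block <s> from whichever ring it is on, then append it at
 * the tail of the ring headed by <head> (e.g. &shctx->free). */
static inline void shblock_move(struct shared_block *s,
                                struct shared_block *head)
{
	s->n->p = s->p;   /* unlink from current ring */
	s->p->n = s->n;
	s->p = head->p;   /* old tail becomes our predecessor */
	s->n = head;
	head->p->n = s;   /* old tail now points at us */
	head->p = s;      /* we are the new tail */
}
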

src/shctx.c

@@ -12,189 +12,19 @@
*/
#include <sys/mman.h>
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
#include <pthread.h>
#else
#ifdef USE_SYSCALL_FUTEX
#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#endif
#endif
#endif
#include <arpa/inet.h>
#include <ebmbtree.h>
#include <types/global.h>
#include "proto/shctx.h"
#include <proto/shctx.h>
#include <proto/openssl-compat.h>
struct shared_session {
struct ebmb_node key;
unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];
unsigned char data[SHSESS_BLOCK_MIN_SIZE];
};
#include <types/global.h>
#include <types/shctx.h>
struct shared_block {
union {
struct shared_session session;
unsigned char data[sizeof(struct shared_session)];
} data;
short int data_len;
struct shared_block *p;
struct shared_block *n;
};
struct shared_context {
#ifndef USE_PRIVATE_CACHE
#ifdef USE_PTHREAD_PSHARED
pthread_mutex_t mutex;
#else
unsigned int waiters;
#endif
#endif
struct shared_block active;
struct shared_block free;
};
/* Static shared context */
static struct shared_context *shctx = NULL;
/* Lock functions */
#if defined (USE_PRIVATE_CACHE)
#define shared_context_lock()
#define shared_context_unlock()
#elif defined (USE_PTHREAD_PSHARED)
static int use_shared_mem = 0;
#define shared_context_lock() if (use_shared_mem) pthread_mutex_lock(&shctx->mutex)
#define shared_context_unlock() if (use_shared_mem) pthread_mutex_unlock(&shctx->mutex)
#else
static int use_shared_mem = 0;
#ifdef USE_SYSCALL_FUTEX
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
syscall(SYS_futex, uaddr, FUTEX_WAIT, value, NULL, 0, 0);
}
static inline void _shared_context_awakelocker(unsigned int *uaddr)
{
syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, 0, 0);
}
#else /* internal spin lock */
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline void relax()
{
__asm volatile("rep;nop\n" ::: "memory");
}
#else /* if no x86_64 or i586 arch: use less optimized but generic asm */
static inline void relax()
{
__asm volatile("" ::: "memory");
}
#endif
static inline void _shared_context_wait4lock(unsigned int *count, unsigned int *uaddr, int value)
{
int i;
for (i = 0; i < *count; i++) {
relax();
relax();
}
*count = *count << 1;
}
#define _shared_context_awakelocker(a)
#endif
#if defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
__asm volatile("lock xchgl %0,%1"
: "=r" (x), "+m" (*ptr)
: "0" (x)
: "memory");
return x;
}
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
unsigned int ret;
__asm volatile("lock cmpxchgl %2,%1"
: "=a" (ret), "+m" (*ptr)
: "r" (new), "0" (old)
: "memory");
return ret;
}
static inline unsigned char atomic_dec(unsigned int *ptr)
{
unsigned char ret;
__asm volatile("lock decl %0\n"
"setne %1\n"
: "+m" (*ptr), "=qm" (ret)
:
: "memory");
return ret;
}
#else /* if no x86_64 or i586 arch: use less optimized gcc >= 4.1 built-ins */
static inline unsigned int xchg(unsigned int *ptr, unsigned int x)
{
return __sync_lock_test_and_set(ptr, x);
}
static inline unsigned int cmpxchg(unsigned int *ptr, unsigned int old, unsigned int new)
{
return __sync_val_compare_and_swap(ptr, old, new);
}
static inline unsigned char atomic_dec(unsigned int *ptr)
{
return __sync_sub_and_fetch(ptr, 1) ? 1 : 0;
}
#endif
static inline void _shared_context_lock(void)
{
unsigned int x;
unsigned int count = 4;
x = cmpxchg(&shctx->waiters, 0, 1);
if (x) {
if (x != 2)
x = xchg(&shctx->waiters, 2);
while (x) {
_shared_context_wait4lock(&count, &shctx->waiters, 2);
x = xchg(&shctx->waiters, 2);
}
}
}
static inline void _shared_context_unlock(void)
{
if (atomic_dec(&shctx->waiters)) {
shctx->waiters = 0;
_shared_context_awakelocker(&shctx->waiters);
}
}
#define shared_context_lock() if (use_shared_mem) _shared_context_lock()
#define shared_context_unlock() if (use_shared_mem) _shared_context_unlock()
struct shared_context *shctx = NULL;
#if !defined (USE_PRIVATE_CACHE)
int use_shared_mem = 0;
#endif
/* List Macros */