MEDIUM: cache: Switch shctx spinlock to rwlock and restrict its scope

Since a lock on the cache tree was added in the latest cache changes, we
no longer need the shared_context's lock to protect anything beyond
purely shared_context-related data. This already existing lock will now
only cover the 'avail' list of the shared_context. It can then be
changed from a spinlock to an rwlock, because some operations only need
to traverse the avail list without modifying it.

Apart from changing the type of the shctx lock, the main modification
introduced by this patch is to limit the amount of code covered by it.
This lock no longer needs to cover any code strictly related to the
cache tree.
Author: Remi Tricot-Le Breton, 2023-11-16 17:38:21 +01:00
Committed by: William Lallemand
Parent: a0d7c290ec
Commit: ed35b9411a
5 changed files with 59 additions and 42 deletions
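
To make the locking change concrete, here is a minimal, self-contained sketch of the access pattern an rwlock enables. It uses plain POSIX rwlocks rather than HAProxy's HA_RWLOCK_* wrappers, and the names avail_lock, nbav, report_avail and reserve_block are illustrative stand-ins for the shctx avail-list handling, not code from this patch.

/* Stand-alone sketch (not HAProxy code), build with: cc -pthread rwlock_demo.c
 * Readers that only look at the "avail" state can share the lock; writers
 * that reserve or reattach blocks still take it exclusively. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t avail_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nbav = 8; /* stand-in for shctx->nbav */

/* read-only path: several threads may hold the read lock at the same time */
static void *report_avail(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&avail_lock);
	printf("available blocks: %d\n", nbav);
	pthread_rwlock_unlock(&avail_lock);
	return NULL;
}

/* modifying path: exclusive lock, like reserving a block from the avail list */
static void *reserve_block(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&avail_lock);
	if (nbav > 0)
		nbav--;
	pthread_rwlock_unlock(&avail_lock);
	return NULL;
}

int main(void)
{
	pthread_t r1, r2, w;

	pthread_create(&w, NULL, reserve_block, NULL);
	pthread_create(&r1, NULL, report_avail, NULL);
	pthread_create(&r2, NULL, report_avail, NULL);
	pthread_join(w, NULL);
	pthread_join(r1, NULL);
	pthread_join(r2, NULL);
	return 0;
}

Several report_avail threads may hold the lock at once, which a spinlock would not allow; reserve_block still serializes with everyone, matching how block reservation and reattachment keep taking the shctx lock exclusively in the hunks below.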


@@ -46,7 +46,7 @@ struct shared_block {
};
struct shared_context {
__decl_thread(HA_SPINLOCK_T lock);
__decl_thread(HA_RWLOCK_T lock);
struct list avail; /* list for active and free blocks */
unsigned int nbav; /* number of available blocks */
unsigned int max_obj_size; /* maximum object size (in bytes). */


@@ -37,9 +37,26 @@ int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
extern int use_shared_mem;
#define shctx_lock(shctx) if (use_shared_mem) HA_SPIN_LOCK(SHCTX_LOCK, &shctx->lock)
#define shctx_unlock(shctx) if (use_shared_mem) HA_SPIN_UNLOCK(SHCTX_LOCK, &shctx->lock)
static inline void shctx_rdlock(struct shared_context *shctx)
{
if (use_shared_mem)
HA_RWLOCK_RDLOCK(SHCTX_LOCK, &shctx->lock);
}
static inline void shctx_rdunlock(struct shared_context *shctx)
{
if (use_shared_mem)
HA_RWLOCK_RDUNLOCK(SHCTX_LOCK, &shctx->lock);
}
static inline void shctx_wrlock(struct shared_context *shctx)
{
if (use_shared_mem)
HA_RWLOCK_WRLOCK(SHCTX_LOCK, &shctx->lock);
}
static inline void shctx_wrunlock(struct shared_context *shctx)
{
if (use_shared_mem)
HA_RWLOCK_WRUNLOCK(SHCTX_LOCK, &shctx->lock);
}
/* List Macros */
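
As a hedged illustration of how these helpers are meant to be paired (a hypothetical helper, not part of this patch), a pure read of shared_context state can now take the shared lock, leaving exclusive acquisition to the paths that actually modify the avail list:

/* hypothetical example, assuming the HAProxy tree: read-only access to
 * shctx->nbav only needs the shared lock introduced above */
static inline unsigned int shctx_avail_snapshot(struct shared_context *shctx)
{
	unsigned int nbav;

	shctx_rdlock(shctx);    /* several readers may hold this concurrently */
	nbav = shctx->nbav;     /* consistent view of the available block count */
	shctx_rdunlock(shctx);

	return nbav;
}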


@@ -575,9 +575,9 @@ cache_store_strm_deinit(struct stream *s, struct filter *filter)
/* Everything should be released in the http_end filter, but we need to do it
* there too, in case of errors */
if (st && st->first_block) {
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
}
if (st) {
pool_free(pool_head_cache_st, st);
@@ -636,9 +636,9 @@ static inline void disable_cache_entry(struct cache_st *st,
eb32_delete(&object->eb);
object->eb.key = 0;
cache_wrunlock(cache);
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
pool_free(pool_head_cache_st, st);
}
@@ -711,13 +711,14 @@ cache_store_http_payload(struct stream *s, struct filter *filter, struct http_ms
}
end:
shctx_lock(shctx);
shctx_wrlock(shctx);
fb = shctx_row_reserve_hot(shctx, st->first_block, trash.data);
if (!fb) {
shctx_unlock(shctx);
shctx_wrunlock(shctx);
goto no_cache;
}
shctx_unlock(shctx);
shctx_wrunlock(shctx);
ret = shctx_row_data_append(shctx, st->first_block,
(unsigned char *)b_head(&trash), b_data(&trash));
@@ -749,12 +750,12 @@ cache_store_http_end(struct stream *s, struct filter *filter,
object = (struct cache_entry *)st->first_block->data;
shctx_lock(shctx);
shctx_wrlock(shctx);
/* The whole payload was cached, the entry can now be used. */
object->complete = 1;
/* remove from the hotlist */
shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
}
if (st) {
@@ -1172,13 +1173,13 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
}
cache_wrunlock(cache);
shctx_lock(shctx);
shctx_wrlock(shctx);
first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry));
shctx_wrunlock(shctx);
if (!first) {
shctx_unlock(shctx);
goto out;
}
shctx_unlock(shctx);
/* the received memory is not initialized, we need at least to mark
* the object as not indexed yet.
*/
@@ -1274,12 +1275,12 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
if (set_secondary_key_encoding(htx, object->secondary_key))
goto out;
shctx_lock(shctx);
shctx_wrlock(shctx);
if (!shctx_row_reserve_hot(shctx, first, trash.data)) {
shctx_unlock(shctx);
shctx_wrunlock(shctx);
goto out;
}
shctx_unlock(shctx);
shctx_wrunlock(shctx);
/* cache the headers in a http action because it allows to chose what
* to cache, for example you might want to cache a response before
@@ -1311,9 +1312,9 @@ out:
cache_wrunlock(cache);
object->eb.key = 0;
}
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, first);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
}
return ACT_RET_CONT;
@@ -1334,9 +1335,9 @@ static void http_cache_applet_release(struct appctx *appctx)
struct shared_context *shctx = shctx_ptr(cache);
struct shared_block *first = block_ptr(cache_ptr);
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, first);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
}
@@ -1854,7 +1855,6 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
else
_HA_ATOMIC_INC(&px->be_counters.p.http.cache_lookups);
shctx_lock(shctx_ptr(cache));
cache_rdlock(cache);
res = entry_exist(cache, s->txn->cache_hash, 0);
/* We must not use an entry that is not complete but the check will be
@@ -1863,28 +1863,29 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
if (res) {
struct appctx *appctx;
entry_block = block_ptr(res);
shctx_wrlock(shctx);
shctx_row_detach(shctx, entry_block);
shctx_wrunlock(shctx);
cache_rdunlock(cache);
shctx_unlock(shctx);
/* In case of Vary, we could have multiple entries with the same
* primary hash. We need to calculate the secondary hash in order
* to find the actual entry we want (if it exists). */
if (res->secondary_key_signature) {
if (!http_request_build_secondary_key(s, res->secondary_key_signature)) {
shctx_lock(shctx);
cache_rdlock(cache);
sec_entry = secondary_entry_exist(cache, res,
s->txn->cache_secondary_hash, 0);
if (sec_entry && sec_entry != res) {
/* The wrong row was added to the hot list. */
shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
entry_block = block_ptr(sec_entry);
shctx_row_detach(shctx, entry_block);
shctx_wrunlock(shctx);
}
res = sec_entry;
cache_rdunlock(cache);
shctx_unlock(shctx);
}
else
res = NULL;
@@ -1895,9 +1896,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
* can't use the cache's entry and must forward the request to
* the server. */
if (!res || !res->complete) {
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
return ACT_RET_CONT;
}
@@ -1920,14 +1921,13 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
return ACT_RET_CONT;
} else {
s->target = NULL;
shctx_lock(shctx);
shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
shctx_unlock(shctx);
shctx_wrunlock(shctx);
return ACT_RET_CONT;
}
}
cache_rdunlock(cache);
shctx_unlock(shctx);
/* Shared context does not need to be locked while we calculate the
* secondary hash. */
@@ -2677,17 +2677,16 @@ static int cli_io_handler_show_cache(struct appctx *appctx)
unsigned int i;
struct shared_context *shctx = shctx_ptr(cache);
shctx_lock(shctx);
next_key = ctx->next_key;
if (!next_key) {
shctx_rdlock(shctx);
chunk_printf(buf, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);
shctx_rdunlock(shctx);
if (applet_putchk(appctx, buf) == -1) {
shctx_unlock(shctx);
goto yield;
}
}
shctx_unlock(shctx);
ctx->cache = cache;


@@ -291,6 +291,7 @@ int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
shctx->nbav = 0;
LIST_INIT(&shctx->avail);
HA_RWLOCK_INIT(&shctx->lock);
shctx->block_size = blocksize;
shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;


@@ -4408,10 +4408,10 @@ int sh_ssl_sess_new_cb(SSL *ssl, SSL_SESSION *sess)
i2d_SSL_SESSION(sess, &p);
shctx_lock(ssl_shctx);
shctx_wrlock(ssl_shctx);
/* store to cache */
sh_ssl_sess_store(encid, encsess, data_len);
shctx_unlock(ssl_shctx);
shctx_wrunlock(ssl_shctx);
err:
/* reset original length values */
SSL_SESSION_set1_id(sess, encid, sid_length);
@@ -4442,13 +4442,13 @@ SSL_SESSION *sh_ssl_sess_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *k
}
/* lock cache */
shctx_lock(ssl_shctx);
shctx_wrlock(ssl_shctx);
/* lookup for session */
sh_ssl_sess = sh_ssl_sess_tree_lookup(key);
if (!sh_ssl_sess) {
/* no session found: unlock cache and exit */
shctx_unlock(ssl_shctx);
shctx_wrunlock(ssl_shctx);
_HA_ATOMIC_INC(&global.shctx_misses);
return NULL;
}
@@ -4458,7 +4458,7 @@ SSL_SESSION *sh_ssl_sess_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *k
shctx_row_data_get(ssl_shctx, first, data, sizeof(struct sh_ssl_sess_hdr), first->len-sizeof(struct sh_ssl_sess_hdr));
shctx_unlock(ssl_shctx);
shctx_wrunlock(ssl_shctx);
/* decode ASN1 session */
p = data;
@@ -4490,7 +4490,7 @@ void sh_ssl_sess_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
sid_data = tmpkey;
}
shctx_lock(ssl_shctx);
shctx_wrlock(ssl_shctx);
/* lookup for session */
sh_ssl_sess = sh_ssl_sess_tree_lookup(sid_data);
@@ -4500,7 +4500,7 @@ void sh_ssl_sess_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
}
/* unlock cache */
shctx_unlock(ssl_shctx);
shctx_wrunlock(ssl_shctx);
}
/* Set session cache mode to server and disable openssl internal cache.