MEDIUM: stick-table: touch updates under an upgradable read lock

Instead of unconditionally taking the updates' write lock in
stktable_touch_with_exp(), let's perform the check under a read lock and
upgrade it to carry out the update only when needed. Most of the time
under high load there is nothing to update, because the entry is touched
again before having been synchronized, hence is still present in the
update tree. These updates are rare and the contention on them is not
expected to be very high, so at the first failure to upgrade we retry
directly with a write lock.

By doing so, the performance has almost doubled again, from 1140k to
2050k with a peers section enabled. The contention is now on taking the
read lock itself, so there is little left to be gained in this function
beyond this.
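
As a reading aid, here is a minimal standalone sketch of the check-then-upgrade pattern with plain POSIX rwlocks; all names in it (touch, needs_update, perform_update, pending) are illustrative, not HAProxy's. POSIX has no atomic read-to-write upgrade, so the sketch always restarts with a write lock when an update turns out to be needed; the patch below only does that restart when its atomic upgrade attempt fails:

/* Sketch of the check-then-upgrade pattern with POSIX rwlocks.
 * Illustrative names only; this is not HAProxy code.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t updt_lock = PTHREAD_RWLOCK_INITIALIZER;
static int pending = 1;        /* toy stand-in for "entry needs scheduling" */

static bool needs_update(void) { return pending != 0; }
static void perform_update(void) { pending = 0; }

void touch(void)
{
	bool use_wrlock = false;

try_lock_again:
	if (use_wrlock)
		pthread_rwlock_wrlock(&updt_lock);
	else
		pthread_rwlock_rdlock(&updt_lock);

	if (needs_update()) {
		if (!use_wrlock) {
			/* no atomic upgrade in POSIX: drop the read lock
			 * and redo the whole check under a write lock
			 */
			pthread_rwlock_unlock(&updt_lock);
			use_wrlock = true;
			goto try_lock_again;
		}
		perform_update();  /* write lock held here */
	}
	pthread_rwlock_unlock(&updt_lock);
}

In the common case (nothing to update) only the read lock is ever taken, which is what lets many threads touch entries concurrently; the write lock is paid for only by the rare thread that actually has to (re)schedule an entry.
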
Willy Tarreau, 2023-08-07 21:03:24 +02:00
parent 87e072eea5
commit cfeca3a3a3
1 changed file (src/stick_table.c) with 50 additions and 12 deletions


@@ -433,7 +433,7 @@ struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
 void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local, int expire, int decrefcnt)
 {
 	struct eb32_node * eb;
-	int locked = 0;
+	int use_wrlock = 0;
 	int do_wakeup = 0;
 
 	if (expire != HA_ATOMIC_LOAD(&ts->expire)) {
@@ -444,16 +444,39 @@ void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local,
 	/* If sync is enabled */
 	if (t->sync_task) {
-		if (local) {
-			/* If this entry is not in the tree
-			 * or not scheduled for at least one peer.
-			 */
-			if (!locked++)
-				HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+	try_lock_again:
+		/* We'll need to reliably check that the entry is in the tree.
+		 * It's only inserted/deleted using a write lock so a read lock
+		 * is sufficient to verify this. We may then need to upgrade it
+		 * to perform an update (which is rare under load), and if the
+		 * upgrade fails, we'll try again with a write lock directly.
+		 */
+		if (use_wrlock)
+			HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+		else
+			HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->updt_lock);
+
+		if (local) {
+			/* Check if this entry is not in the tree or not
+			 * scheduled for at least one peer.
+			 */
 			if (!ts->upd.node.leaf_p
 			    || (int)(t->commitupdate - ts->upd.key) >= 0
 			    || (int)(ts->upd.key - t->localupdate) >= 0) {
+				/* Time to upgrade the read lock to write lock if needed */
+				if (!use_wrlock) {
+					if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &t->updt_lock) != 0) {
+						/* failed, try again */
+						HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+						use_wrlock = 1;
+						goto try_lock_again;
+					}
+					HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &t->updt_lock);
+					use_wrlock = 1;
+				}
+
+				/* here we're write-locked */
+
 				ts->upd.key = ++t->update;
 				t->localupdate = t->update;
 				eb32_delete(&ts->upd);
 				eb = eb32_insert(&t->updates, &ts->upd);
 				if (eb != &ts->upd) {
@@ -467,10 +490,22 @@ void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local,
 			}
 		}
 		else {
 			/* If this entry is not in the tree */
-			if (!locked++)
-				HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
 			if (!ts->upd.node.leaf_p) {
+				/* Time to upgrade the read lock to write lock if needed */
+				if (!use_wrlock) {
+					if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &t->updt_lock) != 0) {
+						/* failed, try again */
+						HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+						use_wrlock = 1;
+						goto try_lock_again;
+					}
+					HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &t->updt_lock);
+					use_wrlock = 1;
+				}
+
+				/* here we're write-locked */
+
 				ts->upd.key= (++t->update)+(2147483648U);
 				eb = eb32_insert(&t->updates, &ts->upd);
 				if (eb != &ts->upd) {
@@ -479,10 +514,13 @@ void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local,
 				}
 			}
 		}
-	}
-
-	if (locked)
-		HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+
+		/* drop the lock now */
+		if (use_wrlock)
+			HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+		else
+			HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+	}
 
 	if (decrefcnt)
 		HA_ATOMIC_DEC(&ts->ref_cnt);
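
The upgrade sequence that appears twice in the patch can be condensed into the following reading aid (not code from the commit; the helper name is ours). HAProxy's rwlocks support an intermediate "seek" state: HA_RWLOCK_TRYRDTOSK atomically tries to turn a held read lock into a seek lock and fails, without losing the read lock, when the upgrade cannot be granted immediately, while HA_RWLOCK_SKTOWR then completes the upgrade once the remaining readers have left:

/* Reading aid, not part of the commit: the two-step upgrade factored
 * into a helper. Returns 1 with the write lock held; returns 0 with
 * the read lock still held, in which case the caller must release it
 * and restart the whole sequence with a plain write lock, as the
 * goto try_lock_again path above does.
 */
static inline int try_upgrade_updt_lock(struct stktable *t)
{
	/* read -> seek: atomic, may fail under contention */
	if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &t->updt_lock) != 0)
		return 0;

	/* seek -> write: completes once the remaining readers are gone */
	HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &t->updt_lock);
	return 1;
}

This is also why the fallback takes a full write lock rather than retrying the upgrade: a failed HA_RWLOCK_TRYRDTOSK usually means another thread is already upgrading or writing, and as the commit message notes, these updates are rare enough that the simple retry-with-write-lock path is cheap.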