mirror of git://git.musl-libc.org/musl
sanitize number of spins in userspace before futex wait
the previous spin limit of 10000 was utterly unreasonable. empirically, it could consume up to 200000 cycles, whereas a failed futex wait (EAGAIN) typically takes 1000 cycles or less, and even a true wait/wake round seems much less expensive.

the new counts (100 for general wait, 200 in barrier) were simply chosen to be in the range of what's reasonable without having adverse effects on casual micro-benchmark tests I have been running. they may still be too high, from a standpoint of not wasting cpu cycles, but at least they're a lot better than before. rigorous testing across different archs and cpu models should be performed at some point to determine whether further adjustments should be made.
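For context, here is a minimal sketch of the spin-then-futex-wait pattern the message describes: spin briefly in userspace and only fall back to FUTEX_WAIT if the value still has not changed. The helper name wait_on and the SPIN_LIMIT constant are illustrative assumptions, not musl internals; the sketch assumes Linux and the libc syscall() wrapper.

    /* Illustrative sketch of spin-then-futex-wait; not musl's __wait(). */
    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    #define SPIN_LIMIT 100  /* small bound, in the spirit of the new count */

    void wait_on(volatile int *addr, int val)
    {
            /* Spin briefly in userspace: if *addr changes soon, the
             * kernel is never entered at all. */
            for (int spins = SPIN_LIMIT; spins > 0 && *addr == val; spins--)
                    ;  /* volatile read on each iteration */

            /* Still unchanged: sleep in the kernel. FUTEX_WAIT returns
             * immediately (EAGAIN) if *addr != val by the time it checks,
             * which is the cheap "failed futex wait" compared above. */
            while (*addr == val)
                    syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
    }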
commit b8a9c90e4f
parent ea818ea834
@@ -2,7 +2,7 @@
 void __wait(volatile int *addr, volatile int *waiters, int val, int priv)
 {
-        int spins=10000;
+        int spins=100;
         if (priv) priv = 128;
         while (spins--) {
                 if (*addr==val) a_spin();
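The spin body calls a_spin(), musl's per-architecture CPU-relaxation hint. As an assumption for illustration (not copied from the tree at this revision), an x86 definition would typically be a single pause instruction:

    /* Hypothetical x86 a_spin(): the "pause" hint tells the core it is in
     * a spin-wait loop, reducing power use and speculation penalties on
     * loop exit. Architectures without such a hint can make this a no-op. */
    static inline void a_spin(void)
    {
            __asm__ __volatile__ ("pause" : : : "memory");
    }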
@@ -79,7 +79,7 @@ int pthread_barrier_wait(pthread_barrier_t *b)
         /* First thread to enter the barrier becomes the "instance owner" */
         if (!inst) {
                 struct instance new_inst = { 0 };
-                int spins = 10000;
+                int spins = 200;
                 b->_b_inst = inst = &new_inst;
                 a_store(&b->_b_lock, 0);
                 if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
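A small, standard POSIX program that exercises the barrier path changed above: with a two-thread barrier, whichever thread arrives first becomes the barrier's "instance owner" and, per this commit, spins at most 200 iterations in that path before futex-waiting for the other thread. The program is illustrative and not part of this commit; build with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_barrier_t barrier;

    static void *worker(void *arg)
    {
            /* Exactly one thread gets PTHREAD_BARRIER_SERIAL_THREAD. */
            int r = pthread_barrier_wait(&barrier);
            if (r == PTHREAD_BARRIER_SERIAL_THREAD)
                    printf("thread %ld released the barrier\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[2];
            pthread_barrier_init(&barrier, NULL, 2);
            for (long i = 0; i < 2; i++)
                    pthread_create(&t[i], NULL, worker, (void *)i);
            for (int i = 0; i < 2; i++)
                    pthread_join(t[i], NULL);
            pthread_barrier_destroy(&barrier);
            return 0;
    }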