streamline mutex unlock to remove a useless branch, use a_store to unlock

this roughly halves the cost of pthread_mutex_unlock, at least for
non-robust, normal-type mutexes.
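
To see where the savings come from, compare the two tails of the function for a plain normal-type mutex. The sketch below is a toy model, not musl's code: the struct layout and stub helpers are invented for illustration, and only the branch structure mirrors the change.

/* Toy model of the unlock tail before and after this commit.
 * Struct layout and stubs are invented for illustration only. */
struct toy_mutex {
	volatile int _m_lock;
	volatile int _m_waiters;
	int _m_type;
};

static void a_store(volatile int *p, int x) { __atomic_store_n(p, x, __ATOMIC_SEQ_CST); }
static void __wake(volatile int *addr, int cnt, int priv) { (void)addr; (void)cnt; (void)priv; }

/* before: every unlock fell through to shared code that re-tested
 * the mutex type after the store */
static void unlock_old(struct toy_mutex *m)
{
	m->_m_lock = 0;                       /* plain, non-atomic store */
	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
	if (m->_m_type >= 4) { /* robust-list cleanup, dead for normal mutexes */ }
}

/* after: the normal-type path is just an atomic store plus the wake check */
static void unlock_new(struct toy_mutex *m)
{
	a_store(&m->_m_lock, 0);
	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
}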

the a_store change is in preparation for future support of archs which
require a memory barrier or special atomic store operation, and also
should prevent the possibility of the compiler misordering writes.
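
As of this commit a_store is arch-specific; as an illustration of the contract described above, here is a hedged stand-in using the GCC/Clang builtin. This is not musl's actual definition, which lives in each arch's atomic.h as inline asm.

/* Illustrative stand-in for musl's arch-specific a_store. */
static inline void a_store(volatile int *p, int x)
{
	/* A seq_cst store is a real memory barrier on archs that need
	 * one, and a compiler-level ordering point everywhere, so the
	 * store that releases the mutex cannot be moved before the
	 * robust-list updates that precede it. */
	__atomic_store_n(p, x, __ATOMIC_SEQ_CST);
}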
commit 02084109f0
parent 124b4ebc8a
Rich Felker  2011-03-30 09:06:00 -04:00

src/thread/pthread_mutex_unlock.c

@@ -14,11 +14,15 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 		if (m->_m_type >= 4) {
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+			a_store(&m->_m_lock, 0);
+			self->robust_list.pending = 0;
+		} else {
+			a_store(&m->_m_lock, 0);
 		}
+	} else {
+		a_store(&m->_m_lock, 0);
 	}
-	m->_m_lock = 0;
 	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
-	if (m->_m_type >= 4) self->robust_list.pending = 0;
 	return 0;
 }
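
Pieced together from the hunk, the unlock logic after this commit reads as follows. The ownership and recursion checks above the hunk are summarized as a comment; consult the tree at this commit for the full file.

	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
		/* ... error, ownership, and recursion checks elided ... */
		if (m->_m_type >= 4) {
			/* robust mutex: unlink from the robust list, keeping
			 * the pending slot set until after the lock word is
			 * atomically cleared, so the operation can be
			 * recovered if the thread dies partway through */
			self->robust_list.pending = &m->_m_next;
			*(void **)m->_m_prev = m->_m_next;
			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
			a_store(&m->_m_lock, 0);
			self->robust_list.pending = 0;
		} else {
			a_store(&m->_m_lock, 0);
		}
	} else {
		a_store(&m->_m_lock, 0);
	}
	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
	return 0;

Each path now performs its own a_store, so the normal-type case never touches the robust-list bookkeeping at all, which is the "useless branch" the commit title refers to.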