unify lock and owner fields of mutex structure

this change is necessary to free up one slot in the mutex structure so
that we can use doubly-linked lists in the implementation of robust
mutexes.
Rich Felker 2011-03-17 12:21:32 -04:00
parent e914f8b7ec
commit b1c43161c2
4 changed files with 6 additions and 8 deletions

src/internal/pthread_impl.h

@@ -47,7 +47,6 @@ struct pthread {
 #define _m_type __u.__i[0]
 #define _m_lock __u.__i[1]
 #define _m_waiters __u.__i[2]
-#define _m_owner __u.__i[3]
 #define _m_count __u.__i[4]
 #define _c_block __u.__i[0]
 #define _c_clock __u.__i[1]
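
The _m_* macros alias slots of the int array inside pthread_mutex_t, so dropping
_m_owner changes no structure size; it simply leaves __u.__i[3] unused for the
robust-mutex list work mentioned in the commit message. A rough sketch of the
aliasing, with the array size and type names assumed here rather than copied
from musl:

    /* pthread_mutex_t is approximately a union over an int array; each
     * _m_* macro above names one slot of that array. */
    typedef struct {
        union {
            int __i[6];
            void *__p[6];
        } __u;
    } sketch_pthread_mutex_t;

    /* after this commit: __i[0] type, __i[1] lock word (0 = unlocked,
     * otherwise the owner's tid), __i[2] waiter count, __i[4] recursion
     * count; __i[3] is the freed slot. */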

src/thread/pthread_mutex_lock.c

@@ -4,10 +4,11 @@ int pthread_mutex_lock(pthread_mutex_t *m)
 {
         int r;
         while ((r=pthread_mutex_trylock(m)) == EBUSY) {
+                if (!(r=m->_m_lock)) continue;
                 if (m->_m_type == PTHREAD_MUTEX_ERRORCHECK
-                 && m->_m_owner == pthread_self()->tid)
+                 && r == pthread_self()->tid)
                         return EDEADLK;
-                __wait(&m->_m_lock, &m->_m_waiters, 1, 0);
+                __wait(&m->_m_lock, &m->_m_waiters, r, 0);
         }
         return r;
 }
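
The observable behavior for error-checking mutexes is unchanged by the field
unification: a relock attempt by the owner still fails with EDEADLK, now detected
by comparing the caller's tid against the lock word itself. A caller-side
illustration using only the standard POSIX API (not part of the commit):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        pthread_mutexattr_t a;
        pthread_mutex_t m;
        pthread_mutexattr_init(&a);
        pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &a);

        pthread_mutex_lock(&m);
        /* second lock by the same thread: trylock returns EBUSY, then the
         * loop above sees r == our tid and returns EDEADLK */
        int r = pthread_mutex_lock(&m);
        printf("second lock: %s\n", r == EDEADLK ? "EDEADLK" : strerror(r));

        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&a);
        return 0;
    }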

src/thread/pthread_mutex_trylock.c

@@ -9,14 +9,13 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
         tid = pthread_self()->tid;
-        if (m->_m_owner == tid && m->_m_type == PTHREAD_MUTEX_RECURSIVE) {
+        if (m->_m_lock == tid && m->_m_type == PTHREAD_MUTEX_RECURSIVE) {
                 if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
                 m->_m_count++;
                 return 0;
         }
-        if (m->_m_owner || a_xchg(&m->_m_lock, 1)) return EBUSY;
-        m->_m_owner = tid;
+        if (m->_m_lock || a_cas(&m->_m_lock, 0, tid)) return EBUSY;
         m->_m_count = 1;
         return 0;
 }
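
a_xchg and a_cas are musl-internal atomics whose definitions are not part of this
diff. A minimal sketch of the new acquire protocol using C11 atomics in their
place, assuming only the convention the patch establishes (0 means unlocked, any
other value is the owner's tid):

    #include <errno.h>
    #include <stdatomic.h>

    /* stand-in for the unified _m_lock word */
    static _Atomic int lockword;

    static int sketch_trylock(int tid)
    {
        int expected = 0;
        /* acquire only by moving the word 0 -> tid in one atomic step,
         * as a_cas(&m->_m_lock, 0, tid) does above; storing the tid in
         * the lock word is what makes a separate _m_owner unnecessary */
        if (atomic_compare_exchange_strong(&lockword, &expected, tid))
            return 0;
        return EBUSY;
    }

    static void sketch_unlock(void)
    {
        atomic_store(&lockword, 0); /* back to "unlocked" */
    }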

src/thread/pthread_mutex_unlock.c

@@ -3,13 +3,12 @@
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
         if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
-                if (m->_m_owner != pthread_self()->tid)
+                if (m->_m_lock != pthread_self()->tid)
                         return EPERM;
                 if (m->_m_type == PTHREAD_MUTEX_RECURSIVE && --m->_m_count)
                         return 0;
         }
-        m->_m_owner = 0;
         m->_m_lock = 0;
         if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
         return 0;
 }
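
__wait and __wake are also musl internals not shown in this diff; assuming they
are thin futex wrappers (the waiters-counter argument is omitted here), the point
of passing r instead of the constant 1 in the lock path is that a futex waiter
only sleeps while the lock word still holds the exact value it observed, and with
the unified field that value is the owner's tid rather than 1. A Linux-only
sketch with hypothetical helpers standing in for them:

    #define _GNU_SOURCE
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* hypothetical stand-in for __wait: sleep only while *addr still
     * equals the value the caller observed (the owner's tid) */
    static int futex_wait(volatile int *addr, int expected)
    {
        return syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
    }

    /* hypothetical stand-in for __wake: wake up to n sleepers after the
     * lock word has been reset to 0, as in the unlock path above */
    static int futex_wake(volatile int *addr, int n)
    {
        return syscall(SYS_futex, addr, FUTEX_WAKE, n, NULL, NULL, 0);
    }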