/* Accessor macros mapping pthread object fields onto the opaque
 * union storage (__u.__i) of the public pthread types.
 * NOTE(review): the stray '+' diff marker on the _m_count line has
 * been removed — it made the file un-preprocessable. */

/* pthread_mutex_t fields */
#define _m_lock __u.__i[1]    /* 0 = unlocked, 1 = locked */
#define _m_waiters __u.__i[2] /* nonzero when threads may be blocked on _m_lock */
#define _m_owner __u.__i[3]   /* tid of owning thread (recursive/errorcheck types) */
#define _m_count __u.__i[4]   /* recursion depth for PTHREAD_MUTEX_RECURSIVE */

/* pthread_cond_t fields */
#define _c_block __u.__i[0]
#define _c_clock __u.__i[1]

/* pthread_rwlock_t fields */
#define _rw_wrlock __u.__i[0]
/* Attempt to acquire the mutex without blocking.
 *
 * Returns 0 on success; EBUSY if the lock is held by another thread;
 * EDEADLK for an ERRORCHECK mutex the caller already owns; EAGAIN if a
 * RECURSIVE mutex's recursion count would overflow.
 *
 * Reconstructed post-patch state of the diff: the recursion depth now
 * lives in its own _m_count field (not overloaded onto _m_lock), the
 * count is incremented non-atomically (safe: only the owner touches
 * it), and owner/count are recorded for every non-NORMAL type so that
 * unlock can enforce ownership. */
int pthread_mutex_trylock(pthread_mutex_t *m)
{
	pthread_t self;
	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
		self = pthread_self();
		/* Recursive re-acquisition by the current owner: no atomics
		 * needed, the owner is the only writer of _m_count. */
		if (m->_m_type == PTHREAD_MUTEX_RECURSIVE
		 && m->_m_owner == self->tid) {
			if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
			m->_m_count++;
			return 0;
		}
	}
	if (a_xchg(&m->_m_lock, 1)) {
		/* Lock is already held. self is initialized here: the
		 * ERRORCHECK type check implies the != NORMAL branch above
		 * ran. Braces added to make the else-binding explicit. */
		if (m->_m_type == PTHREAD_MUTEX_ERRORCHECK
		 && m->_m_owner == self->tid) return EDEADLK;
		else return EBUSY;
	}
	/* Acquired: record ownership for recursive/errorcheck unlock. */
	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
		m->_m_owner = self->tid;
		m->_m_count = 1;
	}
	return 0;
}
int pthread_mutex_unlock(pthread_mutex_t *m)
{
- if (m->_m_type == PTHREAD_MUTEX_RECURSIVE) {
- if (a_fetch_add(&m->_m_lock, -1)==1 && m->_m_waiters)
- __wake(&m->_m_lock, 1, 0);
- return 0;
+ if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
+ if (m->_m_owner != pthread_self()->tid)
+ return EPERM;
+ if (m->_m_type == PTHREAD_MUTEX_RECURSIVE && --m->_m_count)
+ return 0;
}
- if (m->_m_type == PTHREAD_MUTEX_ERRORCHECK
- && m->_m_owner != pthread_self()->tid)
- return EPERM;
-
m->_m_owner = 0;
m->_m_lock = 0;
if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);