int pthread_mutex_trylock(pthread_mutex_t *);
int pthread_mutex_timedlock(pthread_mutex_t *, const struct timespec *);
int pthread_mutex_destroy(pthread_mutex_t *);
+int pthread_mutex_consistent(pthread_mutex_t *);
int pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *);
int pthread_cond_destroy(pthread_cond_t *);
void **tsd;
pthread_attr_t attr;
volatile int dead;
+ struct {
+ void **head;
+ long off;
+ void *pending;
+ } robust_list;
};
#define __SU (sizeof(size_t)/sizeof(int))
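The robust_list member added to struct pthread mirrors, field for field, the robust_list_head layout the kernel expects from set_robust_list(2); the 3*sizeof(long) length passed to that syscall in the trylock hunk below is the size of this structure. For reference only (these definitions come from the kernel's <linux/futex.h> UAPI header and are not part of this patch):

struct robust_list {
	struct robust_list *next;
};

struct robust_list_head {
	struct robust_list list;              /* corresponds to head: first held lock node */
	long futex_offset;                    /* corresponds to off: offset from a node to its futex word */
	struct robust_list *list_op_pending;  /* corresponds to pending: lock/unlock in progress */
};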
--- /dev/null
+#include "pthread_impl.h"
+
+int pthread_mutex_consistent(pthread_mutex_t *m)
+{
+ if (m->_m_type < 8) return EINVAL;
+ if ((m->_m_lock & 0x3fffffff) != pthread_self()->tid)
+ return EPERM;
+ m->_m_type -= 8;
+ return 0;
+}
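A minimal usage sketch of the new function (nothing below is part of the patch, and the repair step is a hypothetical application hook): a locker that receives EOWNERDEAD repairs whatever state the dead owner protected, then calls pthread_mutex_consistent before unlocking; skipping that call leaves the mutex permanently unrecoverable, which is what the _m_type >= 8 paths implement.

#include <pthread.h>
#include <errno.h>

/* Sketch: take a robust mutex, recovering it if the previous owner died. */
static int lock_shared(pthread_mutex_t *m)
{
	int r = pthread_mutex_lock(m);
	if (r == EOWNERDEAD) {
		/* repair_shared_state();  -- hypothetical, application-specific */
		pthread_mutex_consistent(m);   /* clear the "inconsistent" flag */
		r = 0;
	}
	return r;  /* ENOTRECOVERABLE if an earlier owner death was never repaired */
}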
int pthread_mutex_init(pthread_mutex_t *m, const pthread_mutexattr_t *a)
{
memset(m, 0, sizeof *m);
- if (a) m->_m_type = *a & 3;
+ if (a) m->_m_type = *a & 7;
return 0;
}
int pthread_mutex_lock(pthread_mutex_t *m)
{
int r;
while ((r=pthread_mutex_trylock(m)) == EBUSY) {
- if (!(r=m->_m_lock)) continue;
- if (m->_m_type == PTHREAD_MUTEX_ERRORCHECK
- && r == pthread_self()->tid)
+ if (!(r=m->_m_lock) || (r&0x40000000)) continue;
+ if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
+ && (r&0x3fffffff) == pthread_self()->tid)
return EDEADLK;
__wait(&m->_m_lock, &m->_m_waiters, r, 0);
}
int pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *at)
{
int r, w=0;
while ((r=pthread_mutex_trylock(m)) == EBUSY) {
+ if (!(r=m->_m_lock) || (r&0x40000000)) continue;
if (!w) a_inc(&m->_m_waiters), w++;
- if (__timedwait(&m->_m_lock, 1, CLOCK_REALTIME, at, 0) == ETIMEDOUT) {
+ if (__timedwait(&m->_m_lock, r, CLOCK_REALTIME, at, 0) == ETIMEDOUT) {
if (w) a_dec(&m->_m_waiters);
return ETIMEDOUT;
}
int pthread_mutex_trylock(pthread_mutex_t *m)
{
int tid;
+ int own;
+ pthread_t self;
if (m->_m_type == PTHREAD_MUTEX_NORMAL)
return (m->_m_lock || a_swap(&m->_m_lock, 1)) ? EBUSY : 0;
- tid = pthread_self()->tid;
+ self = pthread_self();
+ tid = self->tid | 0x80000000;
- if (m->_m_lock == tid && m->_m_type == PTHREAD_MUTEX_RECURSIVE) {
+ if (m->_m_type >= 4) {
+ if (!self->robust_list.off)
+ syscall2(__NR_set_robust_list,
+ (long)&self->robust_list, 3*sizeof(long));
+ self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
+ self->robust_list.pending = &m->_m_next;
+ }
+
+ if (m->_m_lock == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
m->_m_count++;
return 0;
}
- if (m->_m_lock || a_cas(&m->_m_lock, 0, tid)) return EBUSY;
+ own = m->_m_lock;
+ if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, own, tid)!=own)
+ return EBUSY;
+
m->_m_count = 1;
+
+ if (m->_m_type < 4) return 0;
+
+ if (m->_m_type >= 8) {
+ m->_m_lock = 0;
+ return ENOTRECOVERABLE;
+ }
+ m->_m_next = self->robust_list.head;
+ m->_m_prev = &self->robust_list.head;
+ if (self->robust_list.head)
+ self->robust_list.head[-1] = &m->_m_next;
+ self->robust_list.head = &m->_m_next;
+ self->robust_list.pending = 0;
+ if (own) {
+ m->_m_type += 8;
+ return EOWNERDEAD;
+ }
+
return 0;
}
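The magic numbers in the lock word follow the kernel's robust-futex convention: the owner's TID occupies the low 30 bits, bit 30 is set by the kernel when that owner exits, and bit 31 is the waiters flag, which trylock keeps set in the stored value, presumably so the kernel will wake a waiter during its exit-time cleanup. For reference, the corresponding constants quoted from <linux/futex.h> (not defined by this patch):

#define FUTEX_WAITERS    0x80000000  /* trylock stores tid | this bit */
#define FUTEX_OWNER_DIED 0x40000000  /* set by the kernel if the owner exits */
#define FUTEX_TID_MASK   0x3fffffff  /* extracts the owner's TID */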
int pthread_mutex_unlock(pthread_mutex_t *m)
{
+ pthread_t self;
+
if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
- if (!m->_m_lock || m->_m_lock != __pthread_self()->tid)
+ self = __pthread_self();
+ if ((m->_m_lock&0x3fffffff) != self->tid)
return EPERM;
- if (m->_m_type == PTHREAD_MUTEX_RECURSIVE && --m->_m_count)
+ if ((m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE && --m->_m_count)
return 0;
+ if (m->_m_type >= 4) {
+ self->robust_list.pending = &m->_m_next;
+ *(void **)m->_m_prev = m->_m_next;
+ if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+ }
}
m->_m_lock = 0;
if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
+ if (m->_m_type >= 4) self->robust_list.pending = 0;
return 0;
}
--- /dev/null
+#include "pthread_impl.h"
+
+int pthread_mutexattr_getrobust(const pthread_mutexattr_t *a, int *robust)
+{
+ *robust = *a / 4U % 2;
+ return 0;
+}
--- /dev/null
+#include "pthread_impl.h"
+
+int pthread_mutexattr_setrobust(pthread_mutexattr_t *a, int robust)
+{
+ if (robust > 1U) return EINVAL;
+ *a &= ~4;
+ *a |= robust*4;
+ return 0;
+}
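For completeness, a hedged sketch of how an application would request a robust mutex with these attribute functions. This is standard POSIX usage, not part of the patch; init_robust_mutex is a hypothetical helper, and PTHREAD_MUTEX_ROBUST is the standard value that setrobust above encodes in bit 2 of the attribute.

#include <pthread.h>

/* Sketch: create an error-checking, robust mutex. */
static int init_robust_mutex(pthread_mutex_t *m)
{
	pthread_mutexattr_t a;
	int r;
	pthread_mutexattr_init(&a);
	pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutexattr_setrobust(&a, PTHREAD_MUTEX_ROBUST);
	r = pthread_mutex_init(m, &a);   /* init copies the type and robust bits: *a & 7 */
	pthread_mutexattr_destroy(&a);
	return r;
}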