this roughly halves the cost of pthread_mutex_unlock, at least for
non-robust, normal-type mutexes.
the a_store change is in preparation for future support of archs that
require a memory barrier or special atomic store operation, and should
also prevent the compiler from reordering the unlocking store relative
to other writes.
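
for illustration only: on an arch that needs an explicit barrier, an
a_store-style release might look roughly like the sketch below. this is
not musl's actual implementation (each arch defines its own a_store in
its atomic.h); it just shows the intent -- the barriers keep both the
compiler and the cpu from moving the unlocking store ahead of the
preceding robust-list writes.

	/* illustrative sketch, not musl's real a_store */
	static inline void a_store(volatile int *p, int x)
	{
		__sync_synchronize();	/* full barrier: prior writes complete first */
		*p = x;			/* the actual unlocking store */
		__sync_synchronize();	/* keep the store ordered before later accesses */
	}
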
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+			a_store(&m->_m_lock, 0);
+			self->robust_list.pending = 0;
+		} else {
+			a_store(&m->_m_lock, 0);
 		}
+	} else {
+		a_store(&m->_m_lock, 0);
 	}
-	m->_m_lock = 0;
 	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
-	if (m->_m_type >= 4) self->robust_list.pending = 0;
 	return 0;
 }
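
for reference, the fast path this speeds up is the plain case named in
the message above: a non-robust, normal-type mutex, which is what a
default-initialized mutex is in musl. a minimal (hypothetical) usage
example of that path:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER; /* normal type, non-robust */
	static long counter;

	static void *work(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 100000; i++) {
			pthread_mutex_lock(&m);
			counter++;
			pthread_mutex_unlock(&m); /* takes the streamlined unlock path */
		}
		return 0;
	}

	int main(void)
	{
		pthread_t t[4];
		for (int i = 0; i < 4; i++) pthread_create(&t[i], 0, work, 0);
		for (int i = 0; i < 4; i++) pthread_join(t[i], 0);
		printf("%ld\n", counter);
		return 0;
	}
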