From 02084109f0f0d6e0a7fe4a8cb3a90a422725e264 Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Wed, 30 Mar 2011 09:06:00 -0400
Subject: [PATCH] streamline mutex unlock to remove a useless branch, use
 a_store to unlock

this roughly halves the cost of pthread_mutex_unlock, at least for
non-robust, normal-type mutexes.

the a_store change is in preparation for future support of archs
which require a memory barrier or special atomic store operation, and
also should prevent the possibility of the compiler misordering
writes.
---
 src/thread/pthread_mutex_unlock.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 67aa7ba5..5855db0b 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -14,11 +14,15 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+			a_store(&m->_m_lock, 0);
+			self->robust_list.pending = 0;
+		} else {
+			a_store(&m->_m_lock, 0);
 		}
+	} else {
+		a_store(&m->_m_lock, 0);
 	}
 
-	m->_m_lock = 0;
 	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
-	if (m->_m_type >= 4) self->robust_list.pending = 0;
 	return 0;
 }
-- 
2.25.1
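
Editor's note on the a_store semantics the patch relies on: musl defines
a_store per-arch in its atomic headers, typically as inline asm, and that
code is not shown here. The sketch below is only an approximation of the
required behavior using the GCC/Clang __atomic builtin, and the name
a_store_sketch is hypothetical: a single atomic store to m->_m_lock that
the compiler cannot reorder with the preceding robust-list writes, and
that carries a hardware memory barrier on archs that need one. A plain
"m->_m_lock = 0;" gives neither guarantee, which is why the patch routes
every unlock path through a_store.

/* Hypothetical illustration, not musl's actual a_store (musl's real
 * per-arch versions use inline asm). A seq_cst atomic store prevents
 * the compiler from hoisting the store above earlier writes and emits
 * a barrier on architectures that require one. */
static inline void a_store_sketch(volatile int *p, int x)
{
	__atomic_store_n(p, x, __ATOMIC_SEQ_CST);
}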