if there is already a waiter for a lock, spinning on the lock is
essentially an attempt to steal it from whichever waiter would obtain
it via any priority rules in place, and is therefore undesirable. in
the current implementation, there is always an inherent race window at
unlock during which a newly-arriving thread may steal the lock from
the existing waiters, but we should aim to keep this window minimal
rather than enlarging it.
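
to make that race window concrete, here is a minimal self-contained sketch of the same spin-then-futex pattern. it is illustrative only, not the code being patched; the names (lk, waiters, lock_acquire, lock_release, the futex wrappers) are invented for the example, and musl's own atomics and private-futex handling are left out. release clears the lock word and only then wakes a sleeping waiter, so any thread spinning in the acquire loop during that gap can take the lock before the woken waiter is even scheduled; refusing to spin once a waiter is registered keeps newly-arriving threads from widening that gap.

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* illustrative futex wrappers; not a real api */
    static void futex_wait(volatile int *addr, int val)
    {
        syscall(SYS_futex, addr, FUTEX_WAIT, val, 0, 0, 0);
    }
    static void futex_wake(volatile int *addr, int n)
    {
        syscall(SYS_futex, addr, FUTEX_WAKE, n, 0, 0, 0);
    }

    /* lk: 0 = unlocked, 1 = locked; waiters: threads blocked in futex_wait */
    static volatile int lk, waiters;

    static void lock_acquire(void)
    {
        int spins = 100;
        /* spin only while nobody is already asleep waiting for the lock;
         * otherwise we would just be trying to steal it from them */
        while (spins-- && lk && !waiters) ;
        while (__sync_lock_test_and_set(&lk, 1)) {
            __sync_fetch_and_add(&waiters, 1);
            if (lk) futex_wait(&lk, 1);
            __sync_fetch_and_sub(&waiters, 1);
        }
    }

    static void lock_release(void)
    {
        __sync_lock_release(&lk); /* lk = 0 */
        /* race window: from this store until the woken waiter actually
         * reacquires the lock, any spinning or newly-arriving thread
         * can take it first */
        if (waiters) futex_wake(&lk, 1);
    }

the hunks below apply that policy: the existing bounded spin (100 iterations) is kept, but it now also stops as soon as the relevant waiter count is nonzero, first in the generic __wait helper and then in the mutex, rwlock, and semaphore timed-lock slow paths.
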
{
int spins=100;
if (priv) priv = 128;
- while (spins--) {
+ while (spins-- && (!waiters || !*waiters)) {
if (*addr==val) a_spin();
else return;
}
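
a note on the condition added above: in __wait(volatile int *addr, volatile int *waiters, int val, int priv) the waiters argument may be a null pointer, hence the (!waiters || !*waiters) form rather than a plain !*waiters. the counter only means something because the rest of the function, below this hunk, maintains it around the actual sleep; roughly (simplified sketch, with the private-futex fallback and other details omitted):

    if (waiters) a_inc(waiters);
    while (*addr == val)
        __syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0);
    if (waiters) a_dec(waiters);

the remaining hunks make the callers' own pre-wait spins follow the same rule, each against the waiter counter it already maintains:
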
if (r != EBUSY) return r;
int spins = 100;
- while (spins-- && m->_m_lock) a_spin();
+ while (spins-- && m->_m_lock && !m->_m_waiters) a_spin();
while ((r=pthread_mutex_trylock(m)) == EBUSY) {
if (!(r=m->_m_lock) || ((r&0x40000000) && (m->_m_type&4)))
if (r != EBUSY) return r;
int spins = 100;
- while (spins-- && rw->_rw_lock) a_spin();
+ while (spins-- && rw->_rw_lock && !rw->_rw_waiters) a_spin();
while ((r=pthread_rwlock_tryrdlock(rw))==EBUSY) {
if (!(r=rw->_rw_lock) || (r&0x7fffffff)!=0x7fffffff) continue;
if (r != EBUSY) return r;
int spins = 100;
- while (spins-- && rw->_rw_lock) a_spin();
+ while (spins-- && rw->_rw_lock && !rw->_rw_waiters) a_spin();
while ((r=pthread_rwlock_trywrlock(rw))==EBUSY) {
if (!(r=rw->_rw_lock)) continue;
if (!sem_trywait(sem)) return 0;
int spins = 100;
- while (spins-- && sem->__val[0] <= 0) a_spin();
+ while (spins-- && sem->__val[0] <= 0 && !sem->__val[1]) a_spin();
while (sem_trywait(sem)) {
int r;
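
for reference, the counters consulted by these four call sites are the ones their slow paths already maintain: pthread_mutex_timedlock increments m->_m_waiters immediately before its futex wait and decrements it afterwards, the two pthread_rwlock timed-lock functions do the same with rw->_rw_waiters, and sem_timedwait keeps its waiter count in sem->__val[1]. in every case a nonzero count means some thread has already gone to sleep in the kernel waiting for this lock, and spinning further would only be competing with it.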