From: Rich Felker
Date: Sat, 16 Aug 2014 03:54:52 +0000 (-0400)
Subject: make futex operations use private-futex mode when possible
X-Git-Tag: v1.1.5~67
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=bc09d58c0432a4eca5f6a1e536679a527f971116;p=oweals%2Fmusl.git

make futex operations use private-futex mode when possible

private-futex uses the virtual address of the futex int directly as
the hash key rather than requiring the kernel to resolve the address
to an underlying backing for the mapping in which it lies. for certain
usage patterns it improves performance significantly.

in many places, the code using futex __wake and __wait operations was
already passing a correct fixed zero or nonzero flag for the priv
argument, so no change was needed at the site of the call, only in the
__wake and __wait functions themselves. in other places, especially
where the process-shared attribute for a synchronization object was
not previously tracked, additional new code is needed. for mutexes,
the only place to store the flag is in the type field, so additional
bit masking logic is needed for accessing the type.

for non-process-shared condition variable broadcasts, the futex
requeue operation is unable to requeue from a private futex to a
process-shared one in the mutex structure, so requeue is simply
disabled in this case by waking all waiters.

for robust mutexes, the kernel always performs a non-private wake when
the owner dies. in order not to introduce a behavioral regression in
non-process-shared robust mutexes (when the owning thread dies), they
are simply forced to be treated as process-shared for now, giving
correct behavior at the expense of performance. this can be fixed by
adding explicit code to pthread_exit to do the right thing for
non-shared robust mutexes in userspace rather than relying on the
kernel to do it, and will be fixed in this way later.

since not all supported kernels have private futex support, the new
code detects EINVAL from the futex syscall and falls back to making
the call without the private flag. no attempt to cache the result is
made; caching it and using the cached value efficiently is somewhat
difficult, and not worth the complexity when the benefits would be
seen only on ancient kernels which have numerous other limitations
and bugs anyway.
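[Illustration, not part of the commit: the sketch below shows the same
try-private-then-fall-back pattern the new __wake/__wait code uses, but
written against the public syscall(2) wrapper rather than musl's
internal __syscall. The helper names futex and demo_wake are
hypothetical; the private flag is FUTEX_PRIVATE_FLAG, the value 128
that the musl code hard-codes. On kernels without private-futex
support the first call fails with EINVAL and the plain operation is
issued instead.]

#include <errno.h>
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* thin wrapper over the raw futex syscall; returns -1/errno on failure */
static long futex(volatile int *addr, int op, int val)
{
	return syscall(SYS_futex, addr, op, val, 0, 0, 0);
}

/* wake up to cnt waiters on addr; try the private op first and retry
 * without the flag only if the kernel rejects it with EINVAL */
static void demo_wake(volatile int *addr, int cnt, int priv)
{
	if (cnt < 0) cnt = INT_MAX;
	if (priv && futex(addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, cnt) >= 0)
		return;
	if (!priv || errno == EINVAL)
		futex(addr, FUTEX_WAKE, cnt);
}

int main(void)
{
	volatile int futex_word = 0;
	demo_wake(&futex_word, 1, 1); /* no waiters; just exercises the path */
	return 0;
}

[The result of the probe is deliberately not cached here, mirroring the
commit's reasoning that the extra syscall only happens on very old
kernels.]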
---

diff --git a/src/internal/pthread_impl.h b/src/internal/pthread_impl.h
index 650e8115..826191c2 100644
--- a/src/internal/pthread_impl.h
+++ b/src/internal/pthread_impl.h
@@ -76,6 +76,7 @@ struct __timer {
 #define _c_destroy __u.__i[8]
 #define _rw_lock __u.__i[0]
 #define _rw_waiters __u.__i[1]
+#define _rw_shared __u.__i[2]
 #define _b_lock __u.__i[0]
 #define _b_waiters __u.__i[1]
 #define _b_limit __u.__i[2]
@@ -108,8 +109,13 @@ void __unmapself(void *, size_t);
 
 int __timedwait(volatile int *, int, clockid_t, const struct timespec *, void (*)(void *), void *, int);
 void __wait(volatile int *, volatile int *, int, int);
-#define __wake(addr, cnt, priv) \
-	__syscall(SYS_futex, addr, FUTEX_WAKE, (cnt)<0?INT_MAX:(cnt))
+static inline void __wake(volatile void *addr, int cnt, int priv)
+{
+	if (priv) priv = 128;
+	if (cnt<0) cnt = INT_MAX;
+	__syscall(SYS_futex, addr, FUTEX_WAKE|priv, cnt) != -EINVAL ||
+	__syscall(SYS_futex, addr, FUTEX_WAKE, cnt);
+}
 
 void __acquire_ptc();
 void __release_ptc();
diff --git a/src/thread/__timedwait.c b/src/thread/__timedwait.c
index 302273ae..39eb9963 100644
--- a/src/thread/__timedwait.c
+++ b/src/thread/__timedwait.c
@@ -4,12 +4,15 @@
 #include "futex.h"
 #include "syscall.h"
 
-static int do_wait(volatile int *addr, int val,
-	clockid_t clk, const struct timespec *at, int priv)
+int __timedwait(volatile int *addr, int val,
+	clockid_t clk, const struct timespec *at,
+	void (*cleanup)(void *), void *arg, int priv)
 {
-	int r;
+	int r, cs;
 	struct timespec to, *top=0;
 
+	if (priv) priv = 128;
+
 	if (at) {
 		if (at->tv_nsec >= 1000000000UL) return EINVAL;
 		if (clock_gettime(clk, &to)) return EINVAL;
@@ -22,21 +25,12 @@ static int do_wait(volatile int *addr, int val,
 		top = &to;
 	}
 
-	r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
-	if (r == EINTR || r == EINVAL || r == ETIMEDOUT) return r;
-	return 0;
-}
-
-int __timedwait(volatile int *addr, int val,
-	clockid_t clk, const struct timespec *at,
-	void (*cleanup)(void *), void *arg, int priv)
-{
-	int r, cs;
-
 	if (!cleanup) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs);
 	pthread_cleanup_push(cleanup, arg);
 
-	r = do_wait(addr, val, clk, at, priv);
+	r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT|priv, val, top);
+	if (r == EINVAL) r = -__syscall_cp(SYS_futex, addr, FUTEX_WAIT, val, top);
+	if (r != EINTR && r != ETIMEDOUT) r = 0;
 
 	pthread_cleanup_pop(0);
 	if (!cleanup) pthread_setcancelstate(cs, 0);
diff --git a/src/thread/__wait.c b/src/thread/__wait.c
index a1e47804..ec1e8206 100644
--- a/src/thread/__wait.c
+++ b/src/thread/__wait.c
@@ -3,13 +3,15 @@
 void __wait(volatile int *addr, volatile int *waiters, int val, int priv)
 {
 	int spins=10000;
-	if (priv) priv = 128; priv=0;
+	if (priv) priv = 128;
 	while (spins--) {
 		if (*addr==val) a_spin();
 		else return;
 	}
 	if (waiters) a_inc(waiters);
-	while (*addr==val)
-		__syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0);
+	while (*addr==val) {
+		__syscall(SYS_futex, addr, FUTEX_WAIT|priv, val, 0) != -EINVAL
+		|| __syscall(SYS_futex, addr, FUTEX_WAIT, val, 0);
+	}
 	if (waiters) a_dec(waiters);
 }
diff --git a/src/thread/pthread_attr_get.c b/src/thread/pthread_attr_get.c
index 03fc91e3..3d296bf3 100644
--- a/src/thread/pthread_attr_get.c
+++ b/src/thread/pthread_attr_get.c
@@ -75,7 +75,7 @@ int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *restrict a, int *re
 }
 int pthread_mutexattr_getpshared(const pthread_mutexattr_t *restrict a, int *restrict pshared)
 {
-	*pshared = a->__attr>>31;
+	*pshared = a->__attr / 128U % 2;
 	return 0;
 }
diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index 5e603380..6b329c95 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -87,7 +87,8 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 			a_spin();
 		a_inc(&inst->finished);
 		while (inst->finished == 1)
-			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT,1,0);
+			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|128,1,0) != -EINTR
+			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
 		return PTHREAD_BARRIER_SERIAL_THREAD;
 	}
 
diff --git a/src/thread/pthread_cond_broadcast.c b/src/thread/pthread_cond_broadcast.c
index 0901daf6..18e778f3 100644
--- a/src/thread/pthread_cond_broadcast.c
+++ b/src/thread/pthread_cond_broadcast.c
@@ -27,13 +27,17 @@ int pthread_cond_broadcast(pthread_cond_t *c)
 
 	/* Perform the futex requeue, waking one waiter unless we know
 	 * that the calling thread holds the mutex. */
+	int wake_cnt = !(m->_m_type & 3)
+		|| (m->_m_lock&INT_MAX)!=__pthread_self()->tid;
+	if (m->_m_type & 128) wake_cnt = INT_MAX;
+	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE | 128,
+		wake_cnt, INT_MAX, &m->_m_lock) != -EINVAL ||
 	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
-		!m->_m_type || (m->_m_lock&INT_MAX)!=__pthread_self()->tid,
-		INT_MAX, &m->_m_lock);
+		wake_cnt, INT_MAX, &m->_m_lock);
 
 out:
 	a_store(&c->_c_lock, 0);
-	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);
+	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 1);
 
 	return 0;
 }
diff --git a/src/thread/pthread_cond_signal.c b/src/thread/pthread_cond_signal.c
index 71bcdcd9..5fd72f90 100644
--- a/src/thread/pthread_cond_signal.c
+++ b/src/thread/pthread_cond_signal.c
@@ -4,6 +4,6 @@ int pthread_cond_signal(pthread_cond_t *c)
 {
 	if (!c->_c_waiters) return 0;
 	a_inc(&c->_c_seq);
-	if (c->_c_waiters) __wake(&c->_c_seq, 1, 0);
+	if (c->_c_waiters) __wake(&c->_c_seq, 1, c->_c_mutex!=(void*)-1);
 	return 0;
 }
diff --git a/src/thread/pthread_cond_timedwait.c b/src/thread/pthread_cond_timedwait.c
index 99d62cca..44e89567 100644
--- a/src/thread/pthread_cond_timedwait.c
+++ b/src/thread/pthread_cond_timedwait.c
@@ -41,7 +41,7 @@ int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict
 	struct cm cm = { .c=c, .m=m };
 	int r, e=0, seq;
 
-	if (m->_m_type && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
+	if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
 		return EPERM;
 
 	if (ts && ts->tv_nsec >= 1000000000UL)
@@ -64,7 +64,8 @@ int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict
 
 	pthread_mutex_unlock(m);
 
-	do e = __timedwait(&c->_c_seq, seq, c->_c_clock, ts, cleanup, &cm, 0);
+	do e = __timedwait(&c->_c_seq, seq, c->_c_clock, ts, cleanup, &cm,
+		c->_c_mutex != (void *)-1);
 	while (c->_c_seq == seq && (!e || e==EINTR));
 	if (e == EINTR) e = 0;
 
diff --git a/src/thread/pthread_mutex_consistent.c b/src/thread/pthread_mutex_consistent.c
index 65da29fa..baea0ff4 100644
--- a/src/thread/pthread_mutex_consistent.c
+++ b/src/thread/pthread_mutex_consistent.c
@@ -2,7 +2,7 @@
 
 int pthread_mutex_consistent(pthread_mutex_t *m)
 {
-	if (m->_m_type < 8) return EINVAL;
+	if ((m->_m_type & 15) < 8) return EINVAL;
 	if ((m->_m_lock & 0x3fffffff) != __pthread_self()->tid)
 		return EPERM;
 	m->_m_type -= 8;
diff --git a/src/thread/pthread_mutex_init.c b/src/thread/pthread_mutex_init.c
index 9d85a354..b83edd0f 100644
--- a/src/thread/pthread_mutex_init.c
+++ b/src/thread/pthread_mutex_init.c
@@ -3,6 +3,7 @@
 int pthread_mutex_init(pthread_mutex_t *restrict m, const pthread_mutexattr_t *restrict a)
 {
 	*m = (pthread_mutex_t){0};
-	if (a) m->_m_type = a->__attr & 7;
+	if (a) m->_m_type = a->__attr;
+	if (m->_m_type & 4) m->_m_type |= 128U;
 	return 0;
 }
diff --git a/src/thread/pthread_mutex_lock.c b/src/thread/pthread_mutex_lock.c
index 42b5af64..2a9a3aa4 100644
--- a/src/thread/pthread_mutex_lock.c
+++ b/src/thread/pthread_mutex_lock.c
@@ -2,7 +2,8 @@
 
 int pthread_mutex_lock(pthread_mutex_t *m)
 {
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY))
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
+	    && !a_cas(&m->_m_lock, 0, EBUSY))
 		return 0;
 
 	return pthread_mutex_timedlock(m, 0);
diff --git a/src/thread/pthread_mutex_timedlock.c b/src/thread/pthread_mutex_timedlock.c
index 7b1afc02..849febb7 100644
--- a/src/thread/pthread_mutex_timedlock.c
+++ b/src/thread/pthread_mutex_timedlock.c
@@ -2,11 +2,12 @@
 
 int pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *restrict at)
 {
-	int r, t;
-
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY))
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL
+	    && !a_cas(&m->_m_lock, 0, EBUSY))
 		return 0;
 
+	int r, t, priv = (m->_m_type & 128) ^ 128;
+
 	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
 		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
 		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
@@ -16,7 +17,7 @@ int pthread_mutex_timedlock(pthread_mutex_t *restrict m, const struct timespec *
 		a_inc(&m->_m_waiters);
 		t = r | 0x80000000;
 		a_cas(&m->_m_lock, r, t);
-		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0, 0, priv);
 		a_dec(&m->_m_waiters);
 		if (r && r != EINTR) break;
 	}
diff --git a/src/thread/pthread_mutex_trylock.c b/src/thread/pthread_mutex_trylock.c
index 00ad65de..850fcb90 100644
--- a/src/thread/pthread_mutex_trylock.c
+++ b/src/thread/pthread_mutex_trylock.c
@@ -1,17 +1,13 @@
 #include "pthread_impl.h"
 
-int pthread_mutex_trylock(pthread_mutex_t *m)
+int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 {
-	int tid, old, own;
-	pthread_t self;
-
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL)
-		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
+	int old, own;
+	int type = m->_m_type & 15;
+	pthread_t self = __pthread_self();
+	int tid = self->tid;
 
-	self = __pthread_self();
-	tid = self->tid;
-
-	if (m->_m_type >= 4) {
+	if (type >= 4) {
 		if (!self->robust_list.off)
 			__syscall(SYS_set_robust_list,
 				&self->robust_list, 3*sizeof(long));
@@ -21,7 +17,7 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 
 	old = m->_m_lock;
 	own = old & 0x7fffffff;
-	if (own == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
+	if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
 		if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
 		m->_m_count++;
 		return 0;
@@ -30,9 +26,9 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, old, tid)!=old)
 		return EBUSY;
 
-	if (m->_m_type < 4) return 0;
+	if (type < 4) return 0;
 
-	if (m->_m_type >= 8) {
+	if (type >= 8) {
 		m->_m_lock = 0;
 		return ENOTRECOVERABLE;
 	}
@@ -50,3 +46,10 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 
 	return 0;
 }
+
+int pthread_mutex_trylock(pthread_mutex_t *m)
+{
+	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL)
+		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
+	return __pthread_mutex_trylock_owner(m);
+}
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index b4bd74b8..769d6e56 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -9,16 +9,18 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 	int waiters = m->_m_waiters;
 	int cont;
 	int robust = 0;
+	int type = m->_m_type & 15;
+	int priv = (m->_m_type & 128) ^ 128;
 
-	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
+	if (type != PTHREAD_MUTEX_NORMAL) {
 		if (!m->_m_lock)
 			return EPERM;
 		self = __pthread_self();
 		if ((m->_m_lock&0x1fffffff) != self->tid)
 			return EPERM;
-		if ((m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
+		if ((type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
 			return m->_m_count--, 0;
-		if (m->_m_type >= 4) {
+		if (type >= 4) {
 			robust = 1;
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
@@ -32,6 +34,6 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 		__vm_unlock_impl();
 	}
 	if (waiters || cont<0)
-		__wake(&m->_m_lock, 1, 0);
+		__wake(&m->_m_lock, 1, priv);
 	return 0;
 }
diff --git a/src/thread/pthread_mutexattr_setpshared.c b/src/thread/pthread_mutexattr_setpshared.c
index 8c7a1e26..100f6ff2 100644
--- a/src/thread/pthread_mutexattr_setpshared.c
+++ b/src/thread/pthread_mutexattr_setpshared.c
@@ -3,7 +3,7 @@
 int pthread_mutexattr_setpshared(pthread_mutexattr_t *a, int pshared)
 {
 	if (pshared > 1U) return EINVAL;
-	a->__attr &= 0x7fffffff;
-	a->__attr |= pshared<<31;
+	a->__attr &= ~128U;
+	a->__attr |= pshared<<7;
 	return 0;
 }
diff --git a/src/thread/pthread_once.c b/src/thread/pthread_once.c
index e01f6d48..2eb0f932 100644
--- a/src/thread/pthread_once.c
+++ b/src/thread/pthread_once.c
@@ -3,7 +3,7 @@
 static void undo(void *control)
 {
 	a_store(control, 0);
-	__wake(control, 1, 0);
+	__wake(control, 1, 1);
 }
 
 int pthread_once(pthread_once_t *control, void (*init)(void))
@@ -25,10 +25,10 @@ int pthread_once(pthread_once_t *control, void (*init)(void))
 		pthread_cleanup_pop(0);
 
 		a_store(control, 2);
-		if (waiters) __wake(control, -1, 0);
+		if (waiters) __wake(control, -1, 1);
 		return 0;
 	case 1:
-		__wait(control, &waiters, 1, 0);
+		__wait(control, &waiters, 1, 1);
 		continue;
 	case 2:
 		return 0;
diff --git a/src/thread/pthread_rwlock_init.c b/src/thread/pthread_rwlock_init.c
index 82df52e2..a2c0b478 100644
--- a/src/thread/pthread_rwlock_init.c
+++ b/src/thread/pthread_rwlock_init.c
@@ -3,7 +3,6 @@
 int pthread_rwlock_init(pthread_rwlock_t *restrict rw, const pthread_rwlockattr_t *restrict a)
 {
 	*rw = (pthread_rwlock_t){0};
-	if (a) {
-	}
+	if (a) rw->_rw_shared = a->__attr[0]*128;
 	return 0;
 }
diff --git a/src/thread/pthread_rwlock_timedrdlock.c b/src/thread/pthread_rwlock_timedrdlock.c
index c0c94c97..a2b4d446 100644
--- a/src/thread/pthread_rwlock_timedrdlock.c
+++ b/src/thread/pthread_rwlock_timedrdlock.c
@@ -8,7 +8,7 @@ int pthread_rwlock_timedrdlock(pthread_rwlock_t *restrict rw, const struct times
 		t = r | 0x80000000;
 		a_inc(&rw->_rw_waiters);
 		a_cas(&rw->_rw_lock, r, t);
-		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, rw->_rw_shared^128);
 		a_dec(&rw->_rw_waiters);
 		if (r && r != EINTR) return r;
 	}
diff --git a/src/thread/pthread_rwlock_timedwrlock.c b/src/thread/pthread_rwlock_timedwrlock.c
index 339a1679..63a32ecb 100644
--- a/src/thread/pthread_rwlock_timedwrlock.c
+++ b/src/thread/pthread_rwlock_timedwrlock.c
@@ -8,7 +8,7 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *restrict rw, const struct times
 		t = r | 0x80000000;
 		a_inc(&rw->_rw_waiters);
 		a_cas(&rw->_rw_lock, r, t);
-		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
+		r = __timedwait(&rw->_rw_lock, t, CLOCK_REALTIME, at, 0, 0, rw->_rw_shared^128);
 		a_dec(&rw->_rw_waiters);
 		if (r && r != EINTR) return r;
 	}
diff --git a/src/thread/pthread_rwlock_unlock.c b/src/thread/pthread_rwlock_unlock.c
index a6d20854..7b5eec84 100644
--- a/src/thread/pthread_rwlock_unlock.c
+++ b/src/thread/pthread_rwlock_unlock.c
@@ -2,7 +2,7 @@
 
 int pthread_rwlock_unlock(pthread_rwlock_t *rw)
 {
-	int val, cnt, waiters, new;
+	int val, cnt, waiters, new, priv = rw->_rw_shared^128;
 
 	do {
 		val = rw->_rw_lock;
@@ -12,7 +12,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rw)
 	} while (a_cas(&rw->_rw_lock, val, new) != val);
 
 	if (!new && (waiters || val<0))
-		__wake(&rw->_rw_lock, cnt, 0);
+		__wake(&rw->_rw_lock, cnt, priv);
 
 	return 0;
 }
diff --git a/src/thread/sem_init.c b/src/thread/sem_init.c
index e8e419cf..55092434 100644
--- a/src/thread/sem_init.c
+++ b/src/thread/sem_init.c
@@ -10,5 +10,6 @@
 	}
 	sem->__val[0] = value;
 	sem->__val[1] = 0;
+	sem->__val[2] = pshared ? 0 : 128;
 	return 0;
 }
diff --git a/src/thread/sem_post.c b/src/thread/sem_post.c
index 14a2dfe2..31e3293d 100644
--- a/src/thread/sem_post.c
+++ b/src/thread/sem_post.c
@@ -3,7 +3,7 @@
 
 int sem_post(sem_t *sem)
 {
-	int val, waiters;
+	int val, waiters, priv = sem->__val[2];
 	do {
 		val = sem->__val[0];
 		waiters = sem->__val[1];
@@ -12,6 +12,6 @@ int sem_post(sem_t *sem)
 			return -1;
 		}
 	} while (a_cas(sem->__val, val, val+1+(val<0)) != val);
-	if (val<0 || waiters) __wake(sem->__val, 1, 0);
+	if (val<0 || waiters) __wake(sem->__val, 1, priv);
 	return 0;
 }
diff --git a/src/thread/sem_timedwait.c b/src/thread/sem_timedwait.c
index 6d0d0114..bfcb6dcd 100644
--- a/src/thread/sem_timedwait.c
+++ b/src/thread/sem_timedwait.c
@@ -12,7 +12,7 @@ int sem_timedwait(sem_t *restrict sem, const struct timespec *restrict at)
 		int r;
 		a_inc(sem->__val+1);
 		a_cas(sem->__val, 0, -1);
-		r = __timedwait(sem->__val, -1, CLOCK_REALTIME, at, cleanup, sem->__val+1, 0);
+		r = __timedwait(sem->__val, -1, CLOCK_REALTIME, at, cleanup, sem->__val+1, sem->__val[2]);
 		a_dec(sem->__val+1);
 		if (r) {
 			errno = r;
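[Illustration, not part of the commit: one idiom recurs throughout the
diff above — the process-shared attribute is stored as bit 7 (value
128) of the attribute/type word, and each wait/wake site derives its
priv argument by flipping that bit, e.g. priv = (m->_m_type & 128) ^
128 or rw->_rw_shared^128, so process-private objects pass 128 (the
private-futex flag) and process-shared ones pass 0. The sketch below
shows just that arithmetic; PSHARED_BIT and futex_priv_flag are
hypothetical names, not musl code.]

#include <assert.h>

#define PSHARED_BIT 128	/* bit 7 of the type/attr word, as in the commit */

/* hypothetical helper: derive the futex "private" argument from a type
 * word that stores the process-shared attribute in PSHARED_BIT */
static int futex_priv_flag(int type)
{
	return (type & PSHARED_BIT) ^ PSHARED_BIT;
}

int main(void)
{
	assert(futex_priv_flag(0) == 128);         /* process-private: use private futex */
	assert(futex_priv_flag(PSHARED_BIT) == 0); /* PTHREAD_PROCESS_SHARED: plain futex */
	return 0;
}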