static inline void lock(volatile int *lk)
{
-	if (libc.threads_minus_1)
+	int need_locks = libc.need_locks;
+	if (need_locks) {
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
}

static inline void unlock(volatile int *lk)
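
The hunk above is the lock() helper in musl's malloc. The change replaces the old test of libc.threads_minus_1 with a dedicated three-state flag: 0 means the process is single-threaded and locking may be skipped, 1 means locks are required, and -1 means the last other thread has exited and one more synchronizing lock operation is needed before falling back to 0. Below is a minimal sketch of that protocol, assuming C11 atomics and invented demo_* names in place of musl's a_swap/a_store/__wait primitives; it is an illustration, not musl source.

/* A sketch of the need_locks protocol; demo_* names are invented, and
 * musl uses its own a_swap/a_store/futex primitives, not <stdatomic.h>. */
#include <stdatomic.h>

static int demo_need_locks;	/* 0: single-threaded, 1: threaded,
				 * -1: last thread gone, sync once more */
static atomic_int demo_lk;	/* 0: unlocked, 1: locked */

static void demo_lock(void)
{
	int need_locks = demo_need_locks;
	if (need_locks) {
		/* Synchronizing RMW, like a_swap; real code would
		 * futex-wait instead of spinning. */
		while (atomic_exchange(&demo_lk, 1))
			;
		/* This acquisition synchronized with the exited thread's
		 * final unlock, so lock-free operation is safe from now on. */
		if (need_locks < 0) demo_need_locks = 0;
	}
}

static void demo_unlock(void)
{
	/* Only touch the lock word if demo_lock() actually took it. */
	if (atomic_load(&demo_lk)) atomic_store(&demo_lk, 0);
}

int main(void)
{
	demo_need_locks = -1;	/* as if the last extra thread just exited */
	demo_lock();		/* one last real acquisition consumes the -1 */
	demo_unlock();
	demo_lock();		/* skipped: demo_need_locks is now 0 */
	demo_unlock();
	return 0;
}
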
void __lock(volatile int *l)
{
-	if (!libc.threads_minus_1) return;
+	int need_locks = libc.need_locks;
+	if (!need_locks) return;
	/* fast path: INT_MIN for the lock, +1 for the congestion */
	int current = a_cas(l, 0, INT_MIN + 1);
+	if (need_locks < 0) libc.need_locks = 0;
	if (!current) return;
	/* A first spin loop, for medium congestion. */
	for (unsigned i = 0; i < 10; ++i) {
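
This hunk is the slow-path lock in src/thread/__lock.c. Two details matter here: the fast-path constant packs both fields of the lock word into a single CAS, with the sign bit (INT_MIN) acting as the lock flag and the low bits counting congestion, and the -1 state is consumed only after the a_cas, so the store of 0 always follows a synchronizing atomic operation on the lock word. A small compile-and-run illustration of the encoding, with the CAS simulated single-threadedly (hypothetical code, not musl's):

#include <assert.h>
#include <limits.h>

int main(void)
{
	int l = 0;		/* unlocked, zero congestion */
	/* Simulates a_cas(&l, 0, INT_MIN + 1): take the lock and count
	 * ourselves as one contender in a single operation. */
	int current = l;
	if (current == 0) l = INT_MIN + 1;
	assert(current == 0);	/* CAS succeeded: we hold the lock */
	assert(l < 0);		/* sign bit doubles as the lock flag */
	assert(l - INT_MIN == 1);	/* congestion count is 1 */
	return 0;
}
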
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock.
	 * This needs to happen after any possible calls to LOCK() that might
-	 * skip locking if libc.threads_minus_1 is zero. */
-	libc.threads_minus_1--;
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;
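
This hunk appears to be from pthread_exit (in musl's src/thread/pthread_create.c). The exiting thread publishes -1 rather than 0 because, as the comment notes, its unlock only becomes visible after SYS_exit; a survivor that dropped straight back to lock-free mode could race with the exiting thread's remaining critical sections. A hypothetical standalone restatement of the exit-side transition:

#include <assert.h>

/* Invented standalone counterparts of libc.threads_minus_1 and
 * libc.need_locks; not musl source. */
static int threads_minus_1 = 1;	/* one other thread, about to exit */
static int need_locks = 1;

static void on_exiting_thread_unlink(void)
{
	/* Mirrors the hunk above: the survivor must perform one more
	 * real, synchronizing lock before locking can be skipped, so
	 * the state goes to -1 here, never directly to 0. */
	if (!--threads_minus_1) need_locks = -1;
}

int main(void)
{
	on_exiting_thread_unlink();
	assert(need_locks == -1);
	return 0;
}
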
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
-	libc.threads_minus_1++;
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* All clone failures translate to EAGAIN. If explicit scheduling
		new->next->prev = new;
		new->prev->next = new;
	} else {
-		libc.threads_minus_1--;
+		if (!--libc.threads_minus_1) libc.need_locks = 0;
	}
	__tl_unlock();
	__restore_sigs(&set);
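
Taken together, the hunks implement a small state machine over libc.need_locks. The sketch below, with invented names, summarizes the transitions; note the asymmetry between the two decrements: a failed clone stores 0 directly because the would-be thread never ran and there is nothing to synchronize with, while pthread_exit must pass through -1.

/* Hypothetical summary of the need_locks transitions; not musl code. */
static int threads_minus_1;
static int need_locks;

static void thread_created(void)	/* pthread_create: clone succeeded */
{
	if (!threads_minus_1++) need_locks = 1;
}

static void thread_create_failed(void)	/* clone failed: thread never ran */
{
	if (!--threads_minus_1) need_locks = 0;
}

static void last_thread_exited(void)	/* pthread_exit of last other thread */
{
	if (!--threads_minus_1) need_locks = -1;
}

static void synchronizing_lock(void)	/* any lock()/LOCK()/__lock call */
{
	if (need_locks < 0) need_locks = 0;
}

int main(void)
{
	thread_created();	/*  0 -> 1: locks now required    */
	thread_create_failed();	/*  1 -> 0: nothing to drain      */
	thread_created();	/*  0 -> 1                        */
	last_thread_exited();	/*  1 -> -1: one more sync needed */
	synchronizing_lock();	/* -1 -> 0: back to lock-free     */
	return need_locks;	/* exits with 0                   */
}
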