optimize out setting up robust list with kernel when not needed
author Rich Felker <dalias@aerifal.cx>
Fri, 10 Apr 2015 04:54:48 +0000 (00:54 -0400)
committer Rich Felker <dalias@aerifal.cx>
Fri, 10 Apr 2015 04:54:48 +0000 (00:54 -0400)
as a result of commit 12e1e324683a1d381b7f15dd36c99b37dd44d940, kernel
processing of the robust list is only needed for process-shared
mutexes. previously the first attempt to lock any owner-tracked mutex
resulted in robust list initialization and a set_robust_list syscall.
this is no longer necessary, and since the kernel's record of the
robust list must now be cleared at thread exit time for detached
threads, optimizing it out is more worthwhile than before too.

src/env/__init_tls.c
src/process/fork.c
src/thread/pthread_create.c
src/thread/pthread_mutex_trylock.c

index e651c7a78922a40703c18c8ef5f4b3de44db4e6a..ac4d9e7f08a9f5b7f836f4344ceb1fc22aee6b1c 100644 (file)
@@ -18,6 +18,7 @@ int __init_tp(void *p)
        libc.has_thread_pointer = 1;
        td->tid = __syscall(SYS_set_tid_address, &td->tid);
        td->locale = &libc.global_locale;
+       td->robust_list.head = &td->robust_list.head;
        return 0;
 }
 
index 43c52bc4c0c8e9d28455611697e686b714ccf23e..8d676828415cac2020f53aad23940d61b858d8ad 100644 (file)
@@ -25,7 +25,8 @@ pid_t fork(void)
        if (libc.has_thread_pointer && !ret) {
                pthread_t self = __pthread_self();
                self->tid = __syscall(SYS_gettid);
-               memset(&self->robust_list, 0, sizeof self->robust_list);
+               self->robust_list.off = 0;
+               self->robust_list.pending = 0;
                libc.threads_minus_1 = 0;
        }
        __restore_sigs(&set);
index 893773fa10f0228be9d64af7e1e884dcb048391c..8b0135bccec4b7feb300e7af4c76075f7579ef40 100644 (file)
@@ -268,6 +268,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
                do_sched = new->startlock[0] = 1;
                __block_app_sigs(new->sigmask);
        }
+       new->robust_list.head = &new->robust_list.head;
        new->unblock_cancel = self->cancel;
        new->canary = self->canary;
 
index cb93565194a1cc519b26051fe3f8e65cafec4217..0df3ce2982ff85d2161ba1e0742e75c73c023a7b 100644 (file)
@@ -7,12 +7,6 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
        pthread_t self = __pthread_self();
        int tid = self->tid;
 
-       if (!self->robust_list.off) {
-               __syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
-               self->robust_list.head = &self->robust_list.head;
-               self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
-       }
-
        old = m->_m_lock;
        own = old & 0x7fffffff;
        if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
@@ -23,6 +17,10 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
        if (own == 0x40000000) return ENOTRECOVERABLE;
 
        if (m->_m_type & 128) {
+               if (!self->robust_list.off) {
+                       self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
+                       __syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
+               }
                if (m->_m_waiters) tid |= 0x80000000;
                self->robust_list.pending = &m->_m_next;
        }