As a result of commit
12e1e324683a1d381b7f15dd36c99b37dd44d940, kernel
processing of the robust list is only needed for process-shared
mutexes. Previously, the first attempt to lock any owner-tracked mutex
resulted in robust-list initialization and a set_robust_list syscall.
This is no longer necessary, and since the kernel's record of the
robust list must now be cleared at thread exit time for detached
threads, optimizing it out is even more worthwhile than before.
libc.has_thread_pointer = 1;
td->tid = __syscall(SYS_set_tid_address, &td->tid);
td->locale = &libc.global_locale;
+ td->robust_list.head = &td->robust_list.head;
return 0;
}
if (libc.has_thread_pointer && !ret) {
pthread_t self = __pthread_self();
self->tid = __syscall(SYS_gettid);
- memset(&self->robust_list, 0, sizeof self->robust_list);
+ self->robust_list.off = 0;
+ self->robust_list.pending = 0;
libc.threads_minus_1 = 0;
}
__restore_sigs(&set);
do_sched = new->startlock[0] = 1;
__block_app_sigs(new->sigmask);
}
+ new->robust_list.head = &new->robust_list.head;
new->unblock_cancel = self->cancel;
new->canary = self->canary;
pthread_t self = __pthread_self();
int tid = self->tid;
- if (!self->robust_list.off) {
- __syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
- self->robust_list.head = &self->robust_list.head;
- self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
- }
-
old = m->_m_lock;
own = old & 0x7fffffff;
if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
if (own == 0x40000000) return ENOTRECOVERABLE;
if (m->_m_type & 128) {
+ if (!self->robust_list.off) {
+ self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
+ __syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
+ }
if (m->_m_waiters) tid |= 0x80000000;
self->robust_list.pending = &m->_m_next;
}