#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	/* Run remaining cancellation cleanup handlers in LIFO order. */
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();
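
	/* Background (usage sketch, not musl code): the destructors run
	 * here are those registered via pthread_key_create, e.g.:
	 *
	 *	static pthread_key_t k;
	 *	pthread_key_create(&k, free);
	 *	pthread_setspecific(k, malloc(64));
	 *	// at thread exit, free() runs on the stored pointer
	 */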

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);
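
	/* Consumer-side sketch (simplified from pthread_kill.c, for
	 * context): killlock keeps the target's tid stable while a
	 * signal is sent, so an exiting thread's tid cannot be reused
	 * out from under a signaller:
	 *
	 *	int pthread_kill(pthread_t t, int sig)
	 *	{
	 *		int r;
	 *		LOCK(t->killlock);
	 *		r = t->tid ? -__syscall(SYS_tkill, t->tid, sig) : ESRCH;
	 *		UNLOCK(t->killlock);
	 *		return r;
	 *	}
	 */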

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement, restore the tid, and unblock signals to give the
	 * atexit handlers and stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		UNLOCK(self->killlock);
		__restore_sigs(&set);
		exit(0);
	}
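
	/* Observable behavior (illustration, not musl code): the process
	 * stays alive until the last thread exits, so main may safely
	 * pthread_exit while workers run:
	 *
	 *	int main() { pthread_create(&t, 0, work, 0); pthread_exit(0); }
	 */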

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* Store the owner-died bit (0x40000000, matching the
		 * kernel's FUTEX_OWNER_DIED) so the next locker gets
		 * EOWNERDEAD. */
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();
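
	/* User-visible effect (sketch, not musl code): the next locker of
	 * a robust mutex abandoned here sees the owner-died bit and gets
	 * EOWNERDEAD, and may then repair the protected state:
	 *
	 *	if (pthread_mutex_lock(&m) == EOWNERDEAD) {
	 *		repair_state();            // hypothetical helper
	 *		pthread_mutex_consistent(&m);
	 *	}
	 *	pthread_mutex_unlock(&m);
	 */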

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
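
	/* The competing CAS (simplified from pthread_detach.c): detach
	 * attempts DT_JOINABLE -> DT_DYNAMIC, and if it loses the race
	 * against an exiting thread it joins to free the resources:
	 *
	 *	if (a_cas(&t->detach_state, DT_JOINABLE, DT_DYNAMIC)
	 *	    != DT_JOINABLE) return __pthread_join(t, 0);
	 */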

	if (state>=DT_DETACHED && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later, we need to clear it here. */
		if (state == DT_DYNAMIC) __syscall(SYS_set_tid_address, 0);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
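
/* Usage sketch (application code, not part of this file): the
 * pthread_cleanup_push/pop macros in pthread.h route through the two
 * functions above, so a handler still pending at thread exit runs in
 * the cancelbuf walk in __pthread_exit:
 *
 *	pthread_mutex_lock(&lock);
 *	pthread_cleanup_push(unlock_cb, &lock);	// unlock_cb: hypothetical
 *	work_that_may_be_cancelled();
 *	pthread_cleanup_pop(1);			// pop and run unlock_cb
 */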

struct start_sched_args {
	void *start_arg;
	void *(*start_fn)(void *);
	sigset_t mask;
	pthread_attr_t *attr;
	volatile int futex;
};

static void *start_sched(void *p)
{
	struct start_sched_args *ssa = p;
	void *start_arg = ssa->start_arg;
	void *(*start_fn)(void *) = ssa->start_fn;
	pthread_t self = __pthread_self();

	int ret = -__syscall(SYS_sched_setscheduler, self->tid,
		ssa->attr->_a_policy, &ssa->attr->_a_prio);
	if (!ret) __restore_sigs(&ssa->mask);
	a_store(&ssa->futex, ret);
	__wake(&ssa->futex, 1, 1);
	/* On failure, exit before any application code runs; the
	 * creating thread reads the error back via ssa->futex. */
	if (ret) {
		self->detach_state = DT_DYNAMIC;
		__pthread_exit(0);
	}
	return start_fn(start_arg);
}
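
/* Caller-side sketch (application code): start_sched is interposed only
 * when the creator asked for explicit scheduling, e.g.:
 *
 *	pthread_attr_t a;
 *	struct sched_param sp = { .sched_priority = 10 };
 *	pthread_attr_init(&a);
 *	pthread_attr_setinheritsched(&a, PTHREAD_EXPLICIT_SCHED);
 *	pthread_attr_setschedpolicy(&a, SCHED_FIFO);
 *	pthread_attr_setschedparam(&a, &sp);
 *	pthread_create(&t, &a, fn, arg);
 */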

static int start(void *p)
{
	pthread_t self = p;
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	__pthread_exit(self->start(self->start_arg));
	return 0;
}

static int start_c11(void *p)
{
	pthread_t self = p;
	int (*start)(void*) = (int(*)(void*)) self->start;
	__pthread_exit((void *)(uintptr_t)start(self->start_arg));
	return 0;
}
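
/* C11 path (sketch): thrd_create passes __ATTRP_C11_THREAD as the attr
 * pointer, selecting start_c11 so the int return of a thrd_start_t
 * function survives the void * round trip:
 *
 *	int worker(void *arg) { return 42; }
 *	thrd_t t; int res;
 *	thrd_create(&t, worker, 0);
 *	thrd_join(t, &res);		// res == 42
 */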

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);
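
/* How the override works (sketch; see pthread_key_create.c): linking
 * that file provides strong definitions that replace the weak dummies
 * above, e.g. (assumed shape):
 *
 *	volatile size_t __pthread_tsd_size = sizeof(void *) * PTHREAD_KEYS_MAX;
 *
 * so programs that never create TSD keys reserve no space for them. */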

volatile int __block_new_threads = 0;
size_t __default_stacksize = DEFAULT_STACK_SIZE;
size_t __default_guardsize = DEFAULT_GUARD_SIZE;

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = { 0 };
	struct start_sched_args ssa;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
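
	/* Worked example (illustrative numbers, assuming 4k pages, the
	 * default 80k stack and 4k guard, and 1k of TLS+TSD):
	 * size = 4096 + ROUND(81920 + 1024) = 4096 + 86016 = 90112.
	 * With a caller-provided 64k stack, TLS+TSD is carved from its
	 * top only if it needs less than min(65536/8, 2048) = 2048
	 * bytes; otherwise only ROUND(need) is mapped separately. */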

	if (!tsd) {
		if (guard) {
			/* Map the whole range PROT_NONE first, then
			 * enable access above the guard, leaving the
			 * guard pages permanently inaccessible. */
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
		flags -= CLONE_CHILD_CLEARTID;
	} else {
		new->detach_state = DT_JOINABLE;
	}

	if (attr._a_sched) {
		do_sched = 1;
		ssa.futex = -1;
		ssa.start_fn = new->start;
		ssa.start_arg = new->start_arg;
		ssa.attr = &attr;
		new->start = start_sched;
		new->start_arg = &ssa;
		__block_app_sigs(&ssa.mask);
	}

	new->robust_list.head = &new->robust_list.head;
	new->unblock_cancel = self->cancel;
	new->CANARY = self->CANARY;

	a_inc(&libc.threads_minus_1);
	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->detach_state);
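
	/* Join-side sketch (simplified from pthread_join.c, for context):
	 * CLONE_CHILD_CLEARTID makes the kernel zero and futex-wake the
	 * ctid word passed above (&new->detach_state) when the thread
	 * dies, which is what joining threads block on:
	 *
	 *	while ((state = t->detach_state) && r != ETIMEDOUT && r != EINVAL)
	 *		r = __timedwait_cp(&t->detach_state, state, CLOCK_REALTIME, ts, 0);
	 */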

	__release_ptc();

	if (do_sched) {
		__restore_sigs(&ssa.mask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	/* When explicit scheduling was requested, wait for the new
	 * thread to report the sched_setscheduler result via ssa.futex
	 * so pthread_create can return any error synchronously. */
	if (do_sched) {
		__futexwait(&ssa.futex, -1, 1);
		ret = ssa.futex;
		if (ret) return ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);