void __release_ptc();
void __inhibit_ptc();
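+/* Signal-mask helpers: block signals around a critical section, saving
+ * the previous mask into *set so it can be restored afterwards. */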
+void __block_all_sigs(void *);
+void __block_app_sigs(void *);
+void __restore_sigs(void *);
+
#define DEFAULT_STACK_SIZE 81920
#define DEFAULT_GUARD_SIZE PAGE_SIZE
--- /dev/null
+#include "pthread_impl.h"
+#include "syscall.h"
+#include <signal.h>
+
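+/* Mask with every signal blocked. The number of words depends on the
+ * word size and on _NSIG (129 on MIPS, where the kernel sigset covers
+ * 128 signals; 65 on other archs, where it covers 64). */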
+static const unsigned long all_mask[] = {
+#if ULONG_MAX == 0xffffffff && _NSIG == 129
+ -1UL, -1UL, -1UL, -1UL
+#elif ULONG_MAX == 0xffffffff
+ -1UL, -1UL
+#else
+ -1UL
+#endif
+};
+
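+/* Same, but with the bits for signals 32-34 clear. These are reserved
+ * for internal use (SIGTIMER, SIGCANCEL, SIGSYNCCALL) and must remain
+ * deliverable even while all application signals are blocked. */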
+static const unsigned long app_mask[] = {
+#if ULONG_MAX == 0xffffffff
+#if _NSIG == 65
+ 0x7fffffff, 0xfffffffc
+#else
+ 0x7fffffff, 0xfffffffc, -1UL, -1UL
+#endif
+#else
+#if _NSIG == 65
+ 0xfffffffc7fffffff
+#else
+ 0xfffffffc7fffffff, -1UL
+#endif
+#endif
+};
+
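+/* Block every signal, including the implementation-reserved ones,
+ * storing the previous mask in *set. */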
+void __block_all_sigs(void *set)
+{
+ __syscall(SYS_rt_sigprocmask, SIG_BLOCK, &all_mask, set, _NSIG/8);
+}
+
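+/* Block all application signals, leaving the reserved signals 32-34
+ * unblocked; the previous mask is stored in *set. */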
+void __block_app_sigs(void *set)
+{
+ __syscall(SYS_rt_sigprocmask, SIG_BLOCK, &app_mask, set, _NSIG/8);
+}
+
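+/* Restore a signal mask previously saved by the functions above. */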
+void __restore_sigs(void *set)
+{
+ __syscall(SYS_rt_sigprocmask, SIG_SETMASK, set, 0, _NSIG/8);
+}
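
The call sites updated below all follow the pattern these helpers encode: save the current mask, block, do the work, restore. A minimal standalone sketch of that pattern, assuming only the declarations added to pthread_impl.h above; the function name and the body of the critical section are hypothetical:

	#include <signal.h>
	#include "pthread_impl.h"

	static void with_app_sigs_blocked(void)
	{
		sigset_t set;
		/* Block application signals, saving the caller's mask. */
		__block_app_sigs(&set);
		/* ... work that must not observe application signals ... */
		/* Put the caller's original mask back. */
		__restore_sigs(&set);
	}
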
{
int pid, tid, ret;
sigset_t set;
- __syscall(SYS_rt_sigprocmask, SIG_BLOCK, SIGALL_SET, &set, _NSIG/8);
+ __block_app_sigs(&set);
tid = __syscall(SYS_gettid);
pid = __syscall(SYS_getpid);
ret = syscall(SYS_tgkill, pid, tid, sig);
- __syscall(SYS_rt_sigprocmask, SIG_SETMASK, &set, 0, _NSIG/8);
+ __restore_sigs(&set);
return ret;
}
#include <signal.h>
#include <stdlib.h>
#include "syscall.h"
+#include "pthread_impl.h"
_Noreturn void siglongjmp(sigjmp_buf buf, int ret)
{
- if (buf->__fl) __syscall(SYS_rt_sigprocmask, SIG_SETMASK,
- buf->__ss, 0, _NSIG/8);
+ if (buf->__fl) __restore_sigs(buf->__ss);
longjmp(buf->__jb, ret);
}
* This is important to ensure that dynamically allocated TLS
* is not under-allocated/over-committed, and possibly for other
* reasons as well. */
- __syscall(SYS_rt_sigprocmask, SIG_BLOCK, SIGALL_SET, &set, _NSIG/8);
+ __block_all_sigs(&set);
/* Wait to unlock the kill lock, which governs functions like
* pthread_kill which target a thread id, until signals have
* stdio cleanup code a consistent state. */
if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
libc.threads_minus_1 = 0;
- __syscall(SYS_rt_sigprocmask, SIG_SETMASK, &set, 0, _NSIG/8);
+ __restore_sigs(&set);
exit(0);
}
self->detached = 2;
pthread_exit(0);
}
- __syscall(SYS_rt_sigprocmask, SIG_SETMASK,
- self->sigmask, 0, _NSIG/8);
+ __restore_sigs(self->sigmask);
}
if (self->unblock_cancel)
__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
}
if (attr._a_sched) {
do_sched = new->startlock[0] = 1;
- __syscall(SYS_rt_sigprocmask, SIG_BLOCK,
- SIGALL_SET, new->sigmask, _NSIG/8);
+ __block_app_sigs(new->sigmask);
}
new->unblock_cancel = self->cancel;
new->canary = self->canary;
__release_ptc();
if (do_sched) {
- __syscall(SYS_rt_sigprocmask, SIG_SETMASK,
- new->sigmask, 0, _NSIG/8);
+ __restore_sigs(new->sigmask);
}
if (ret < 0) {