in a few places, non-hidden symbols were referenced from asm in ways
that assumed ld-time binding. while there is no semantic reason these
symbols need to be hidden, fixing the references without making them
hidden was going to be ugly, and hidden reduces some bloat anyway.
in the asm files, .global/.hidden directives have been moved to the
top to unclutter the actual code.
12 files changed:
+
+#ifdef SHARED
+__attribute__((__visibility__("hidden")))
+#endif
long __syscall_cp_internal(volatile void*, long long, long long, long long, long long,
long long, long long, long long);
long __syscall_cp_internal(volatile void*, long long, long long, long long, long long,
long long, long long, long long);
#define __fixup(X) do { if(X) X = (unsigned long) (&(struct __timespec_kernel) \
{ .tv_sec = __tsc(X)->tv_sec, .tv_nsec = __tsc(X)->tv_nsec}); } while(0)
#define __fixup(X) do { if(X) X = (unsigned long) (&(struct __timespec_kernel) \
{ .tv_sec = __tsc(X)->tv_sec, .tv_nsec = __tsc(X)->tv_nsec}); } while(0)
+#ifdef SHARED
+__attribute__((__visibility__("hidden")))
+#endif
long __syscall_cp_asm (volatile void * foo, long long n, long long a1, long long a2, long long a3,
long long a4, long long a5, long long a6) {
switch (n) {
long __syscall_cp_asm (volatile void * foo, long long n, long long a1, long long a2, long long a3,
long long a4, long long a5, long long a6) {
switch (n) {
// syscall(nr, u, v, w, x, y, z)
// x8 x0 x1 x2 x3 x4 x5
// syscall(nr, u, v, w, x, y, z)
// x8 x0 x1 x2 x3 x4 x5
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,%function
__syscall_cp_asm:
.type __syscall_cp_asm,%function
__syscall_cp_asm:
__cp_begin:
ldr w0,[x0]
cbnz w0,1f
__cp_begin:
ldr w0,[x0]
cbnz w0,1f
mov x4,x6
mov x5,x7
svc 0
mov x4,x6
mov x5,x7
svc 0
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,%function
__syscall_cp_asm:
mov ip,sp
stmfd sp!,{r4,r5,r6,r7,lr}
.type __syscall_cp_asm,%function
__syscall_cp_asm:
mov ip,sp
stmfd sp!,{r4,r5,r6,r7,lr}
__cp_begin:
ldr r0,[r0]
cmp r0,#0
__cp_begin:
ldr r0,[r0]
cmp r0,#0
mov r1,r3
ldmfd ip,{r2,r3,r4,r5,r6}
svc 0
mov r1,r3
ldmfd ip,{r2,r3,r4,r5,r6}
svc 0
__cp_end:
ldmfd sp!,{r4,r5,r6,r7,lr}
tst lr,#1
moveq pc,lr
bx lr
__cp_end:
ldmfd sp!,{r4,r5,r6,r7,lr}
tst lr,#1
moveq pc,lr
bx lr
__cp_cancel:
ldmfd sp!,{r4,r5,r6,r7,lr}
b __cancel
__cp_cancel:
ldmfd sp!,{r4,r5,r6,r7,lr}
b __cancel
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,@function
__syscall_cp_asm:
mov 4(%esp),%ecx
.type __syscall_cp_asm,@function
__syscall_cp_asm:
mov 4(%esp),%ecx
pushl %esi
pushl %edi
pushl %ebp
pushl %esi
pushl %edi
pushl %ebp
__cp_begin:
movl (%ecx),%eax
testl %eax,%eax
__cp_begin:
movl (%ecx),%eax
testl %eax,%eax
movl 44(%esp),%edi
movl 48(%esp),%ebp
int $128
movl 44(%esp),%edi
movl 48(%esp),%ebp
int $128
__cp_end:
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
__cp_end:
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
__cp_cancel:
popl %ebp
popl %edi
__cp_cancel:
popl %ebp
popl %edi
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,@function
__syscall_cp_asm:
.type __syscall_cp_asm,@function
__syscall_cp_asm:
__cp_begin:
lwi r5, r5, 0
__cp_begin:
lwi r5, r5, 0
addi r12, r6, 0
add r5, r7, r0
add r6, r8, r0
addi r12, r6, 0
add r5, r7, r0
add r6, r8, r0
lwi r9, r1, 28
lwi r10, r1, 32
brki r14, 0x8
lwi r9, r1, 28
lwi r10, r1, 32
brki r14, 0x8
__cp_end:
rtsd r15, 8
nop
__cp_end:
rtsd r15, 8
nop
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,@function
__syscall_cp_asm:
subu $sp, $sp, 32
.type __syscall_cp_asm,@function
__syscall_cp_asm:
subu $sp, $sp, 32
__cp_begin:
lw $4, 0($4)
bne $4, $0, __cp_cancel
__cp_begin:
lw $4, 0($4)
bne $4, $0, __cp_cancel
sw $2, 28($sp)
lw $2, 28($sp)
syscall
sw $2, 28($sp)
lw $2, 28($sp)
syscall
__cp_end:
beq $7, $0, 1f
addu $sp, $sp, 32
__cp_end:
beq $7, $0, 1f
addu $sp, $sp, 32
__cp_cancel:
addu $sp, $sp, 32
lw $25, %call16(__cancel)($gp)
__cp_cancel:
addu $sp, $sp, 32
lw $25, %call16(__cancel)($gp)
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,@function
__syscall_cp_asm:
.type __syscall_cp_asm,@function
__syscall_cp_asm:
__cp_begin:
l.lwz r3, 0(r3)
l.sfeqi r3, 0
__cp_begin:
l.lwz r3, 0(r3)
l.sfeqi r3, 0
l.ori r11, r4, 0
l.ori r3, r5, 0
l.ori r4, r6, 0
l.ori r11, r4, 0
l.ori r3, r5, 0
l.ori r4, r6, 0
l.lwz r7, 0(r1)
l.lwz r8, 4(r1)
l.sys 1
l.lwz r7, 0(r1)
l.lwz r8, 4(r1)
l.sys 1
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.global __syscall_cp_asm
+.hidden __syscall_cp_asm
+
#r0: volatile. may be modified during linkage.
#r1: stack frame: 16 byte alignment.
#r2: tls/thread pointer on pp32
#r0: volatile. may be modified during linkage.
#r1: stack frame: 16 byte alignment.
#r2: tls/thread pointer on pp32
#the fields CR2,CR3,CR4 of the cond reg must be preserved
#LR (link reg) shall contain the func's return address
.text
#the fields CR2,CR3,CR4 of the cond reg must be preserved
#LR (link reg) shall contain the func's return address
.text
- .global __syscall_cp_asm
.type __syscall_cp_asm,%function
__syscall_cp_asm:
# at enter: r3 = pointer to self->cancel, r4: syscall no, r5: first arg, r6: 2nd, r7: 3rd, r8: 4th, r9: 5th, r10: 6th
.type __syscall_cp_asm,%function
__syscall_cp_asm:
# at enter: r3 = pointer to self->cancel, r4: syscall no, r5: first arg, r6: 2nd, r7: 3rd, r8: 4th, r9: 5th, r10: 6th
__cp_begin:
# r3 holds first argument, it's a pointer to self->cancel.
# we must compare the dereferenced value with 0 and jump to __cancel if it's not
__cp_begin:
# r3 holds first argument, it's a pointer to self->cancel.
# we must compare the dereferenced value with 0 and jump to __cancel if it's not
mr 7, 9 # arg5
mr 8, 10 # arg6
sc
mr 7, 9 # arg5
mr 8, 10 # arg6
sc
__cp_end:
bnslr+ # return if no summary overflow.
#else negate result.
__cp_end:
bnslr+ # return if no summary overflow.
#else negate result.
#include "syscall.h"
#include "libc.h"
#include "syscall.h"
#include "libc.h"
+#ifdef SHARED
+#define hidden __attribute__((__visibility__("hidden")))
+#else
+#define hidden
+#endif
+
+hidden long __cancel()
{
pthread_t self = __pthread_self();
if (self->canceldisable == PTHREAD_CANCEL_ENABLE || self->cancelasync)
{
pthread_t self = __pthread_self();
if (self->canceldisable == PTHREAD_CANCEL_ENABLE || self->cancelasync)
* definition of __cp_cancel to undo those adjustments and call __cancel.
* Otherwise, __cancel provides a definition for __cp_cancel. */
* definition of __cp_cancel to undo those adjustments and call __cancel.
* Otherwise, __cancel provides a definition for __cp_cancel. */
-weak_alias(__cancel, __cp_cancel);
+hidden weak_alias(__cancel, __cp_cancel);
long __syscall_cp_asm(volatile void *, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t);
long __syscall_cp_asm(volatile void *, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t);
long __syscall_cp_c(syscall_arg_t nr,
syscall_arg_t u, syscall_arg_t v, syscall_arg_t w,
syscall_arg_t x, syscall_arg_t y, syscall_arg_t z)
long __syscall_cp_c(syscall_arg_t nr,
syscall_arg_t u, syscall_arg_t v, syscall_arg_t w,
syscall_arg_t x, syscall_arg_t y, syscall_arg_t z)
pthread_t self = __pthread_self();
ucontext_t *uc = ctx;
const char *ip = ((char **)&uc->uc_mcontext)[CANCEL_REG_IP];
pthread_t self = __pthread_self();
ucontext_t *uc = ctx;
const char *ip = ((char **)&uc->uc_mcontext)[CANCEL_REG_IP];
- extern const char __cp_begin[1], __cp_end[1];
+ hidden extern const char __cp_begin[1], __cp_end[1];
a_barrier();
if (!self->cancel || self->canceldisable == PTHREAD_CANCEL_DISABLE) return;
a_barrier();
if (!self->cancel || self->canceldisable == PTHREAD_CANCEL_DISABLE) return;
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm, @function
__syscall_cp_asm:
.type __syscall_cp_asm, @function
__syscall_cp_asm:
__cp_begin:
mov.l @r4, r4
tst r4, r4
__cp_begin:
mov.l @r4, r4
tst r4, r4
-L1: .long __cancel@PLT-(1b-.)
+L1: .long __cancel-(1b-.)
mov.l @(12,r15), r1
trapa #22
mov.l @(12,r15), r1
trapa #22
__cp_end:
! work around hardware bug
or r0, r0
__cp_end:
! work around hardware bug
or r0, r0
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
.global __syscall_cp_internal
.global __syscall_cp_internal
+.hidden __syscall_cp_internal
.type __syscall_cp_internal,@function
__syscall_cp_internal:
.type __syscall_cp_internal,@function
__syscall_cp_internal:
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
mov 16(%rsp),%r9
mov %r11,8(%rsp)
syscall
mov 16(%rsp),%r9
mov %r11,8(%rsp)
syscall
+.global __cp_begin
+.hidden __cp_begin
+.global __cp_end
+.hidden __cp_end
+.global __cp_cancel
+.hidden __cp_cancel
+.hidden __cancel
+.hidden __syscall_cp_asm
.type __syscall_cp_asm,@function
__syscall_cp_asm:
.type __syscall_cp_asm,@function
__syscall_cp_asm:
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
mov 16(%rsp),%r9
mov %r11,8(%rsp)
syscall
mov 16(%rsp),%r9
mov %r11,8(%rsp)
syscall