From 1bcdaeee6e659f1d856717c9aa562a068f2f3bd4 Mon Sep 17 00:00:00 2001 From: Rich Felker Date: Wed, 10 Apr 2019 19:51:47 -0400 Subject: [PATCH] implement inline 5- and 6-argument syscalls for mipsn32 and mips64 n32 and n64 ABIs add new argument registers vs o32, so that passing on the stack is not necessary, so it's not clear why the 5- and 6-argument versions were special-cased to begin with; it seems to have been pattern-copying from arch/mips (o32). i've treated the new argument registers like the first 4 in terms of clobber status (non-clobbered). hopefully this is correct. --- arch/mips64/syscall_arch.h | 66 ++++++++++++++++++++++++------------- arch/mipsn32/syscall_arch.h | 31 +++++++++++++---- 2 files changed, 68 insertions(+), 29 deletions(-) diff --git a/arch/mips64/syscall_arch.h b/arch/mips64/syscall_arch.h index 28d0f934..99eebc32 100644 --- a/arch/mips64/syscall_arch.h +++ b/arch/mips64/syscall_arch.h @@ -1,9 +1,6 @@ #define __SYSCALL_LL_E(x) (x) #define __SYSCALL_LL_O(x) (x) -__attribute__((visibility("hidden"))) -long (__syscall)(long, ...); - #define SYSCALL_RLIM_INFINITY (-1UL/2) #include @@ -167,48 +164,71 @@ static inline long __syscall4(long n, long a, long b, long c, long d) static inline long __syscall5(long n, long a, long b, long c, long d, long e) { - long r2; - long old_b = b; - long old_c = c; struct kernel_stat kst; + long ret; + register long r4 __asm__("$4") = a; + register long r5 __asm__("$5") = b; + register long r6 __asm__("$6") = c; + register long r7 __asm__("$7") = d; + register long r8 __asm__("$8") = e; + register long r2 __asm__("$2"); if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) - b = (long) &kst; + r5 = (long) &kst; if (n == SYS_newfstatat) - c = (long) &kst; + r6 = (long) &kst; - r2 = (__syscall)(n, a, b, c, d, e); - if (r2 > -4096UL) return r2; + __asm__ __volatile__ ( + "daddu $2,$0,%2 ; syscall" + : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7), + "r"(r4), "r"(r5), "r"(r6), "r"(r8) + : "$1", "$3", "$9", 
"$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + if (r7) return -r2; + ret = r2; if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) - __stat_fix(&kst, (struct stat *)old_b); + __stat_fix(&kst, (struct stat *)b); if (n == SYS_newfstatat) - __stat_fix(&kst, (struct stat *)old_c); + __stat_fix(&kst, (struct stat *)c); - return r2; + return ret; } static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f) { - long r2; - long old_b = b; - long old_c = c; struct kernel_stat kst; + long ret; + register long r4 __asm__("$4") = a; + register long r5 __asm__("$5") = b; + register long r6 __asm__("$6") = c; + register long r7 __asm__("$7") = d; + register long r8 __asm__("$8") = e; + register long r9 __asm__("$9") = f; + register long r2 __asm__("$2"); if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) - b = (long) &kst; + r5 = (long) &kst; if (n == SYS_newfstatat) - c = (long) &kst; + r6 = (long) &kst; + + __asm__ __volatile__ ( + "daddu $2,$0,%2 ; syscall" + : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7), + "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9) + : "$1", "$3", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); - r2 = (__syscall)(n, a, b, c, d, e, f); - if (r2 > -4096UL) return r2; + if (r7) return -r2; + ret = r2; if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) - __stat_fix(&kst, (struct stat *)old_b); + __stat_fix(&kst, (struct stat *)b); if (n == SYS_newfstatat) - __stat_fix(&kst, (struct stat *)old_c); + __stat_fix(&kst, (struct stat *)c); - return r2; + return ret; } #define VDSO_USEFUL diff --git a/arch/mipsn32/syscall_arch.h b/arch/mipsn32/syscall_arch.h index f6a1fbae..2ebf0306 100644 --- a/arch/mipsn32/syscall_arch.h +++ b/arch/mipsn32/syscall_arch.h @@ -1,8 +1,6 @@ #define __SYSCALL_LL_E(x) (x) #define __SYSCALL_LL_O(x) (x) -hidden long (__syscall)(long, ...); - #define SYSCALL_RLIM_INFINITY (-1UL/2) #if _MIPSEL || __MIPSEL || __MIPSEL__ @@ -102,8 
+100,18 @@ static inline long __syscall4(long n, long a, long b, long c, long d) static inline long __syscall5(long n, long a, long b, long c, long d, long e) { - long r2 = (__syscall)(n, a, b, c, d, e); - if (r2 > -4096UL) return r2; + register long r4 __asm__("$4") = a; + register long r5 __asm__("$5") = b; + register long r6 __asm__("$6") = c; + register long r7 __asm__("$7") = d; + register long r8 __asm__("$8") = e; + register long r2 __asm__("$2"); + __asm__ __volatile__ ( + "addu $2,$0,%2 ; syscall" + : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7), + "r"(r4), "r"(r5), "r"(r6), "r"(r8) + : "$1", "$3", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) __stat_fix(b); if (n == SYS_newfstatat) __stat_fix(c); return r2; @@ -111,8 +119,19 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e) static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f) { - long r2 = (__syscall)(n, a, b, c, d, e, f); - if (r2 > -4096UL) return r2; + register long r4 __asm__("$4") = a; + register long r5 __asm__("$5") = b; + register long r6 __asm__("$6") = c; + register long r7 __asm__("$7") = d; + register long r8 __asm__("$8") = e; + register long r9 __asm__("$9") = f; + register long r2 __asm__("$2"); + __asm__ __volatile__ ( + "addu $2,$0,%2 ; syscall" + : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7), + "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9) + : "$1", "$3", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) __stat_fix(b); if (n == SYS_newfstatat) __stat_fix(c); return r2; -- 2.25.1