From 2dcbeabd917e404a0dde0195388da401b849b9a4 Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Wed, 17 Jul 2019 18:53:26 -0400
Subject: [PATCH] fix riscv64 atomic asm constraints

most egregious problem was the lack of memory clobber and lack of
volatile asm; this made the atomics memory barriers but not compiler
barriers. use of "+r" rather than "=r" for a clobbered temp was also
wrong, since the initial value is indeterminate.
---
 arch/riscv64/atomic_arch.h | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/riscv64/atomic_arch.h b/arch/riscv64/atomic_arch.h
index 98f12fc7..d0228a3e 100644
--- a/arch/riscv64/atomic_arch.h
+++ b/arch/riscv64/atomic_arch.h
@@ -8,13 +8,15 @@ static inline void a_barrier()
 static inline int a_cas(volatile int *p, int t, int s)
 {
 	int old, tmp;
-	__asm__("\n1:	lr.w.aqrl %0, %2\n"
+	__asm__ __volatile__ (
+		"\n1:	lr.w.aqrl %0, %2\n"
 		"	bne %0, %3, 1f\n"
 		"	sc.w.aqrl %1, %4, %2\n"
 		"	bnez %1, 1b\n"
 		"1:"
-		: "=&r"(old), "+r"(tmp), "+A"(*p)
-		: "r"(t), "r"(s));
+		: "=&r"(old), "=r"(tmp), "+A"(*p)
+		: "r"(t), "r"(s)
+		: "memory");
 	return old;
 }
 
@@ -23,12 +25,14 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	void *old;
 	int tmp;
-	__asm__("\n1:	lr.d.aqrl %0, %2\n"
+	__asm__ __volatile__ (
+		"\n1:	lr.d.aqrl %0, %2\n"
 		"	bne %0, %3, 1f\n"
 		"	sc.d.aqrl %1, %4, %2\n"
 		"	bnez %1, 1b\n"
 		"1:"
-		: "=&r"(old), "+r"(tmp), "+A"(*(long *)p)
-		: "r"(t), "r"(s));
+		: "=&r"(old), "=r"(tmp), "+A"(*(long *)p)
+		: "r"(t), "r"(s)
+		: "memory");
 	return old;
 }
-- 
2.25.1
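
A note on the memory-barrier-versus-compiler-barrier point in the commit
message: the sketch below (not part of the patch; publish, data, and flag
are invented names, and it assumes riscv64 with GCC-style inline asm)
illustrates the difference. Without the "memory" clobber the fence orders
the CPU's accesses, but the compiler may still keep values in registers
across the asm or move its own loads and stores past it; in the a_cas
asms, which also have output operands, the missing __volatile__
additionally allowed the whole asm to be reordered or dropped if its
outputs looked unused.

/* Illustrative sketch only, not musl code. */
static int data;
static volatile int flag;

/* CPU memory barrier only: no "memory" clobber, so the compiler is
 * still free to cache data in a register or sink the store past it. */
static inline void cpu_barrier_only(void)
{
	__asm__ ("fence rw,rw");
}

/* CPU barrier and compiler barrier, like the fence a_barrier() issues:
 * __volatile__ pins the asm in place and "memory" forces the compiler
 * to complete pending stores before it and reload memory after it. */
static inline void full_barrier(void)
{
	__asm__ __volatile__ ("fence rw,rw" : : : "memory");
}

void publish(int v)
{
	data = v;
	full_barrier();	/* store to data cannot be moved below this point */
	flag = 1;	/* a reader that observes flag==1 also observes data==v */
}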