/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/cmpxchg.h (Linux-libre 5.3.12-gnu)
 *
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

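/*
 * Each primitive below is emitted in two forms via ARM64_LSE_ATOMIC_INSN():
 * a load-/store-exclusive (LL/SC) loop, and an ARMv8.1 LSE sequence that the
 * alternatives framework patches in at boot when the CPU and the kernel
 * configuration support the LSE atomics.
 */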
/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}
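/*
 * The table below generates __xchg_case_{,acq_,rel_,mb_}{8,16,32,64}().
 * As an illustration (not verbatim compiler output), the acquire 32-bit
 * case expands to roughly this LL/SC loop:
 *
 *	prfm	pstl1strm, [ptr]
 *	1: ldaxr w0, [ptr]; stxr w1, w3, [ptr]; cbnz w1, 1b
 *
 * with a single "swpa" as the patched-in LSE alternative.
 */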

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

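/*
 * __xchg##sfx() dispatches on the operand size (1, 2, 4 or 8 bytes) to the
 * matching fixed-size routine above; any other size is rejected at compile
 * time by BUILD_BUG().
 */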
#define __XCHG_GEN(sfx)							\
static __always_inline  unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
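/*
 * Illustrative use, with a hypothetical variable (not part of this file):
 *
 *	unsigned long prev = arch_xchg(&some_flag, 1UL);
 *
 * The previous value of *ptr is returned; arch_xchg() is the fully ordered
 * variant, while the _relaxed/_acquire/_release forms weaken the ordering
 * accordingly.
 */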

#define __CMPXCHG_GEN(sfx)						\
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}
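/*
 * The fixed-size __cmpxchg_case_*() (and __cmpxchg_double*()) routines
 * dispatched to here are not defined in this file; they are provided by the
 * arm64 LL/SC and LSE atomics implementations pulled in via the includes at
 * the top.
 */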

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
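/*
 * Illustrative use, with a hypothetical 'owner' variable (not part of this
 * file): the previous value is returned, so the swap took effect only if
 * the return value equals the expected 'old' argument.
 *
 *	if (arch_cmpxchg(&owner, 0UL, me) == 0UL)
 *		... we installed 'me' ...
 */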

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local
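/*
 * The arch_cmpxchg*() paths above already handle 64-bit operands natively
 * on arm64, so the cmpxchg64 interfaces are plain aliases.
 */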

/* cmpxchg_double */
#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
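/*
 * arch_cmpxchg_double() atomically compares and swaps a pair of adjacent
 * 64-bit words (ptr2 must directly follow ptr1, as checked above) and
 * evaluates to 1 on success, 0 on failure; the underlying helpers report
 * success as zero, hence the negation.
 */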

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}
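/*
 * How the wait works: sevl arms the local event register so that the first
 * wfe falls straight through; ldxr then loads the value and sets the
 * exclusive monitor on the address. If the value already differs from 'val'
 * we branch out, otherwise the second wfe sleeps until an event, such as the
 * exclusive monitor being cleared by another CPU writing the location.
 */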

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE
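/*
 * Only the plain (relaxed) form of __cmpwait() is generated below; callers
 * layer any required ordering on top of the wait.
 */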

#define __CMPWAIT_GEN(sfx)						\
static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

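/*
 * __cmpwait_relaxed() below is the hook used by the arm64
 * smp_cond_load_relaxed()/smp_cond_load_acquire() loops in <asm/barrier.h>
 * to wait for a location to change without spinning.
 */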
#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

#endif	/* __ASM_CMPXCHG_H */