/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)  { (i) }

#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
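
/*
 * Note (informational): per asm/fence.h, RISCV_ACQUIRE_BARRIER expands
 * to "fence r, rw" and RISCV_RELEASE_BARRIER to "fence rw, w" on SMP
 * builds (both are empty on !CONFIG_SMP), giving the conventional
 * RISC-V mappings for acquire and release ordering.
 */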

static __always_inline int atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}
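
/*
 * atomic_read()/atomic_set() are plain accesses through READ_ONCE() and
 * WRITE_ONCE(): naturally aligned word-sized loads and stores are
 * single-copy atomic on RISC-V, so the macros only need to stop the
 * compiler from tearing, fusing, or reordering the access.
 */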

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
        WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore
 * don't have the AQ or RL bits set.  These don't return anything, so
 * there's only one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)              \
static __always_inline                                                  \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)             \
{                                                                       \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " zero, %1, %0"      \
                : "+A" (v->counter)                                     \
                : "r" (I)                                               \
                : "memory");                                            \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )                           \
        ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)
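
/*
 * For illustration: ATOMIC_OPS(add, add, i) above generates, in the
 * 32-bit case, roughly
 *
 *      static __always_inline void atomic_add(int i, atomic_t *v)
 *      {
 *              __asm__ __volatile__ (
 *                      "       amoadd.w zero, %1, %0"
 *                      : "+A" (v->counter) : "r" (i) : "memory");
 *      }
 *
 * The destination register is zero, so the fetched old value is simply
 * discarded.  Note also that the A extension has no amosub: subtraction
 * is expressed as amoadd of the negated operand, hence the "-i" in
 * ATOMIC_OPS(sub, add, -i).
 */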

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)        \
static __always_inline                                                  \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,                  \
                                             atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " %1, %2, %0"        \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}                                                                       \
static __always_inline                                                  \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"  \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}
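
/*
 * For illustration: the two bodies above differ only in the AMO
 * ordering suffix.  atomic_fetch_add_relaxed() issues
 *
 *      amoadd.w        %1, %2, %0
 *
 * with no ordering bits set, while atomic_fetch_add() issues
 *
 *      amoadd.w.aqrl   %1, %2, %0
 *
 * whose aq+rl bits make the AMO fully ordered.  Both return the value
 * the counter held before the operation.
 */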

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline                                                  \
c_type atomic##prefix##_##op##_return_relaxed(c_type i,                 \
                                              atomic##prefix##_t *v)    \
{                                                                       \
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;      \
}                                                                       \
static __always_inline                                                  \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)  \
{                                                                       \
        return atomic##prefix##_fetch_##op(i, v) c_op I;                \
}
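
/*
 * The _return variants are pure wrappers: they redo the operation in C
 * on the fetched old value.  E.g. atomic_add_return(i, v) is exactly
 * atomic_fetch_add(i, v) + i, and atomic_sub_return(i, v) is
 * atomic_fetch_sub(i, v) + -i (c_op is "+" and I is "-i" for sub).
 */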

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )               \
        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

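/*
 * Defining each symbol to itself below tells the generic code in
 * <linux/atomic.h> that this architecture supplies these variants, so
 * only the missing ones (e.g. _acquire/_release here) are generated
 * from the relaxed versions plus the fences above.
 */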
#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_add_return               atomic_add_return
#define atomic_sub_return               atomic_sub_return

#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed
#define atomic_fetch_add                atomic_fetch_add
#define atomic_fetch_sub                atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_add_return             atomic64_add_return
#define atomic64_sub_return             atomic64_sub_return

#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed
#define atomic64_fetch_add              atomic64_fetch_add
#define atomic64_fetch_sub              atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )                      \
        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed
#define atomic_fetch_and                atomic_fetch_and
#define atomic_fetch_or                 atomic_fetch_or
#define atomic_fetch_xor                atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed
#define atomic64_fetch_and              atomic64_fetch_and
#define atomic64_fetch_or               atomic64_fetch_or
#define atomic64_fetch_xor              atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
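
/*
 * Typical use (a sketch, matching the generic kernel definitions):
 * atomic_inc_not_zero(v) can be built on this as
 *
 *      atomic_fetch_add_unless(v, 1, 0) != 0
 *
 * i.e. increment v unless it was zero, and report whether the old value
 * was nonzero.  The "fence rw, rw" runs only on the store-conditional
 * success path, which is exactly where the full barrier is required.
 */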

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)                                    \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_relaxed(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_acquire(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)         \
{                                                                       \
        return __xchg_release(&(v->counter), n, size);                  \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)                 \
{                                                                       \
        return __xchg(&(v->counter), n, size);                          \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_relaxed(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_acquire(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,             \
                                     c_t o, c_t n)                      \
{                                                                       \
        return __cmpxchg_release(&(v->counter), o, n, size);            \
}                                                                       \
static __always_inline                                                  \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)       \
{                                                                       \
        return __cmpxchg(&(v->counter), o, n, size);                    \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()                                                    \
        ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()                                                    \
        ATOMIC_OP(int,   , 4)                                           \
        ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()
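
/*
 * For illustration: ATOMIC_OP(int, , 4) above generates
 * atomic_xchg{,_relaxed,_acquire,_release}() and
 * atomic_cmpxchg{,_relaxed,_acquire,_release}() on top of the __xchg()
 * and __cmpxchg() helpers from asm/cmpxchg.h.  As usual,
 * atomic_cmpxchg(v, o, n) stores n only if the counter equals o and
 * returns the value it observed, so a caller checks success with
 *
 *      if (atomic_cmpxchg(v, old, new) == old)
 *              // exchange happened
 */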

#define atomic_xchg_relaxed atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg_acquire
#define atomic_xchg_release atomic_xchg_release
#define atomic_xchg atomic_xchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#define atomic_cmpxchg_release atomic_cmpxchg_release
#define atomic_cmpxchg atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic_dec_if_positive(v)       atomic_sub_if_positive(v, 1)
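
/*
 * Note that atomic_sub_if_positive() returns the would-be new value in
 * either case: if prev - offset is negative, the bltz skips the store
 * but the (negative) difference is still returned, so e.g.
 * atomic_dec_if_positive() lets callers distinguish "decremented" from
 * "left alone" by the sign of the result.
 */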

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic64_dec_if_positive(v)     atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */