/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))

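/*
 * For illustration only (hypothetical caller, not part of this file):
 *
 *      static atomic_t nr_users = ATOMIC_INIT(0);
 *      ...
 *      atomic_set(&nr_users, 1);
 *      if (atomic_read(&nr_users) == 1)
 *              ...
 *
 * READ_ONCE()/WRITE_ONCE() only stop the compiler from tearing, fusing
 * or caching the access; they impose no ordering against other CPUs.
 */
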
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic_" #op "\n"                       \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
}                                                                       \

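/*
 * As a sketch (assuming hypothetical ldrex()/strex() C spellings of the
 * exclusive-access instructions), ATOMIC_OP(add, +=, add) expands to
 * roughly:
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              int result;
 *              unsigned long failed;
 *
 *              do {
 *                      result = ldrex(&v->counter);
 *                      result += i;
 *                      failed = strex(result, &v->counter);
 *              } while (failed);       // retry until strex succeeds
 *      }
 *
 * The store-exclusive fails whenever another observer touched the line
 * between the load and the store, which is what makes the RMW atomic.
 */
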
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_" #op "_return\n"                \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)       \
{                                                                       \
        unsigned long tmp;                                              \
        int result, val;                                                \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_fetch_" #op "\n"                 \
"1:     ldrex   %0, [%4]\n"                                             \
"       " #asm_op "     %1, %0, %5\n"                                   \
"       strex   %2, %1, [%4]\n"                                         \
"       teq     %2, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

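/*
 * The three templates differ only in what they hand back: ATOMIC_OP()
 * returns nothing, ATOMIC_OP_RETURN() the new value, ATOMIC_FETCH_OP()
 * the value observed before the update.  With v->counter == 1,
 * atomic_add_return_relaxed(2, v) yields 3 while
 * atomic_fetch_add_relaxed(2, v) yields 1; both leave 3 in memory.
 */
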
#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed     atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed

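/*
 * Only _relaxed forms are provided here; the generic atomic layer
 * (include/linux/atomic.h) builds the _acquire, _release and fully
 * ordered variants out of them by adding the appropriate barriers.
 */
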
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed          atomic_cmpxchg_relaxed

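/*
 * Note the retry loop above: strexeq can fail spuriously (for example
 * after the exclusive monitor is cleared by an exception), so a
 * non-zero 'res' only means the store did not happen, not that the
 * comparison failed.  Looping until res == 0 guarantees that a return
 * value equal to 'old' really implies 'new' was stored.
 */
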
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1:     ldrex   %0, [%4]\n"
"       teq     %0, %5\n"
"       beq     2f\n"
"       add     %1, %0, %6\n"
"       strex   %2, %1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic_fetch_add_unless         atomic_fetch_add_unless

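/*
 * The smp_mb() on entry plus the conditional one on exit make the
 * operation fully ordered when the add is performed; when the old value
 * equals 'u', nothing is written and the second barrier is skipped.
 * Hypothetical use, taking a reference only on a live object:
 *
 *      if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *              return NULL;    // already dead, no reference taken
 */
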
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        raw_local_irq_restore(flags);                                   \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        int val;                                                        \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        val = v->counter;                                               \
        raw_local_irq_restore(flags);                                   \
                                                                        \
        return val;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        unsigned long flags;                                            \
        int val;                                                        \
                                                                        \
        raw_local_irq_save(flags);                                      \
        val = v->counter;                                               \
        v->counter c_op i;                                              \
        raw_local_irq_restore(flags);                                   \
                                                                        \
        return val;                                                     \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

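/*
 * On pre-ARMv6 hardware the only agent that can race with these plain
 * C read-modify-writes is an interrupt handler on the same CPU, so
 * briefly masking IRQs is sufficient; the #error above rules out SMP.
 */
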
#define atomic_fetch_andnot             atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

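/*
 * The expansions above generate, for each op in {add, sub}:
 * atomic_<op>(), atomic_<op>_return[_relaxed]() and
 * atomic_fetch_<op>[_relaxed]() -- the _relaxed suffix only on ARMv6+,
 * where the IRQ-masking fallbacks above are not used.
 */
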
#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 atomic64_read(const atomic64_t *v)
{
        s64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd    %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd    %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline s64 atomic64_read(const atomic64_t *v)
{
        s64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
        s64 tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
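
/*
 * With LPAE the architecture guarantees that ldrd/strd to a naturally
 * aligned 64-bit location are single-copy atomic, so plain loads and
 * stores suffice.  Without LPAE even atomic64_set() must use a
 * ldrexd/strexd loop so that concurrent readers can never observe a
 * torn half-written value.
 */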

#define ATOMIC64_OP(op, op1, op2)                                       \
static inline void atomic64_##op(s64 i, atomic64_t *v)                  \
{                                                                       \
        s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic64_" #op "\n"                     \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, op1, op2)                                \
static inline s64                                                       \
atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)                    \
{                                                                       \
        s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC64_FETCH_OP(op, op1, op2)                                 \
static inline s64                                                       \
atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)                     \
{                                                                       \
        s64 result, val;                                                \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n"               \
"1:     ldrexd  %0, %H0, [%4]\n"                                        \
"       " #op1 " %Q1, %Q0, %Q5\n"                                       \
"       " #op2 " %R1, %R0, %R5\n"                                       \
"       strexd  %2, %1, %H1, [%4]\n"                                    \
"       teq     %2, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_OP_RETURN(op, op1, op2)                                \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed   atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
        s64 oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed        atomic64_cmpxchg_relaxed

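/*
 * The 64-bit comparison is done one 32-bit half at a time: teqeq only
 * tests the second half if the first matched, and strexdeq executes
 * only when both halves equal 'old'.  As with the 32-bit version, the
 * loop retries on spurious store-exclusive failures.
 */
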
static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
        s64 result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed           atomic64_xchg_relaxed

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
        s64 result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %Q0, %Q0, #1\n"
"       sbc     %R0, %R0, #0\n"
"       teq     %R0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

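/*
 * atomic64_dec_if_positive() stores the decrement only when the result
 * stays non-negative, and always returns the decremented value, so a
 * negative return means v was left untouched.  Hypothetical use:
 *
 *      if (atomic64_dec_if_positive(&pool->credits) < 0)
 *              wait_for_credits(pool); // hypothetical helper
 */
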
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       beq     2f\n"
"       adds    %Q1, %Q0, %Q6\n"
"       adc     %R1, %R0, %R6\n"
"       strexd  %2, %1, %H1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

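/*
 * As in the 32-bit version, the barriers are skipped when no add is
 * performed, and the returned value is the one observed before the
 * (possible) add: a result equal to 'u' means v was left unchanged.
 */
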
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */