/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-405.h>

#define ATOMIC_INIT(i)          { (i) }
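/*
 * Illustrative use (hypothetical counter, not part of this header):
 *
 *	static atomic_t nr_pending = ATOMIC_INIT(0);
 *
 * ATOMIC_INIT() is for static initialisation; use atomic_set() below
 * for run-time initialisation.
 */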

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
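/*
 * These fences are consumed by the generic <linux/atomic.h> machinery,
 * which composes the _acquire/_release variants from the _relaxed
 * implementations defined below; a simplified sketch of the generic
 * fallback:
 *
 *	static inline int atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */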

static __inline__ int atomic_read(const atomic_t *v)
{
        int t;

        __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

        return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
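/*
 * Note: atomic_read()/atomic_set() are single-copy-atomic plain loads
 * and stores with no ordering guarantees of their own; when ordering
 * matters, pair them with the fences above or use the ordered
 * operations.
 */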

#define ATOMIC_OP(op, asm_op)                                           \
static __inline__ void atomic_##op(int a, atomic_t *v)                  \
{                                                                       \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%3         # atomic_" #op "\n"                     \
        #asm_op " %0,%2,%0\n"                                           \
        PPC405_ERR77(0,%3)                                              \
"       stwcx.  %0,0,%3 \n"                                             \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)      \
{                                                                       \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%3         # atomic_" #op "_return_relaxed\n"      \
        #asm_op " %0,%2,%0\n"                                           \
        PPC405_ERR77(0, %3)                                             \
"       stwcx.  %0,0,%3\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return t;                                                       \
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)                             \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)       \
{                                                                       \
        int res, t;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%4         # atomic_fetch_" #op "_relaxed\n"       \
        #asm_op " %1,%3,%0\n"                                           \
        PPC405_ERR77(0, %4)                                             \
"       stwcx.  %1,0,%4\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return res;                                                     \
}

#define ATOMIC_OPS(op, asm_op)                                          \
        ATOMIC_OP(op, asm_op)                                           \
        ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
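/*
 * The two instantiations above expand to:
 *
 *	atomic_add(), atomic_add_return_relaxed(), atomic_fetch_add_relaxed()
 *	atomic_sub(), atomic_sub_return_relaxed(), atomic_fetch_sub_relaxed()
 *
 * atomic_sub maps onto "subf" (subtract-from): subf %0,%2,%0 computes
 * %0 - %2, so the same operand order in the templates works for both
 * add and sub.
 */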

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)                                          \
        ATOMIC_OP(op, asm_op)                                           \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %0,0,%2 \n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc_return_relaxed\n"
"       addic   %0,%0,1\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec_return_relaxed\n"
"       addic   %0,%0,-1\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
        cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
        cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
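/*
 * Typical compare-and-swap loop built on atomic_cmpxchg(); a
 * hypothetical saturating increment, for illustration only:
 *
 *	static inline void sat_inc(atomic_t *v)	// not part of this header
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old != INT_MAX) {
 *			int prev = atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				break;		// swap succeeded
 *			old = prev;		// lost the race; retry
 *		}
 *	}
 */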

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%1         # atomic_fetch_add_unless\n\
        cmpw    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1 \n\
        bne-    1b \n"
        PPC_ATOMIC_EXIT_BARRIER
"       subf    %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
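/*
 * Illustrative use (hypothetical refcount): take a reference only if
 * the object is still live. The generic <linux/atomic.h> fallbacks
 * build atomic_add_unless() and atomic_inc_not_zero() on top of this
 * operation (this file provides its own atomic_inc_not_zero below).
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// was already zero; no reference taken
 */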

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
        int t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%2         # atomic_inc_not_zero\n\
        cmpwi   0,%0,0\n\
        beq-    2f\n\
        addic   %1,%0,1\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %1,0,%2\n\
        bne-    1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
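/*
 * Illustrative use (hypothetical lookup): the classic "get a reference
 * under RCU" pattern, where an object whose count has already hit zero
 * must not be resurrected:
 *
 *	rcu_read_lock();
 *	obj = radix_tree_lookup(&tree, id);	// hypothetical tree/id
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;		// found, but already being freed
 *	rcu_read_unlock();
 */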

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        cmpwi   %0,1\n\
        addi    %0,%0,-1\n\
        blt-    2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"     : "=&b" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
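/*
 * Illustrative use (hypothetical semaphore-like trydown): consume a
 * unit only if one is available. The return value is old - 1 even when
 * no decrement happened, so "< 0" means the count was already zero:
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;		// nothing to take
 */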

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)        { (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

        return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)                                         \
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)              \
{                                                                       \
        s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
        #asm_op " %0,%2,%0\n"                                           \
"       stdcx.  %0,0,%3 \n"                                             \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
static inline s64                                                       \
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)                    \
{                                                                       \
        s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%3         # atomic64_" #op "_return_relaxed\n"    \
        #asm_op " %0,%2,%0\n"                                           \
"       stdcx.  %0,0,%3\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return t;                                                       \
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                           \
static inline s64                                                       \
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)                     \
{                                                                       \
        s64 res, t;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%4         # atomic64_fetch_" #op "_relaxed\n"     \
        #asm_op " %1,%3,%0\n"                                           \
"       stdcx.  %1,0,%4\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return res;                                                     \
}

#define ATOMIC64_OPS(op, asm_op)                                        \
        ATOMIC64_OP(op, asm_op)                                         \
        ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)                                        \
        ATOMIC64_OP(op, asm_op)                                         \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_inc\n\
        addic   %0,%0,1\n\
        stdcx.  %0,0,%2 \n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_inc_return_relaxed\n"
"       addic   %0,%0,1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_dec\n\
        addic   %0,%0,-1\n\
        stdcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_dec_return_relaxed\n"
"       addic   %0,%0,-1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable v was not decremented.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stdcx.  %0,0,%1\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"     : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
        cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
        cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%1         # atomic64_fetch_add_unless\n\
        cmpd    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
"       stdcx.  %0,0,%1 \n\
        bne-    1b \n"
        PPC_ATOMIC_EXIT_BARRIER
"       subf    %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
        s64 t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%2         # atomic64_inc_not_zero\n\
        cmpdi   0,%0,0\n\
        beq-    2f\n\
        addic   %1,%0,1\n\
        stdcx.  %1,0,%2\n\
        bne-    1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */