/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_LNKGET_H
#define __ASM_SPINLOCK_LNKGET_H

#include <asm/barrier.h>		/* smp_mb() */
#include <asm/spinlock_types.h>		/* arch_spinlock_t, arch_rwlock_t */

/*
 * None of these asm statements clobber memory as LNKSET writes around
 * the cache, so the memory it modifies cannot safely be read by any
 * means other than these accessors.
 */

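/*
 * Everything below is built on the Meta LNKGET/LNKSET linked
 * load/store pair: LNKGETD takes a linked load of the lock word, a
 * conditional LNKSET variant stores a new value only if its condition
 * holds, and the DEFR/ANDT/CMPT sequence reads back TXSTAT to check
 * whether the linked store actually landed.  A rough C sketch of the
 * retry loop (linked_load()/linked_store() are hypothetical helpers,
 * not a real Meta API):
 *
 *	do {
 *		old = linked_load(&lock->lock);		 (LNKGETD)
 *		newval = transform(old);		 (e.g. set a bit)
 *	} while (!linked_store(&lock->lock, newval));	 (LNKSET + TXSTAT)
 *
 * Plain readers such as arch_spin_is_locked() also go through LNKGETD
 * so that they observe the same storage the LNKSET instructions write.
 */
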
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	int ret;

	asm volatile ("LNKGETD  %0, [%1]\n"
		      "TST      %0, #1\n"
		      "MOV      %0, #1\n"
		      "XORZ     %0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&lock->lock)
		      : "cc");
	return ret;
}

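/*
 * Acquire: spin until the low bit is clear, then try to set it with a
 * linked store (LNKSETDZ only performs the store while the Z flag from
 * the TST is set); restart both on contention and on a failed linked
 * store (the TXSTAT check).
 */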
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int tmp;

	asm volatile ("1:     LNKGETD %0,[%1]\n"
		      "       TST     %0, #1\n"
		      "       ADD     %0, %0, #1\n"
		      "       LNKSETDZ [%1], %0\n"
		      "       BNZ     1b\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       BNZ     1b\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();
}

/* Returns 0 if we failed to acquire the lock, 1 on success */
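/*
 * The MOV sets a provisional 1; the final XORNZ clears it again if the
 * lock was already held or the linked store failed.
 */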
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;

	asm volatile ("       LNKGETD %0,[%1]\n"
		      "       TST     %0, #1\n"
		      "       ADD     %0, %0, #1\n"
		      "       LNKSETDZ [%1], %0\n"
		      "       BNZ     1f\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       MOV     %0, #1\n"
		      "1:     XORNZ   %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	asm volatile ("       SETD    [%0], %1\n"
		      :
		      : "da" (&lock->lock), "da" (0)
		      : "memory");
}
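
/*
 * Note the ordering convention used throughout this file: smp_mb()
 * after taking a lock and before releasing it, so that accesses inside
 * the critical section cannot be reordered past the lock word updates.
 */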

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
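
/*
 * Lock word layout used below: 0 means unlocked, 0x80000000 means
 * write locked, and 1..0x7fffffff counts the active readers.
 */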

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1:     LNKGETD %0,[%1]\n"
		      "       CMP     %0, #0\n"
		      "       ADD     %0, %0, %2\n"
		      "       LNKSETDZ [%1], %0\n"
		      "       BNZ     1b\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       BNZ     1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("       LNKGETD %0,[%1]\n"
		      "       CMP     %0, #0\n"
		      "       ADD     %0, %0, %2\n"
		      "       LNKSETDZ [%1], %0\n"
		      "       BNZ     1f\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       MOV     %0, #1\n"
		      "1:     XORNZ   %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();

	return tmp;
}

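/*
 * As noted above, the writer holds the lock exclusively, so a plain
 * SETD of zero is enough here; no linked store is needed.
 */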
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	asm volatile ("       SETD    [%0], %1\n"
		      :
		      : "da" (&rw->lock), "da" (0)
		      : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	int ret;

	asm volatile ("LNKGETD  %0, [%1]\n"
		      "CMP      %0, #0\n"
		      "MOV      %0, #1\n"
		      "XORNZ    %0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&rw->lock)
		      : "cc");
	return ret;
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *  - If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1:     LNKGETD %0,[%1]\n"
		      "       ADDS    %0, %0, #1\n"
		      "       LNKSETDPL [%1], %0\n"
		      "       BMI     1b\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       BNZ     1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();
}
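
/*
 * Rough C equivalent of the loop above, reusing the hypothetical
 * linked_load()/linked_store() helpers from the sketch at the top:
 *
 *	do {
 *		tmp = linked_load(&rw->lock) + 1;
 *	} while (tmp < 0 ||			 (a writer holds the lock)
 *		 !linked_store(&rw->lock, tmp));
 */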

/*
 * Drop one reader: decrement unconditionally, retrying only if another
 * thread disturbed the linked store.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int tmp;

	smp_mb();

	asm volatile ("1:     LNKGETD %0,[%1]\n"
		      "       SUB     %0, %0, #1\n"
		      "       LNKSETD [%1], %0\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       BNZ     1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("       LNKGETD %0,[%1]\n"
		      "       ADDS    %0, %0, #1\n"
		      "       LNKSETDPL [%1], %0\n"
		      "       BMI     1f\n"
		      "       DEFR    %0, TXSTAT\n"
		      "       ANDT    %0, %0, #HI(0x3f000000)\n"
		      "       CMPT    %0, #HI(0x02000000)\n"
		      "       MOV     %0, #1\n"
		      "       BZ      2f\n"
		      "1:     MOV     %0, #0\n"
		      "2:\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();

	return tmp;
}

/* read_can_lock - would read_trylock() succeed? */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("LNKGETD  %0, [%1]\n"
		      "CMP      %0, %2\n"
		      "MOV      %0, #1\n"
		      "XORZ     %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");
	return tmp;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
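
/*
 * None of these are called directly; the generic spin_lock(),
 * read_lock() and write_lock() families in <linux/spinlock.h> are
 * built on these arch_* hooks.
 */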

#endif /* __ASM_SPINLOCK_LNKGET_H */