// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(eb->spinning_writers);
        eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(eb->spinning_writers != 1);
        eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
        atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
        eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
        eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
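
/*
 * Illustration (not part of the original file): with CONFIG_BTRFS_DEBUG=y
 * the assertions above turn unbalanced locking into loud failures.  A
 * hypothetical double unlock, for example:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_tree_unlock(eb);
 *	btrfs_tree_unlock(eb);	<-- BUG_ON(!eb->write_locks) fires in
 *				    btrfs_assert_tree_locked()
 */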

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        trace_btrfs_set_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        btrfs_assert_spinning_readers_put(eb);
        read_unlock(&eb->lock);
}

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        trace_btrfs_set_lock_blocking_write(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (eb->blocking_writers == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                eb->blocking_writers++;
                write_unlock(&eb->lock);
        }
}

void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        trace_btrfs_clear_lock_blocking_read(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        btrfs_assert_spinning_readers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        trace_btrfs_clear_lock_blocking_write(eb);
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        write_lock(&eb->lock);
        BUG_ON(eb->blocking_writers != 1);
        btrfs_assert_spinning_writers_get(eb);
        if (--eb->blocking_writers == 0)
                cond_wake_up(&eb->write_lock_wq);
}
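
/*
 * Illustration (not part of the original file): a minimal sketch of the
 * convert-and-restore pattern the four helpers above serve, shown for the
 * write side.  The work in the middle is a placeholder for anything that
 * may sleep while the lock is held.
 *
 *	btrfs_tree_lock(eb);			(spinning write lock)
 *	btrfs_set_lock_blocking_write(eb);	(about to sleep, drop the rwlock)
 *	...work that may sleep, e.g. reading a child block from disk...
 *	btrfs_clear_lock_blocking_write(eb);	(back to the spinning lock)
 *	btrfs_tree_unlock(eb);
 */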

/*
 * Take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_read_lock_enabled())
                start_ns = ktime_get_ns();
again:
        read_lock(&eb->lock);
        BUG_ON(eb->blocking_writers == 0 &&
               current->pid == eb->lock_owner);
        if (eb->blocking_writers && current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = true;
                read_unlock(&eb->lock);
                trace_btrfs_tree_read_lock(eb, start_ns);
                return;
        }
        if (eb->blocking_writers) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           eb->blocking_writers == 0);
                goto again;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_tree_read_lock(eb, start_ns);
}
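
/*
 * Illustration (not part of the original file): a typical read-side caller,
 * converting to a blocking read lock before sleeping and dropping it via
 * btrfs_tree_read_unlock_blocking() below.
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	...work that may sleep...
 *	btrfs_tree_read_unlock_blocking(eb);
 */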

/*
 * Take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (eb->blocking_writers)
                return 0;

        read_lock(&eb->lock);
        if (eb->blocking_writers) {
                read_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_tree_read_lock_atomic(eb);
        return 1;
}

/*
 * Try to take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (eb->blocking_writers)
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (eb->blocking_writers) {
                read_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_read_locks_get(eb);
        btrfs_assert_spinning_readers_get(eb);
        trace_btrfs_try_tree_read_lock(eb);
        return 1;
}

/*
 * Try to take a spinning write lock.
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        btrfs_assert_tree_write_locks_get(eb);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        trace_btrfs_try_tree_write_lock(eb);
        return 1;
}
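
/*
 * Illustration (not part of the original file): the usual trylock pattern.
 * The fallback policy is the caller's choice; waiting on btrfs_tree_lock()
 * here is just one hypothetical option.
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);	(contended: wait for the lock)
 *	...modify the extent buffer...
 *	btrfs_tree_unlock(eb);
 */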

/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock(eb);
        /*
         * If we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        btrfs_assert_spinning_readers_put(eb);
        btrfs_assert_tree_read_locks_put(eb);
        read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock_blocking(eb);
        /*
         * If we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Take a spinning write lock.  This will wait for both blocking readers
 * and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_lock_enabled())
                start_ns = ktime_get_ns();

        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        btrfs_assert_tree_write_locks_get(eb);
        eb->lock_owner = current->pid;
        trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = eb->blocking_writers;

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        trace_btrfs_tree_unlock(eb);
        eb->lock_owner = 0;
        btrfs_assert_tree_write_locks_put(eb);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                eb->blocking_writers--;
                /*
                 * We need to order modifying blocking_writers above with
                 * actually waking up the sleepers to ensure they see the
                 * updated value of blocking_writers.
                 */
                cond_wake_up(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}
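
/*
 * Illustration (not part of the original file): the plain spinning write
 * lock lifecycle with no blocking conversion, handled by the else branch
 * of btrfs_tree_unlock() above.
 *
 *	btrfs_tree_lock(eb);	(waits until no blocking readers/writers)
 *	...short, non-sleeping modification of the buffer...
 *	btrfs_tree_unlock(eb);	(spinning case: write_unlock())
 */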