// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
        /* Set when work->func() has finished executing */
        WORK_DONE_BIT,
        /* Set when work->ordered_func() has been called */
        WORK_ORDER_DONE_BIT,
        /* Queue this work on the high priority workqueue */
        WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
        struct workqueue_struct *normal_wq;

        /* File system this workqueue services */
        struct btrfs_fs_info *fs_info;

        /* List head pointing to ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding related variables */
        atomic_t pending;

        /* Upper limit of concurrently active workers */
        int limit_active;

        /* Current number of concurrently active workers */
        int current_active;

        /* Threshold at which current_active is adjusted */
        int thresh;
        /* Counter used to throttle calls to workqueue_set_max_active() */
        unsigned int count;
        /* Protects count and current_active */
        spinlock_t thres_lock;
};

struct btrfs_workqueue {
        struct __btrfs_workqueue *normal;
        struct __btrfs_workqueue *high;
};

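/* Return the filesystem that @wq was allocated for */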
struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
        return wq->fs_info;
}

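/* Return the filesystem that @work's workqueue services */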
struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
{
        return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
        /*
         * We could compare wq->normal->pending with num_online_cpus()
         * to support the "thresh == NO_THRESHOLD" case, but it would
         * require moving up the atomic_inc/dec calls in
         * thresh_queue_hook/thresh_exec_hook.  Let's postpone that
         * until someone needs it.
         */
        if (wq->normal->thresh == NO_THRESHOLD)
                return false;

        return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

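/*
 * Allocate one of the underlying workqueues (normal or high priority,
 * depending on WQ_HIGHPRI in @flags).  A @thresh of 0 selects
 * DFT_THRESHOLD; any other value below DFT_THRESHOLD disables
 * thresholding.  Returns NULL on allocation failure.
 */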
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
                        unsigned int flags, int limit_active, int thresh)
{
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->fs_info = fs_info;
        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For a low threshold, disabling thresholding is the better choice */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For a thresholdable wq, let its concurrency grow on
                 * demand.  Use a minimal max_active at alloc time to
                 * reduce resource usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        if (flags & WQ_HIGHPRI)
                ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
                                                 ret->current_active, name);
        else
                ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
                                                 ret->current_active, name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
        return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

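/*
 * Allocate a btrfs_workqueue.  The normal queue is always created; if
 * WQ_HIGHPRI is set in @flags, a second high priority queue is created
 * as well.  Returns NULL if any allocation fails.
 */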
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
                                              const char *name,
                                              unsigned int flags,
                                              int limit_active,
                                              int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->normal = __btrfs_alloc_workqueue(fs_info, name,
                                              flags & ~WQ_HIGHPRI,
                                              limit_active, thresh);
        if (!ret->normal) {
                kfree(ret);
                return NULL;
        }

        if (flags & WQ_HIGHPRI) {
                ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
                                                    limit_active, thresh);
                if (!ret->high) {
                        __btrfs_destroy_workqueue(ret->normal);
                        kfree(ret);
                        return NULL;
                }
        }
        return ret;
}

/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work().
 * This hook WILL be called in IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active() may
 * be called from here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active().
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change later, but that's OK since we don't need a
         * precise value to calculate new_current_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change)
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

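/*
 * Call the ordered completions of work items on wq->ordered_list in
 * queueing order, stopping at the first item whose WORK_DONE_BIT is not
 * yet set.  Freeing @self, the item the current worker is executing, is
 * deferred to the end so it cannot be recycled while the workqueue code
 * may still depend on it (see the comment in the body below).
 */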
static void run_ordered_work(struct __btrfs_workqueue *wq,
                             struct btrfs_work *self)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;
        void *wtag;
        bool free_self = false;

        while (1) {
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /*
                 * We are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns.
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* Now take the lock again and drop our item from the list */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                if (work == self) {
                        /*
                         * This is the work item that the worker is currently
                         * executing.
                         *
                         * The kernel workqueue code guarantees non-reentrancy
                         * of work items. I.e., if a work item with the same
                         * address and work function is queued twice, the second
                         * execution is blocked until the first one finishes. A
                         * work item may be freed and recycled with the same
                         * work function; the workqueue code assumes that the
                         * original work item cannot depend on the recycled work
                         * item in that case (see find_worker_executing_work()).
                         *
                         * Note that different types of Btrfs work can depend on
                         * each other, and one type of work on one Btrfs
                         * filesystem may even depend on the same type of work
                         * on another Btrfs filesystem via, e.g., a loop device.
                         * Therefore, we must not allow the current work item to
                         * be recycled until we are really done, otherwise we
                         * break the above assumption and can deadlock.
                         */
                        free_self = true;
                } else {
                        /*
                         * We don't want to call the ordered free functions with
                         * the lock held, though. Save the work as a tag for the
                         * trace event, because the callback could free the
                         * structure.
                         */
                        wtag = work;
                        work->ordered_free(work);
                        trace_btrfs_all_work_done(wq->fs_info, wtag);
                }
        }
        spin_unlock_irqrestore(lock, flags);

        if (free_self) {
                wtag = self;
                self->ordered_free(self);
                trace_btrfs_all_work_done(wq->fs_info, wtag);
        }
}

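/*
 * Generic worker callback for every btrfs_work item: run the threshold
 * hook, call work->func() and, for ordered work, mark the item done and
 * process the ordered list.
 */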
static void btrfs_work_helper(struct work_struct *normal_work)
{
        struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                                               normal_work);
        struct __btrfs_workqueue *wq;
        void *wtag;
        int need_order = 0;

        /*
         * We should not touch anything inside the work item in the
         * following cases:
         * 1) after work->func(), if it has no ordered_free, since the
         *    struct is freed in work->func();
         * 2) after setting WORK_DONE_BIT, because the work may be freed
         *    by other threads almost instantly.
         * So save the things we need here first.
         */
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;
        /* Safe for tracepoints in case work gets freed by the callback */
        wtag = work;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq, work);
        }
        if (!need_order)
                trace_btrfs_all_work_done(wq->fs_info, wtag);
}

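/*
 * Initialize a btrfs_work item.  @ordered_func and @ordered_free may be
 * NULL when ordered execution is not needed.
 */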
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
                     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, btrfs_work_helper);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}

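/*
 * Queue @work on the underlying kernel workqueue, after accounting for
 * thresholding and, for ordered work, adding it to the ordered list.
 */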
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                                      struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}

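/*
 * Queue @work on @wq, using the high priority queue when the work is
 * marked high priority and such a queue exists.
 */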
void btrfs_queue_work(struct btrfs_workqueue *wq,
                      struct btrfs_work *work)
{
        struct __btrfs_workqueue *dest_wq;

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
                dest_wq = wq->high;
        else
                dest_wq = wq->normal;
        __btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

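/*
 * Destroy @wq and its underlying workqueues.  destroy_workqueue() waits
 * for all pending work to finish first.
 */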
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        if (wq->high)
                __btrfs_destroy_workqueue(wq->high);
        __btrfs_destroy_workqueue(wq->normal);
        kfree(wq);
}

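/*
 * Set the upper limit on the number of concurrently active workers for
 * both queues.  For thresholded queues the new limit takes effect the
 * next time thresh_exec_hook() adjusts the concurrency.
 */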
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (!wq)
                return;
        wq->normal->limit_active = limit_active;
        if (wq->high)
                wq->high->limit_active = limit_active;
}

/* Mark @work to be queued on the high priority workqueue */
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

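/* Wait for all work currently queued on @wq to finish */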
void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
        if (wq->high)
                flush_workqueue(wq->high->normal_wq);

        flush_workqueue(wq->normal->normal_wq);
}