/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */

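/*
 * A minimal usage sketch of the public entry points above (hypothetical
 * caller, not part of this file; it assumes a valid btrfs_root and process
 * context). The all-keys range mirrors how callers such as scrub use it:
 */
static void __maybe_unused reada_usage_example(struct btrfs_root *root)
{
        struct btrfs_key key_start = { .objectid = 0, .type = 0, .offset = 0 };
        struct btrfs_key key_end = {
                .objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
        };
        struct reada_control *rc;

        rc = btrfs_reada_add(root, &key_start, &key_end);
        if (IS_ERR(rc))
                return;
        /* either block until all triggered reads have completed ... */
        btrfs_reada_wait(rc);
        /* ... or send it to the background: btrfs_reada_detach(rc); */
}
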
#define MAX_IN_FLIGHT 6

struct reada_extctl {
        struct list_head        list;
        struct reada_control    *rc;
        u64                     generation;
};

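/*
 * One tree block scheduled for readahead, keyed by its logical start in
 * both the fs-wide and the per-device radix trees. refcnt is a plain int
 * protected by fs_info->reada_lock.
 */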
struct reada_extent {
        u64                     logical;
        struct btrfs_key        top;
        int                     err;
        struct list_head        extctl;
        int                     refcnt;
        spinlock_t              lock;
        struct reada_zone       *zones[BTRFS_MAX_MIRRORS];
        int                     nzones;
        struct btrfs_device     *scheduled_for;
};

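/*
 * A reada_zone describes, for one device, the logical range of a block
 * group that this device holds a stripe of. Each device keeps its zones
 * in its reada_zones radix tree, keyed by the zone's end offset; devs[]
 * lists all devices that carry a mirror of the same range.
 */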
struct reada_zone {
        u64                     start;
        u64                     end;
        u64                     elems;
        struct list_head        list;
        spinlock_t              lock;
        int                     locked;
        struct btrfs_device     *device;
        struct btrfs_device     *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
                                                           * self */
        int                     ndevs;
        struct kref             refcnt;
};

struct reada_machine_work {
        struct btrfs_work       work;
        struct btrfs_fs_info    *fs_info;
};

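/*
 * Illustrative helper, not used below: every radix tree in this file is
 * keyed at page granularity. Extents are keyed by their logical start,
 * zones by their end offset.
 */
static inline unsigned long __maybe_unused reada_tree_index(u64 logical)
{
        return logical >> PAGE_CACHE_SHIFT;
}
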
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
                            u64 start, int err)
{
        int level = 0;
        int nritems;
        int i;
        u64 bytenr;
        u64 generation;
        struct reada_extent *re;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head list;
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        struct btrfs_device *for_dev;

        if (eb)
                level = btrfs_header_level(eb);

        /* find extent */
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);

        if (!re)
                return -1;

        spin_lock(&re->lock);
        /*
         * just take the full list from the extent. afterwards we
         * don't need the lock anymore
         */
        list_replace_init(&re->extctl, &list);
        for_dev = re->scheduled_for;
        re->scheduled_for = NULL;
        spin_unlock(&re->lock);

        if (err == 0) {
                nritems = level ? btrfs_header_nritems(eb) : 0;
                generation = btrfs_header_generation(eb);
                /*
                 * FIXME: currently we just set nritems to 0 if this is a leaf,
                 * effectively ignoring the content. In a next step we could
                 * trigger more readahead depending on the content, e.g.
                 * fetch the checksums for the extents in the leaf.
                 */
        } else {
                /*
                 * this is the error case, the extent buffer has not been
                 * read correctly. We won't access anything from it and
                 * just clean up our data structures. Effectively this will
                 * cut the branch below this node from read ahead.
                 */
                nritems = 0;
                generation = 0;
        }

        for (i = 0; i < nritems; i++) {
                struct reada_extctl *rec;
                u64 n_gen;
                struct btrfs_key key;
                struct btrfs_key next_key;

                btrfs_node_key_to_cpu(eb, &key, i);
                if (i + 1 < nritems)
                        btrfs_node_key_to_cpu(eb, &next_key, i + 1);
                else
                        next_key = re->top;
                bytenr = btrfs_node_blockptr(eb, i);
                n_gen = btrfs_node_ptr_generation(eb, i);

                list_for_each_entry(rec, &list, list) {
                        struct reada_control *rc = rec->rc;

                        /*
                         * if the generation doesn't match, just ignore this
                         * extctl. This will probably cut off a branch from
                         * prefetch. Alternatively one could start a new (sub-)
                         * prefetch for this branch, starting again from root.
                         * FIXME: move the generation check out of this loop
                         */
#ifdef DEBUG
                        if (rec->generation != generation) {
                                btrfs_debug(root->fs_info,
                                           "generation mismatch for (%llu,%d,%llu) %llu != %llu",
                                       key.objectid, key.type, key.offset,
                                       rec->generation, generation);
                        }
#endif
                        if (rec->generation == generation &&
                            btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
                            btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
                                reada_add_block(rc, bytenr, &next_key,
                                                level - 1, n_gen);
                }
        }
        /*
         * free extctl records
         */
        while (!list_empty(&list)) {
                struct reada_control *rc;
                struct reada_extctl *rec;

                rec = list_first_entry(&list, struct reada_extctl, list);
                list_del(&rec->list);
                rc = rec->rc;
                kfree(rec);

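                /*
                 * Take a temporary ref on rc so that dropping the "have
                 * elements" ref below cannot free it before the wake_up.
                 */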
                kref_get(&rc->refcnt);
                if (atomic_dec_and_test(&rc->elems)) {
                        kref_put(&rc->refcnt, reada_control_release);
                        wake_up(&rc->wait);
                }
                kref_put(&rc->refcnt, reada_control_release);

                reada_extent_put(fs_info, re);  /* one ref for each entry */
        }
        reada_extent_put(fs_info, re);  /* our ref */
        if (for_dev)
                atomic_dec(&for_dev->reada_in_flight);

        return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
                         u64 start, int err)
{
        int ret;

        ret = __readahead_hook(root, eb, start, err);

        reada_start_machine(root->fs_info);

        return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
                                          struct btrfs_device *dev, u64 logical,
                                          struct btrfs_bio *bbio)
{
        int ret;
        struct reada_zone *zone;
        struct btrfs_block_group_cache *cache = NULL;
        u64 start;
        u64 end;
        int i;

        zone = NULL;
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                     logical >> PAGE_CACHE_SHIFT, 1);
        if (ret == 1)
                kref_get(&zone->refcnt);
        spin_unlock(&fs_info->reada_lock);

        if (ret == 1) {
                if (logical >= zone->start && logical < zone->end)
                        return zone;
                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }

        cache = btrfs_lookup_block_group(fs_info, logical);
        if (!cache)
                return NULL;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;
        btrfs_put_block_group(cache);

        zone = kzalloc(sizeof(*zone), GFP_NOFS);
        if (!zone)
                return NULL;

        zone->start = start;
        zone->end = end;
        INIT_LIST_HEAD(&zone->list);
        spin_lock_init(&zone->lock);
        zone->locked = 0;
        kref_init(&zone->refcnt);
        zone->elems = 0;
        zone->device = dev; /* our device always sits at index 0 */
        for (i = 0; i < bbio->num_stripes; ++i) {
                /* bounds have already been checked */
                zone->devs[i] = bbio->stripes[i].dev;
        }
        zone->ndevs = bbio->num_stripes;

        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&dev->reada_zones,
                                (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
                                zone);

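        /*
         * -EEXIST means another thread won the insertion race: drop our
         * copy and take a reference on the zone that is already there.
         */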
        if (ret == -EEXIST) {
                kfree(zone);
                ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                             logical >> PAGE_CACHE_SHIFT, 1);
                if (ret == 1)
                        kref_get(&zone->refcnt);
        }
        spin_unlock(&fs_info->reada_lock);

        return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_root *root,
                                              u64 logical,
                                              struct btrfs_key *top, int level)
{
        int ret;
        struct reada_extent *re = NULL;
        struct reada_extent *re_exist = NULL;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_bio *bbio = NULL;
        struct btrfs_device *dev;
        struct btrfs_device *prev_dev;
        u32 blocksize;
        u64 length;
        int real_stripes;
        int nzones = 0;
        int i;
        unsigned long index = logical >> PAGE_CACHE_SHIFT;
        int dev_replace_is_ongoing;

        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);

        if (re)
                return re;

        re = kzalloc(sizeof(*re), GFP_NOFS);
        if (!re)
                return NULL;

        blocksize = root->nodesize;
        re->logical = logical;
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
        re->refcnt = 1;

        /*
         * map block
         */
        length = blocksize;
        ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
                              &bbio, 0);
        if (ret || !bbio || length < blocksize)
                goto error;

        if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
                btrfs_err(root->fs_info,
                           "readahead: more than %d copies not supported",
                           BTRFS_MAX_MIRRORS);
                goto error;
        }

        real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        for (nzones = 0; nzones < real_stripes; ++nzones) {
                struct reada_zone *zone;

                dev = bbio->stripes[nzones].dev;
                zone = reada_find_zone(fs_info, dev, logical, bbio);
                if (!zone)
                        break;

                re->zones[nzones] = zone;
                spin_lock(&zone->lock);
                if (!zone->elems)
                        kref_get(&zone->refcnt);
                ++zone->elems;
                spin_unlock(&zone->lock);
                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        re->nzones = nzones;
        if (nzones == 0) {
                /* not a single zone found, error and out */
                goto error;
        }

        /* insert extent in reada_tree + all per-device trees, all or nothing */
        btrfs_dev_replace_lock(&fs_info->dev_replace);
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&fs_info->reada_tree, index, re);
        if (ret == -EEXIST) {
                re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
                BUG_ON(!re_exist);
                re_exist->refcnt++;
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_unlock(&fs_info->dev_replace);
                goto error;
        }
        if (ret) {
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_unlock(&fs_info->dev_replace);
                goto error;
        }
        prev_dev = NULL;
        dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
                        &fs_info->dev_replace);
        for (i = 0; i < nzones; ++i) {
                dev = bbio->stripes[i].dev;
                if (dev == prev_dev) {
                        /*
                         * in case of DUP, just add the first zone. As both
                         * are on the same device, there's nothing to gain
                         * from adding both.
                         * Also, it wouldn't work, as the tree is per device
                         * and adding would fail with EEXIST
                         */
                        continue;
                }
                if (!dev->bdev) {
                        /*
                         * cannot read ahead on a missing device, but for
                         * RAID5/6, REQ_GET_READ_MIRRORS returns 1, so don't
                         * skip the missing device in that case.
                         */
                        if (nzones > 1)
                                continue;
                }
                if (dev_replace_is_ongoing &&
                    dev == fs_info->dev_replace.tgtdev) {
                        /*
                         * as this device is selected for reading only as
                         * a last resort, skip it for read ahead.
                         */
                        continue;
                }
                prev_dev = dev;
                ret = radix_tree_insert(&dev->reada_extents, index, re);
                if (ret) {
                        while (--i >= 0) {
                                dev = bbio->stripes[i].dev;
                                BUG_ON(dev == NULL);
                                /* ignore whether the entry was inserted */
                                radix_tree_delete(&dev->reada_extents, index);
                        }
                        BUG_ON(fs_info == NULL);
                        radix_tree_delete(&fs_info->reada_tree, index);
                        spin_unlock(&fs_info->reada_lock);
                        btrfs_dev_replace_unlock(&fs_info->dev_replace);
                        goto error;
                }
        }
        spin_unlock(&fs_info->reada_lock);
        btrfs_dev_replace_unlock(&fs_info->dev_replace);

        btrfs_put_bbio(bbio);
        return re;

error:
        while (nzones) {
                struct reada_zone *zone;

                --nzones;
                zone = re->zones[nzones];
                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /*
                         * no fs_info->reada_lock needed, as this can't be
                         * the last ref
                         */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        btrfs_put_bbio(bbio);
        kfree(re);
        return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
{
        int i;
        unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

        spin_lock(&fs_info->reada_lock);
        if (--re->refcnt) {
                spin_unlock(&fs_info->reada_lock);
                return;
        }

        radix_tree_delete(&fs_info->reada_tree, index);
        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                radix_tree_delete(&zone->device->reada_extents, index);
        }

        spin_unlock(&fs_info->reada_lock);

        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /*
                         * no fs_info->reada_lock needed, as this can't be
                         * the last ref
                         */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        if (re->scheduled_for)
                atomic_dec(&re->scheduled_for->reada_in_flight);

        kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
        struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

        radix_tree_delete(&zone->device->reada_zones,
                          zone->end >> PAGE_CACHE_SHIFT);

        kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
        struct reada_control *rc = container_of(kref, struct reada_control,
                                                refcnt);

        kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, int level, u64 generation)
{
        struct btrfs_root *root = rc->root;
        struct reada_extent *re;
        struct reada_extctl *rec;

        re = reada_find_extent(root, logical, top, level); /* takes one ref */
        if (!re)
                return -1;

        rec = kzalloc(sizeof(*rec), GFP_NOFS);
        if (!rec) {
                reada_extent_put(root->fs_info, re);
                return -ENOMEM;
        }

        rec->rc = rc;
        rec->generation = generation;
        atomic_inc(&rc->elems);

        spin_lock(&re->lock);
        list_add_tail(&rec->list, &re->extctl);
        spin_unlock(&re->lock);

        /* leave the ref on the extent */

        return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
        int i;
        unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

        for (i = 0; i < zone->ndevs; ++i) {
                struct reada_zone *peer;

                peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
                if (peer && peer->device != zone->device)
                        peer->locked = lock;
        }
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
        struct reada_zone *top_zone = NULL;
        struct reada_zone *top_locked_zone = NULL;
        u64 top_elems = 0;
        u64 top_locked_elems = 0;
        unsigned long index = 0;
        int ret;

        if (dev->reada_curr_zone) {
                reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
                kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
                dev->reada_curr_zone = NULL;
        }
        /* pick the zone with the most elements */
        while (1) {
                struct reada_zone *zone;

                ret = radix_tree_gang_lookup(&dev->reada_zones,
                                             (void **)&zone, index, 1);
                if (ret == 0)
                        break;
                index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
                if (zone->locked) {
                        if (zone->elems > top_locked_elems) {
                                top_locked_elems = zone->elems;
                                top_locked_zone = zone;
                        }
                } else {
                        if (zone->elems > top_elems) {
                                top_elems = zone->elems;
                                top_zone = zone;
                        }
                }
        }
        if (top_zone)
                dev->reada_curr_zone = top_zone;
        else if (top_locked_zone)
                dev->reada_curr_zone = top_locked_zone;
        else
                return 0;

        dev->reada_next = dev->reada_curr_zone->start;
        kref_get(&dev->reada_curr_zone->refcnt);
        reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

        return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                                   struct btrfs_device *dev)
{
        struct reada_extent *re = NULL;
        int mirror_num = 0;
        struct extent_buffer *eb = NULL;
        u64 logical;
        int ret;
        int i;
        int need_kick = 0;

        spin_lock(&fs_info->reada_lock);
        if (dev->reada_curr_zone == NULL) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
        }
        /*
         * FIXME currently we issue the reads one extent at a time. If we have
         * a contiguous block of extents, we could also coalesce them or use
         * plugging to speed things up
         */
        ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
        if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
                re = NULL;
                ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                        dev->reada_next >> PAGE_CACHE_SHIFT, 1);
        }
        if (ret == 0) {
                spin_unlock(&fs_info->reada_lock);
                return 0;
        }
        dev->reada_next = re->logical + fs_info->tree_root->nodesize;
        re->refcnt++;

        spin_unlock(&fs_info->reada_lock);

        /*
         * find mirror num
         */
        for (i = 0; i < re->nzones; ++i) {
                if (re->zones[i]->device == dev) {
                        mirror_num = i + 1;
                        break;
                }
        }
        logical = re->logical;

        spin_lock(&re->lock);
        if (re->scheduled_for == NULL) {
                re->scheduled_for = dev;
                need_kick = 1;
        }
        spin_unlock(&re->lock);

        reada_extent_put(fs_info, re);

        if (!need_kick)
                return 0;

        atomic_inc(&dev->reada_in_flight);
        ret = reada_tree_block_flagged(fs_info->extent_root, logical,
                        mirror_num, &eb);
        if (ret)
                __readahead_hook(fs_info->extent_root, NULL, logical, ret);
        else if (eb)
                __readahead_hook(fs_info->extent_root, eb, eb->start, ret);

        if (eb)
                free_extent_buffer(eb);

        return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
        struct reada_machine_work *rmw;
        struct btrfs_fs_info *fs_info;
        int old_ioprio;

        rmw = container_of(work, struct reada_machine_work, work);
        fs_info = rmw->fs_info;

        kfree(rmw);

        old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
                                       task_nice_ioprio(current));
        set_task_ioprio(current, BTRFS_IOPRIO_READA);
        __reada_start_machine(fs_info);
        set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u64 enqueued;
        u64 total = 0;
        int i;

again:
        do {
                enqueued = 0;
                mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry(device, &fs_devices->devices, dev_list) {
                        if (atomic_read(&device->reada_in_flight) <
                            MAX_IN_FLIGHT)
                                enqueued += reada_start_machine_dev(fs_info,
                                                                    device);
                }
                mutex_unlock(&fs_devices->device_list_mutex);
                total += enqueued;
        } while (enqueued && total < 10000);
        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        if (enqueued == 0)
                return;

        /*
         * If everything is already in the cache, this is effectively single
         * threaded. To a) not hold the caller for too long and b) to utilize
         * more cores, we broke the loop above after 10000 iterations and now
         * enqueue to workers to finish it. This will distribute the load to
         * the cores.
         */
        for (i = 0; i < 2; ++i)
                reada_start_machine(fs_info);
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct reada_machine_work *rmw;

        rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
        if (!rmw) {
                /* FIXME we cannot handle this properly right now */
                BUG();
        }
        btrfs_init_work(&rmw->work, btrfs_readahead_helper,
                        reada_start_machine_worker, NULL, NULL);
        rmw->fs_info = fs_info;

        btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        unsigned long index;
        int ret;
        int i;
        int j;
        int cnt;

        spin_lock(&fs_info->reada_lock);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
                        atomic_read(&device->reada_in_flight));
                index = 0;
                while (1) {
                        struct reada_zone *zone;

                        ret = radix_tree_gang_lookup(&device->reada_zones,
                                                     (void **)&zone, index, 1);
                        if (ret == 0)
                                break;
                        printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
                                "%d devs", zone->start, zone->end, zone->elems,
                                zone->locked);
                        for (j = 0; j < zone->ndevs; ++j) {
                                printk(KERN_CONT " %lld",
                                        zone->devs[j]->devid);
                        }
                        if (device->reada_curr_zone == zone)
                                printk(KERN_CONT " curr off %llu",
                                        device->reada_next - zone->start);
                        printk(KERN_CONT "\n");
                        index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
                }
                cnt = 0;
                index = 0;
                while (all) {
                        struct reada_extent *re = NULL;

                        ret = radix_tree_gang_lookup(&device->reada_extents,
                                                     (void **)&re, index, 1);
                        if (ret == 0)
                                break;
                        printk(KERN_DEBUG
                                "  re: logical %llu size %u empty %d for %lld",
                                re->logical, fs_info->tree_root->nodesize,
                                list_empty(&re->extctl), re->scheduled_for ?
                                re->scheduled_for->devid : -1);

                        for (i = 0; i < re->nzones; ++i) {
                                printk(KERN_CONT " zone %llu-%llu devs",
                                        re->zones[i]->start,
                                        re->zones[i]->end);
                                for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                        printk(KERN_CONT " %lld",
                                                re->zones[i]->devs[j]->devid);
                                }
                        }
                        printk(KERN_CONT "\n");
                        index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
                        if (++cnt > 15)
                                break;
                }
        }

        index = 0;
        cnt = 0;
        while (all) {
                struct reada_extent *re = NULL;

                ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
                                             index, 1);
                if (ret == 0)
                        break;
                if (!re->scheduled_for) {
                        index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
                        continue;
                }
                printk(KERN_DEBUG
                        "re: logical %llu size %u list empty %d for %lld",
                        re->logical, fs_info->tree_root->nodesize,
                        list_empty(&re->extctl),
                        re->scheduled_for ? re->scheduled_for->devid : -1);
                for (i = 0; i < re->nzones; ++i) {
                        printk(KERN_CONT " zone %llu-%llu devs",
                                re->zones[i]->start,
                                re->zones[i]->end);
                        for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                printk(KERN_CONT " %lld",
                                        re->zones[i]->devs[j]->devid);
                        }
                }
                printk(KERN_CONT "\n");
                index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
        }
        spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                        struct btrfs_key *key_start, struct btrfs_key *key_end)
{
        struct reada_control *rc;
        u64 start;
        u64 generation;
        int level;
        int ret;
        struct extent_buffer *node;
        static struct btrfs_key max_key = {
                .objectid = (u64)-1,
                .type = (u8)-1,
                .offset = (u64)-1
        };

        rc = kzalloc(sizeof(*rc), GFP_NOFS);
        if (!rc)
                return ERR_PTR(-ENOMEM);

        rc->root = root;
        rc->key_start = *key_start;
        rc->key_end = *key_end;
        atomic_set(&rc->elems, 0);
        init_waitqueue_head(&rc->wait);
        kref_init(&rc->refcnt);
        kref_get(&rc->refcnt); /* one ref for having elements */

        node = btrfs_root_node(root);
        start = node->start;
        level = btrfs_header_level(node);
        generation = btrfs_header_generation(node);
        free_extent_buffer(node);

        ret = reada_add_block(rc, start, &max_key, level, generation);
        if (ret) {
                kfree(rc);
                return ERR_PTR(ret);
        }

        reada_start_machine(root->fs_info);

        return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;

        while (atomic_read(&rc->elems)) {
                wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
                                   5 * HZ);
                dump_devs(rc->root->fs_info,
                          atomic_read(&rc->elems) < 10 ? 1 : 0);
        }

        dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;

        while (atomic_read(&rc->elems)) {
                wait_event(rc->wait, atomic_read(&rc->elems) == 0);
        }

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
        struct reada_control *rc = handle;

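        /*
         * Drop the caller's reference only; readahead keeps running in the
         * background and rc is freed once the last in-flight element
         * completes (see __readahead_hook).
         */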
        kref_put(&rc->refcnt, reada_control_release);
}