/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fld/fld_cache.c
 *
 * FLD (Fids Location Database)
 *
 * Author: Pravin Shelar <pravin.shelar@sun.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_FLD

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/module.h>
#include <asm/div64.h>

#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_ver.h"
#include "../include/obd_support.h"
#include "../include/lprocfs_status.h"

#include "../include/lustre_req_layout.h"
#include "../include/lustre_fld.h"
#include "fld_internal.h"

/**
 * Create FLD cache; \a cache_threshold must be strictly less than
 * \a cache_size (the threshold controls how far fld_cache_shrink()
 * trims below the size limit).
 */
struct fld_cache *fld_cache_init(const char *name,
                                 int cache_size, int cache_threshold)
{
        struct fld_cache *cache;

        LASSERT(name);
        LASSERT(cache_threshold < cache_size);

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&cache->fci_entries_head);
        INIT_LIST_HEAD(&cache->fci_lru);

        cache->fci_cache_count = 0;
        rwlock_init(&cache->fci_lock);

        strlcpy(cache->fci_name, name,
                sizeof(cache->fci_name));

        cache->fci_cache_size = cache_size;
        cache->fci_threshold = cache_threshold;

        /* Init fld cache info. */
        memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));

        CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
               cache->fci_name, cache_size, cache_threshold);

        return cache;
}
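
/*
 * A minimal usage sketch (name and sizes are illustrative, not taken
 * from this file):
 *
 *	cache = fld_cache_init("example", 64, 8);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 */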

/**
 * Destroy FLD cache: flush all cached entries and log hit statistics.
 */
void fld_cache_fini(struct fld_cache *cache)
{
        __u64 pct;

        LASSERT(cache);
        fld_cache_flush(cache);

        if (cache->fci_stat.fst_count > 0) {
                pct = cache->fci_stat.fst_cache * 100;
                do_div(pct, cache->fci_stat.fst_count);
        } else {
                pct = 0;
        }

        CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
        CDEBUG(D_INFO, "  Total reqs: %llu\n", cache->fci_stat.fst_count);
        CDEBUG(D_INFO, "  Cache reqs: %llu\n", cache->fci_stat.fst_cache);
        CDEBUG(D_INFO, "  Cache hits: %llu%%\n", pct);

        kfree(cache);
}

/**
 * Delete the given node: unlink it from the entry and LRU lists and
 * free it. Caller must hold fci_lock as a writer.
 */
static void fld_cache_entry_delete(struct fld_cache *cache,
                                   struct fld_cache_entry *node)
{
        list_del(&node->fce_list);
        list_del(&node->fce_lru);
        cache->fci_cache_count--;
        kfree(node);
}

/**
 * Fix the list up after an insertion: walk the entries in start order
 * and merge or trim each entry against the NEXT one in the list.
 */
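/*
 * Worked example (values are illustrative): with same-index entries
 * [0x100, 0x200) and [0x200, 0x300), the merge branch below collapses
 * them into a single [0x100, 0x300); with [0x100, 0x250) followed by
 * [0x200, 0x300), the overlap branch widens the next entry to
 * [0x100, 0x300) and deletes the current one.
 */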
static void fld_fix_new_list(struct fld_cache *cache)
{
        struct fld_cache_entry *f_curr;
        struct fld_cache_entry *f_next;
        struct lu_seq_range *c_range;
        struct lu_seq_range *n_range;
        struct list_head *head = &cache->fci_entries_head;

restart_fixup:

        list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
                c_range = &f_curr->fce_range;
                n_range = &f_next->fce_range;

                LASSERT(range_is_sane(c_range));
                if (&f_next->fce_list == head)
                        break;

                if (c_range->lsr_flags != n_range->lsr_flags)
                        continue;

                LASSERTF(c_range->lsr_start <= n_range->lsr_start,
                         "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
                         PRANGE(c_range), PRANGE(n_range));

                /* check merge possibility with next range */
                if (c_range->lsr_end == n_range->lsr_start) {
                        if (c_range->lsr_index != n_range->lsr_index)
                                continue;
                        n_range->lsr_start = c_range->lsr_start;
                        fld_cache_entry_delete(cache, f_curr);
                        continue;
                }

                /* check if current range overlaps with next range. */
                if (n_range->lsr_start < c_range->lsr_end) {
                        if (c_range->lsr_index == n_range->lsr_index) {
                                n_range->lsr_start = c_range->lsr_start;
                                n_range->lsr_end = max(c_range->lsr_end,
                                                       n_range->lsr_end);
                                fld_cache_entry_delete(cache, f_curr);
                        } else {
                                if (n_range->lsr_end <= c_range->lsr_end) {
                                        *n_range = *c_range;
                                        fld_cache_entry_delete(cache, f_curr);
                                } else {
                                        n_range->lsr_start = c_range->lsr_end;
                                }
                        }

                        /* we could have overlap over next
                         * range too. better restart.
                         */
                        goto restart_fixup;
                }

                /* kill duplicates */
                if (c_range->lsr_start == n_range->lsr_start &&
                    c_range->lsr_end == n_range->lsr_end)
                        fld_cache_entry_delete(cache, f_curr);
        }
}

/**
 * Add a node to the FLD cache and the LRU list, then fix up any
 * resulting merges or overlaps.
 */
static inline void fld_cache_entry_add(struct fld_cache *cache,
                                       struct fld_cache_entry *f_new,
                                       struct list_head *pos)
{
        list_add(&f_new->fce_list, pos);
        list_add(&f_new->fce_lru, &cache->fci_lru);

        cache->fci_cache_count++;
        fld_fix_new_list(cache);
}

/**
 * Check if the cache needs to be shrunk; if so, evict entries from the
 * tail of the LRU list until the cache is small enough.
 */
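/*
 * E.g. (illustrative numbers): with fci_cache_size = 64 and
 * fci_threshold = 8, shrinking starts once fci_cache_count reaches 64
 * and evicts LRU entries until fci_cache_count + 8 <= 64, i.e. down
 * to 56 entries.
 */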
static int fld_cache_shrink(struct fld_cache *cache)
{
        struct fld_cache_entry *flde;
        struct list_head *curr;
        int num = 0;

        if (cache->fci_cache_count < cache->fci_cache_size)
                return 0;

        curr = cache->fci_lru.prev;

        while (cache->fci_cache_count + cache->fci_threshold >
               cache->fci_cache_size && curr != &cache->fci_lru) {
                flde = list_entry(curr, struct fld_cache_entry, fce_lru);
                curr = curr->prev;
                fld_cache_entry_delete(cache, flde);
                num++;
        }

        CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
               cache->fci_name, num);

        return 0;
}

/**
 * kill all fld cache entries.
 */
void fld_cache_flush(struct fld_cache *cache)
{
        write_lock(&cache->fci_lock);
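        /*
         * Setting the size limit to zero makes fld_cache_shrink()
         * walk the whole LRU list and evict every entry.
         */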
        cache->fci_cache_size = 0;
        fld_cache_shrink(cache);
        write_unlock(&cache->fci_lock);
}

/**
 * Punch a hole in an existing range: split the range around the new
 * entry and insert the new entry into the gap.
 */
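/*
 * Illustration of the three-way split performed below:
 *
 *	before:  [c_start ........................... c_end)
 *	after:   [c_start .. new_start)[f_new)[new_end .. c_end)
 *	                f_curr          f_new       fldt
 */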
static void fld_cache_punch_hole(struct fld_cache *cache,
                                 struct fld_cache_entry *f_curr,
                                 struct fld_cache_entry *f_new)
{
        const struct lu_seq_range *range = &f_new->fce_range;
        const u64 new_start = range->lsr_start;
        const u64 new_end = range->lsr_end;
        struct fld_cache_entry *fldt;

        fldt = kzalloc(sizeof(*fldt), GFP_ATOMIC);
        if (!fldt) {
                kfree(f_new);
                /* overlap is not allowed, so don't mess up list. */
                return;
        }
        /* break f_curr RANGE into three RANGES:
         *	f_curr, f_new, fldt
         */

        /* f_new = *range */

        /* fldt */
        fldt->fce_range.lsr_start = new_end;
        fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
        fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;

        /* f_curr */
        f_curr->fce_range.lsr_end = new_start;

        /* add these two entries to list */
        fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
        fld_cache_entry_add(cache, fldt, &f_new->fce_list);

        /* no need to fixup */
}

/**
 * Handle a new range that overlaps an existing entry in the FLD cache.
 */
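/*
 * Cases handled below (c = existing f_curr range, n = new f_new range,
 * both with the same lsr_flags):
 *
 *	same lsr_index: union c and n into f_curr
 *	case 1: n completely covers c -> overwrite c with n
 *	case 2: n strictly inside c   -> punch a hole in c
 *	case 3: [n_start [c_start n_end) c_end) -> trim c to start at
 *		n_end, insert n in front of it
 *	case 4: [c_start [n_start c_end) n_end) -> trim c to end at
 *		n_start, insert n after it
 */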
static void fld_cache_overlap_handle(struct fld_cache *cache,
                                     struct fld_cache_entry *f_curr,
                                     struct fld_cache_entry *f_new)
{
        const struct lu_seq_range *range = &f_new->fce_range;
        const u64 new_start = range->lsr_start;
        const u64 new_end = range->lsr_end;
        const u32 mdt = range->lsr_index;

        /* this is the overlap case; only overlap with the previous range
         * is checked here. fld_fix_new_list() handles overlap with the
         * next range.
         */

        if (f_curr->fce_range.lsr_index == mdt) {
                f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
                                                  new_start);

                f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
                                                new_end);

                kfree(f_new);
                fld_fix_new_list(cache);

        } else if (new_start <= f_curr->fce_range.lsr_start &&
                        f_curr->fce_range.lsr_end <= new_end) {
                /* case 1: new range completely covers the existing range,
                 * e.g. the whole range was migrated. update the fld cache
                 * entry in place.
                 */

                f_curr->fce_range = *range;
                kfree(f_new);
                fld_fix_new_list(cache);

        } else if (f_curr->fce_range.lsr_start < new_start &&
                        new_end < f_curr->fce_range.lsr_end) {
                /* case 2: new range fits within the existing range. */

                fld_cache_punch_hole(cache, f_curr, f_new);

        } else if (new_end <= f_curr->fce_range.lsr_end) {
                /* case 3: overlap:
                 *	 [new_start [c_start  new_end)  c_end)
                 */

                LASSERT(new_start <= f_curr->fce_range.lsr_start);

                f_curr->fce_range.lsr_start = new_end;
                fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);

        } else if (f_curr->fce_range.lsr_start <= new_start) {
                /* case 4: overlap:
                 *	 [c_start [new_start c_end) new_end)
                 */

                LASSERT(f_curr->fce_range.lsr_end <= new_end);

                f_curr->fce_range.lsr_end = new_start;
                fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
        } else {
                CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
                       PRANGE(range), PRANGE(&f_curr->fce_range));
        }
}
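
/**
 * Allocate and initialize a new cache entry covering \a range.
 */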
struct fld_cache_entry
*fld_cache_entry_create(const struct lu_seq_range *range)
{
        struct fld_cache_entry *f_new;

        LASSERT(range_is_sane(range));

        f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
        if (!f_new)
                return ERR_PTR(-ENOMEM);

        f_new->fce_range = *range;
        return f_new;
}

/**
 * Insert FLD entry in FLD cache.
 *
 * This function handles all cases of merging and breaking up of
 * ranges. Caller must hold fci_lock as a writer.
 */
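/*
 * The fci_entries_head list is kept sorted by lsr_start. E.g.
 * (illustrative): inserting [0x300, 0x400) into a cache holding
 * [0x100, 0x200) and [0x500, 0x600) places the new entry between the
 * two existing entries.
 */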
static int fld_cache_insert_nolock(struct fld_cache *cache,
                                   struct fld_cache_entry *f_new)
{
        struct fld_cache_entry *f_curr;
        struct fld_cache_entry *n;
        struct list_head *head;
        struct list_head *prev = NULL;
        const u64 new_start = f_new->fce_range.lsr_start;
        const u64 new_end = f_new->fce_range.lsr_end;
        __u32 new_flags = f_new->fce_range.lsr_flags;

        /*
         * Duplicate entries are eliminated in the insert op,
         * so we don't need to search for the new entry before
         * starting the insertion loop.
         */

        if (!cache->fci_no_shrink)
                fld_cache_shrink(cache);

        head = &cache->fci_entries_head;

        list_for_each_entry_safe(f_curr, n, head, fce_list) {
                /* the new range sorts entirely before this entry;
                 * insert in front of it.
                 */
                if (new_end < f_curr->fce_range.lsr_start ||
                    (new_end == f_curr->fce_range.lsr_start &&
                     new_flags != f_curr->fce_range.lsr_flags))
                        break;

                prev = &f_curr->fce_list;
                /* check if the new range overlaps this entry. */
                if (new_start < f_curr->fce_range.lsr_end &&
                    new_flags == f_curr->fce_range.lsr_flags) {
                        fld_cache_overlap_handle(cache, f_curr, f_new);
                        goto out;
                }
        }

        if (!prev)
                prev = head;

        CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
        /* Add new entry to cache and lru list. */
        fld_cache_entry_add(cache, f_new, prev);
out:
        return 0;
}
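
/**
 * Allocate a cache entry for \a range and insert it under fci_lock;
 * the entry is freed if insertion fails.
 */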
int fld_cache_insert(struct fld_cache *cache,
                     const struct lu_seq_range *range)
{
        struct fld_cache_entry *flde;
        int rc;

        flde = fld_cache_entry_create(range);
        if (IS_ERR(flde))
                return PTR_ERR(flde);

        write_lock(&cache->fci_lock);
        rc = fld_cache_insert_nolock(cache, flde);
        write_unlock(&cache->fci_lock);
        if (rc)
                kfree(flde);

        return rc;
}
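
/*
 * Illustrative caller (field values made up for the example):
 *
 *	struct lu_seq_range range = {
 *		.lsr_start = 0x400,
 *		.lsr_end   = 0x500,
 *		.lsr_index = 0,
 *		.lsr_flags = LU_SEQ_RANGE_MDT,
 *	};
 *
 *	rc = fld_cache_insert(cache, &range);
 */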

/**
 * Look up the cache entry matching \a range: an entry matches if it
 * has the same lsr_start, or the same lsr_end and lsr_flags. Caller
 * must hold fci_lock.
 */
struct fld_cache_entry
*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
                               struct lu_seq_range *range)
{
        struct fld_cache_entry *flde;
        struct fld_cache_entry *got = NULL;
        struct list_head *head;

        head = &cache->fci_entries_head;
        list_for_each_entry(flde, head, fce_list) {
                if (range->lsr_start == flde->fce_range.lsr_start ||
                    (range->lsr_end == flde->fce_range.lsr_end &&
                     range->lsr_flags == flde->fce_range.lsr_flags)) {
                        got = flde;
                        break;
                }
        }

        return got;
}

/**
 * Look up the cache entry matching \a range, taking fci_lock for
 * reading.
 */
struct fld_cache_entry
*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
{
        struct fld_cache_entry *got = NULL;

        read_lock(&cache->fci_lock);
        got = fld_cache_entry_lookup_nolock(cache, range);
        read_unlock(&cache->fci_lock);
        return got;
}

/**
 * Look up which cached range contains sequence \a seq and copy it to
 * \a range. Returns 0 on a cache hit and -ENOENT otherwise; if \a seq
 * falls in a gap before a cached range, \a range is still set to the
 * nearest preceding entry.
 */
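/*
 * E.g. (illustrative): with cached ranges [0x100, 0x200) and
 * [0x300, 0x400), looking up seq 0x150 returns 0 and copies
 * [0x100, 0x200); looking up seq 0x250 copies [0x100, 0x200) into
 * *range but returns -ENOENT.
 */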
int fld_cache_lookup(struct fld_cache *cache,
                     const u64 seq, struct lu_seq_range *range)
{
        struct fld_cache_entry *flde;
        struct fld_cache_entry *prev = NULL;
        struct list_head *head;

        read_lock(&cache->fci_lock);
        head = &cache->fci_entries_head;

        cache->fci_stat.fst_count++;
        list_for_each_entry(flde, head, fce_list) {
                if (flde->fce_range.lsr_start > seq) {
                        if (prev)
                                *range = prev->fce_range;
                        break;
                }

                prev = flde;
                if (range_within(&flde->fce_range, seq)) {
                        *range = flde->fce_range;

                        cache->fci_stat.fst_cache++;
                        read_unlock(&cache->fci_lock);
                        return 0;
                }
        }
        read_unlock(&cache->fci_lock);
        return -ENOENT;
}