/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fld/fld_cache.c
 *
 * FLD (Fids Location Database)
 *
 * Author: Pravin Shelar <pravin.shelar@sun.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_FLD

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/module.h>
#include <asm/div64.h>

#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_ver.h"
#include "../include/obd_support.h"
#include "../include/lprocfs_status.h"

#include "../include/lustre_req_layout.h"
#include "../include/lustre_fld.h"
#include "fld_internal.h"

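/*
 * Overview of the structure managed below: struct fld_cache keeps every
 * cached lu_seq_range on two lists - fci_entries_head, ordered by
 * lsr_start and kept free of overlaps by fld_fix_new_list(), and fci_lru,
 * which fld_cache_shrink() walks from the tail to evict the oldest
 * entries. The lists and the fci_stat counters are protected by the
 * fci_lock rwlock.
 */
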
/**
 * create fld cache.
 */
struct fld_cache *fld_cache_init(const char *name,
				 int cache_size, int cache_threshold)
{
	struct fld_cache *cache;

	LASSERT(cache_threshold < cache_size);

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cache->fci_entries_head);
	INIT_LIST_HEAD(&cache->fci_lru);

	cache->fci_cache_count = 0;
	rwlock_init(&cache->fci_lock);

	strlcpy(cache->fci_name, name, sizeof(cache->fci_name));

	cache->fci_cache_size = cache_size;
	cache->fci_threshold = cache_threshold;

	/* Init fld cache info. */
	memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));

	CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
	       cache->fci_name, cache_size, cache_threshold);

	return cache;
}

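/*
 * Illustrative life cycle of the cache (a sketch only; the cache name and
 * the sizes are made up, real callers pass their own values):
 *
 *	cache = fld_cache_init("example-fld", 512, 51);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *	...
 *	fld_cache_insert(cache, &range);	- cache a seq range -> MDT index
 *	fld_cache_lookup(cache, seq, &range);	- served from the cache on a hit
 *	...
 *	fld_cache_fini(cache);			- dump hit statistics, free all
 */
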
/**
 * destroy fld cache.
 */
void fld_cache_fini(struct fld_cache *cache)
{
	__u64 pct = 0;

	fld_cache_flush(cache);

	if (cache->fci_stat.fst_count > 0) {
		pct = cache->fci_stat.fst_cache * 100;
		do_div(pct, cache->fci_stat.fst_count);
	}

	CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
	CDEBUG(D_INFO, "  Total reqs: %llu\n", cache->fci_stat.fst_count);
	CDEBUG(D_INFO, "  Cache reqs: %llu\n", cache->fci_stat.fst_cache);
	CDEBUG(D_INFO, "  Cache hits: %llu%%\n", pct);

	kfree(cache);
}

/**
 * delete given node from both the entry list and the LRU list.
 */
static void fld_cache_entry_delete(struct fld_cache *cache,
				   struct fld_cache_entry *node)
{
	list_del(&node->fce_list);
	list_del(&node->fce_lru);
	cache->fci_cache_count--;
	kfree(node);
}

/**
 * Fix the ordered entry list by checking each entry against the NEXT
 * entry in order: merge adjacent ranges, resolve overlaps and drop
 * duplicates.
 */
static void fld_fix_new_list(struct fld_cache *cache)
{
	struct fld_cache_entry *f_curr;
	struct fld_cache_entry *f_next;
	struct lu_seq_range *c_range;
	struct lu_seq_range *n_range;
	struct list_head *head = &cache->fci_entries_head;

restart_fixup:

	list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
		c_range = &f_curr->fce_range;
		n_range = &f_next->fce_range;

		LASSERT(range_is_sane(c_range));
		if (&f_next->fce_list == head)
			break;

		if (c_range->lsr_flags != n_range->lsr_flags)
			continue;

		LASSERTF(c_range->lsr_start <= n_range->lsr_start,
			 "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
			 PRANGE(c_range), PRANGE(n_range));

		/* check merge possibility with next range */
		if (c_range->lsr_end == n_range->lsr_start) {
			if (c_range->lsr_index != n_range->lsr_index)
				continue;
			n_range->lsr_start = c_range->lsr_start;
			fld_cache_entry_delete(cache, f_curr);
			continue;
		}

		/* check if current range overlaps with next range. */
		if (n_range->lsr_start < c_range->lsr_end) {
			if (c_range->lsr_index == n_range->lsr_index) {
				n_range->lsr_start = c_range->lsr_start;
				n_range->lsr_end = max(c_range->lsr_end,
						       n_range->lsr_end);
				fld_cache_entry_delete(cache, f_curr);
			} else if (n_range->lsr_end <= c_range->lsr_end) {
				*n_range = *c_range;
				fld_cache_entry_delete(cache, f_curr);
			} else {
				n_range->lsr_start = c_range->lsr_end;
			}

			/* we could have overlap over next
			 * range too. better restart.
			 */
			goto restart_fixup;
		}

		/* kill duplicates */
		if (c_range->lsr_start == n_range->lsr_start &&
		    c_range->lsr_end == n_range->lsr_end)
			fld_cache_entry_delete(cache, f_curr);
	}
}

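/*
 * Example of the fixup above (illustrative values): two adjacent entries
 * for the same lsr_index, [0x400, 0x500) and [0x500, 0x600), are merged
 * into a single [0x400, 0x600) entry by the merge branch; an exact
 * duplicate of the next entry is dropped by the final branch.
 */
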
/**
 * add node to fld cache
 */
static inline void fld_cache_entry_add(struct fld_cache *cache,
				       struct fld_cache_entry *f_new,
				       struct list_head *pos)
{
	list_add(&f_new->fce_list, pos);
	list_add(&f_new->fce_lru, &cache->fci_lru);

	cache->fci_cache_count++;
	fld_fix_new_list(cache);
}

/**
 * Check if the cache needs to be shrunk. If so - do it.
 * Remove one entry from the LRU list after another until the cache is
 * shrunk enough.
 */
static int fld_cache_shrink(struct fld_cache *cache)
{
	struct fld_cache_entry *flde;
	struct list_head *curr;
	int num = 0;

	if (cache->fci_cache_count < cache->fci_cache_size)
		return 0;

	curr = cache->fci_lru.prev;
	while (cache->fci_cache_count + cache->fci_threshold >
	       cache->fci_cache_size && curr != &cache->fci_lru) {
		flde = list_entry(curr, struct fld_cache_entry, fce_lru);
		curr = curr->prev;
		fld_cache_entry_delete(cache, flde);
		num++;
	}

	CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
	       cache->fci_name, num);

	return 0;
}

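/*
 * Shrink arithmetic, with illustrative numbers (not the driver defaults):
 * for fci_cache_size = 512 and fci_threshold = 51, nothing is evicted
 * until the cache holds 512 entries; the loop above then walks the LRU
 * list backwards and deletes entries until count + 51 <= 512, i.e. down
 * to 461 entries, so each shrink frees a batch rather than a single entry.
 */
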
/**
 * kill all fld cache entries.
 */
void fld_cache_flush(struct fld_cache *cache)
{
	write_lock(&cache->fci_lock);
	/* A zero cache size makes fld_cache_shrink() evict every entry. */
	cache->fci_cache_size = 0;
	fld_cache_shrink(cache);
	write_unlock(&cache->fci_lock);
}

/**
 * punch hole in existing range. divide this range and add new
 * entry accordingly in fld cache entries.
 */
static void fld_cache_punch_hole(struct fld_cache *cache,
				 struct fld_cache_entry *f_curr,
				 struct fld_cache_entry *f_new)
{
	const struct lu_seq_range *range = &f_new->fce_range;
	const u64 new_start = range->lsr_start;
	const u64 new_end = range->lsr_end;
	struct fld_cache_entry *fldt;

	fldt = kzalloc(sizeof(*fldt), GFP_ATOMIC);
	if (!fldt) {
		kfree(f_new);
		/* overlap is not allowed, so don't mess up the list. */
		return;
	}

	/* break f_curr RANGE into three RANGES:
	 * f_curr, f_new, fldt
	 */

	/* fldt: tail of the original range, after the hole */
	fldt->fce_range.lsr_start = new_end;
	fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
	fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;

	/* f_curr: head of the original range, before the hole */
	f_curr->fce_range.lsr_end = new_start;

	/* add these two entries to list */
	fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
	fld_cache_entry_add(cache, fldt, &f_new->fce_list);

	/* no need to fixup */
}

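/*
 * Sketch of the split above (illustrative bounds): punching
 * f_new = [b, c) into an existing entry f_curr = [a, d), a < b < c < d,
 * leaves three consecutive list entries
 *
 *	f_curr = [a, b)    f_new = [b, c)    fldt = [c, d)
 *
 * where f_curr and fldt keep the original lsr_index and f_new carries the
 * new one. Note the GFP_ATOMIC allocation: this path runs with fci_lock
 * held for write (taken in fld_cache_insert()), so it must not sleep.
 */
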
/**
 * handle range overlap in fld cache.
 */
static void fld_cache_overlap_handle(struct fld_cache *cache,
				     struct fld_cache_entry *f_curr,
				     struct fld_cache_entry *f_new)
{
	const struct lu_seq_range *range = &f_new->fce_range;
	const u64 new_start = range->lsr_start;
	const u64 new_end = range->lsr_end;
	const u32 mdt = range->lsr_index;

	/* This is the overlap case: these branches only check overlap with
	 * the previous range; fld_fix_new_list() handles overlap with the
	 * next range.
	 */
	if (f_curr->fce_range.lsr_index == mdt) {
		/* same index - just extend the existing range */
		f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
						  new_start);
		f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
						new_end);

		kfree(f_new);
		fld_fix_new_list(cache);

	} else if (new_start <= f_curr->fce_range.lsr_start &&
		   f_curr->fce_range.lsr_end <= new_end) {
		/* case 1: new range completely overshadowed existing range.
		 * e.g. whole range migrated. update fld cache entry
		 */
		f_curr->fce_range = *range;
		kfree(f_new);
		fld_fix_new_list(cache);

	} else if (f_curr->fce_range.lsr_start < new_start &&
		   new_end < f_curr->fce_range.lsr_end) {
		/* case 2: new range fits within existing range. */
		fld_cache_punch_hole(cache, f_curr, f_new);

	} else if (new_end <= f_curr->fce_range.lsr_end) {
		/* case 3: overlap:
		 * [new_start [c_start new_end) c_end)
		 */
		LASSERT(new_start <= f_curr->fce_range.lsr_start);

		f_curr->fce_range.lsr_start = new_end;
		fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);

	} else if (f_curr->fce_range.lsr_start <= new_start) {
		/* case 4: overlap:
		 * [c_start [new_start c_end) new_end)
		 */
		LASSERT(f_curr->fce_range.lsr_end <= new_end);

		f_curr->fce_range.lsr_end = new_start;
		fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
	} else {
		CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
		       PRANGE(range), PRANGE(&f_curr->fce_range));
	}
}

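/*
 * Worked example for case 4 above (illustrative values): with
 * [0x400, 0x500) -> MDT0 cached, inserting [0x480, 0x600) -> MDT1 trims
 * the existing entry to [0x400, 0x480) and adds the new entry after it,
 * leaving [0x400, 0x480) -> MDT0 followed by [0x480, 0x600) -> MDT1.
 */
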
struct fld_cache_entry
*fld_cache_entry_create(const struct lu_seq_range *range)
{
	struct fld_cache_entry *f_new;

	LASSERT(range_is_sane(range));

	f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
	if (!f_new)
		return ERR_PTR(-ENOMEM);

	f_new->fce_range = *range;
	return f_new;
}

/**
 * Insert FLD entry in FLD cache.
 *
 * This function handles all cases of merging and breaking up of
 * ranges.
 */
static int fld_cache_insert_nolock(struct fld_cache *cache,
				   struct fld_cache_entry *f_new)
{
	struct fld_cache_entry *f_curr;
	struct fld_cache_entry *n;
	struct list_head *head;
	struct list_head *prev = NULL;
	const u64 new_start = f_new->fce_range.lsr_start;
	const u64 new_end = f_new->fce_range.lsr_end;
	__u32 new_flags = f_new->fce_range.lsr_flags;

	/*
	 * Duplicate entries are eliminated in insert op.
	 * So we don't need to search for the new entry before starting
	 * the insertion loop.
	 */
	if (!cache->fci_no_shrink)
		fld_cache_shrink(cache);

	head = &cache->fci_entries_head;

	list_for_each_entry_safe(f_curr, n, head, fce_list) {
		/* stop: the new range belongs before this entry */
		if (new_end < f_curr->fce_range.lsr_start ||
		    (new_end == f_curr->fce_range.lsr_start &&
		     new_flags != f_curr->fce_range.lsr_flags))
			break;

		prev = &f_curr->fce_list;
		/* check if this range overlaps the new range. */
		if (new_start < f_curr->fce_range.lsr_end &&
		    new_flags == f_curr->fce_range.lsr_flags) {
			fld_cache_overlap_handle(cache, f_curr, f_new);
			goto out;
		}
	}

	if (!prev)
		prev = head;

	CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
	/* Add new entry to cache and lru list. */
	fld_cache_entry_add(cache, f_new, prev);
out:
	return 0;
}

int fld_cache_insert(struct fld_cache *cache,
		     const struct lu_seq_range *range)
{
	struct fld_cache_entry *flde;
	int rc;

	flde = fld_cache_entry_create(range);
	if (IS_ERR(flde))
		return PTR_ERR(flde);

	write_lock(&cache->fci_lock);
	rc = fld_cache_insert_nolock(cache, flde);
	write_unlock(&cache->fci_lock);
	if (rc)
		kfree(flde);

	return rc;
}

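/*
 * Illustrative caller pattern for the locked entry point above (values
 * are hypothetical; the real callers are the FLD client and server code
 * elsewhere in lustre/fld):
 *
 *	struct lu_seq_range range = {
 *		.lsr_start = 0x400,
 *		.lsr_end   = 0x500,
 *		.lsr_index = 0,
 *		.lsr_flags = LU_SEQ_RANGE_MDT,
 *	};
 *	int rc = fld_cache_insert(cache, &range);
 *
 * A -ENOMEM from fld_cache_entry_create() is returned unchanged.
 */
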
/**
 * Lookup \a range in the FLD cache without taking fci_lock
 * (the caller must already hold it).
 */
struct fld_cache_entry
*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
			       struct lu_seq_range *range)
{
	struct fld_cache_entry *flde;
	struct fld_cache_entry *got = NULL;
	struct list_head *head;

	head = &cache->fci_entries_head;
	list_for_each_entry(flde, head, fce_list) {
		if (range->lsr_start == flde->fce_range.lsr_start ||
		    (range->lsr_end == flde->fce_range.lsr_end &&
		     range->lsr_flags == flde->fce_range.lsr_flags)) {
			got = flde;
			break;
		}
	}

	return got;
}

/**
 * Lookup \a range in the fld cache (locked wrapper).
 */
struct fld_cache_entry
*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
{
	struct fld_cache_entry *got = NULL;

	read_lock(&cache->fci_lock);
	got = fld_cache_entry_lookup_nolock(cache, range);
	read_unlock(&cache->fci_lock);

	return got;
}

/**
 * Lookup the cached range containing sequence \a seq in the fld cache.
 */
int fld_cache_lookup(struct fld_cache *cache,
		     const u64 seq, struct lu_seq_range *range)
{
	struct fld_cache_entry *flde;
	struct fld_cache_entry *prev = NULL;
	struct list_head *head;

	read_lock(&cache->fci_lock);
	head = &cache->fci_entries_head;

	cache->fci_stat.fst_count++;
	list_for_each_entry(flde, head, fce_list) {
		if (flde->fce_range.lsr_start > seq) {
			if (prev)
				*range = prev->fce_range;
			break;
		}

		prev = flde;
		if (range_within(&flde->fce_range, seq)) {
			*range = flde->fce_range;

			cache->fci_stat.fst_cache++;
			read_unlock(&cache->fci_lock);
			return 0;
		}
	}
	read_unlock(&cache->fci_lock);
	return -ENOENT;
}
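
/*
 * Return convention for fld_cache_lookup(): 0 with *range filled in on a
 * cache hit, -ENOENT otherwise. Note that when \a seq falls into a gap
 * just before a cached range, the loop still copies the preceding entry
 * into *range before returning -ENOENT, so callers must check the return
 * code rather than the contents of *range.
 */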