mm/list_lru.c (Linux-libre 4.19.123-gnu, librecmc/linux-libre.git)
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: Dave Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
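
/*
 * Usage sketch (illustrative only, not part of this file): a cache typically
 * calls list_lru_add() when an object's last reference goes away and
 * list_lru_del() when the object is reused or freed. The my_lru, obj and
 * my_nr_cached names are hypothetical; only the list_lru calls are real API.
 *
 *	if (list_lru_add(&my_lru, &obj->lru_node))
 *		atomic_long_inc(&my_nr_cached);
 *
 *	if (list_lru_del(&my_lru, &obj->lru_node))
 *		atomic_long_dec(&my_nr_cached);
 *
 * Both return false when the item was already on (or already off) a list,
 * so callers can use the return value to keep their own counters balanced.
 */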

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
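
/*
 * Usage sketch (illustrative only): a shrinker's count_objects callback
 * usually reports the LRU size through list_lru_shrink_count(), a small
 * wrapper in <linux/list_lru.h> around list_lru_count_one() that passes
 * sc->nid and sc->memcg. The my_shrink_count and my_lru names are
 * hypothetical.
 *
 *	static unsigned long my_shrink_count(struct shrinker *shrink,
 *					     struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */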

static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
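
/*
 * Usage sketch (illustrative only): the isolate callback passed to the walk
 * functions receives the item, the list_lru_one it currently sits on and a
 * pointer to the held node lock, and must return one of the lru_status
 * values handled by __list_lru_walk_one() above. The my_obj and my_isolate
 * names and the in_use field are hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lock, void *arg)
 *	{
 *		struct my_obj *obj = container_of(item, struct my_obj, lru_node);
 *		struct list_head *dispose = arg;
 *
 *		if (obj->in_use)
 *			return LRU_ROTATE;
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */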

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
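
/*
 * Usage sketch (illustrative only): callers normally reach __list_lru_init()
 * through the list_lru_init(), list_lru_init_key() or list_lru_init_memcg()
 * macros from <linux/list_lru.h>, and tear the structure down again with
 * list_lru_destroy() below. The my_lru and my_shrinker names are
 * hypothetical.
 *
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&my_lru);
 */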

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);