Linux-libre 3.18.37-gnu
[librecmc/linux-libre.git] / mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37
38 unsigned long hugepages_treat_as_movable;
39
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43
44 __initdata LIST_HEAD(huge_boot_pages);
45
46 /* for command line parsing */
47 static struct hstate * __initdata parsed_hstate;
48 static unsigned long __initdata default_hstate_max_huge_pages;
49 static unsigned long __initdata default_hstate_size;
50
51 /*
52  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
53  * free_huge_pages, and surplus_huge_pages.
54  */
55 DEFINE_SPINLOCK(hugetlb_lock);
56
57 /*
58  * Serializes faults on the same logical page.  This is used to
59  * prevent spurious OOMs when the hugepage pool is fully utilized.
60  */
61 static int num_fault_mutexes;
62 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
63
64 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
65 {
66         bool free = (spool->count == 0) && (spool->used_hpages == 0);
67
68         spin_unlock(&spool->lock);
69
70         /* If no pages are used, and no other handles to the subpool
71          * remain, free the subpool. */
72         if (free)
73                 kfree(spool);
74 }
75
76 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
77 {
78         struct hugepage_subpool *spool;
79
80         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
81         if (!spool)
82                 return NULL;
83
84         spin_lock_init(&spool->lock);
85         spool->count = 1;
86         spool->max_hpages = nr_blocks;
87         spool->used_hpages = 0;
88
89         return spool;
90 }
91
92 void hugepage_put_subpool(struct hugepage_subpool *spool)
93 {
94         spin_lock(&spool->lock);
95         BUG_ON(!spool->count);
96         spool->count--;
97         unlock_or_release_subpool(spool);
98 }
99
100 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
101                                       long delta)
102 {
103         int ret = 0;
104
105         if (!spool)
106                 return 0;
107
108         spin_lock(&spool->lock);
109         if ((spool->used_hpages + delta) <= spool->max_hpages) {
110                 spool->used_hpages += delta;
111         } else {
112                 ret = -ENOMEM;
113         }
114         spin_unlock(&spool->lock);
115
116         return ret;
117 }
118
119 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
120                                        long delta)
121 {
122         if (!spool)
123                 return;
124
125         spin_lock(&spool->lock);
126         spool->used_hpages -= delta;
127         /* If hugetlbfs_put_super couldn't free spool due to
128         * an outstanding quota reference, free it now. */
129         unlock_or_release_subpool(spool);
130 }
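
/*
 * Usage sketch: get/put calls on a subpool are paired around the lifetime
 * of a huge page charged against it; alloc_huge_page() and free_huge_page()
 * below do essentially this (simplified):
 *
 *	if (hugepage_subpool_get_pages(spool, 1))	// charge one huge page
 *		return ERR_PTR(-ENOSPC);
 *	...
 *	hugepage_subpool_put_pages(spool, 1);		// uncharge when freed
 */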
131
132 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
133 {
134         return HUGETLBFS_SB(inode->i_sb)->spool;
135 }
136
137 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
138 {
139         return subpool_inode(file_inode(vma->vm_file));
140 }
141
142 /*
143  * Region tracking -- allows tracking of reservations and instantiated pages
144  *                    across the pages in a mapping.
145  *
146  * The region data structures are embedded into a resv_map and
147  * protected by the resv_map's lock.
148  */
149 struct file_region {
150         struct list_head link;
151         long from;
152         long to;
153 };
154
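/*
 * Illustrative example (offsets are hypothetical): after reserving huge-page
 * offsets [0, 3) and [5, 8) of a mapping, resv->regions holds two regions:
 *
 *	{from = 0, to = 3} -> {from = 5, to = 8}
 *
 * A subsequent region_add(resv, 2, 6) overlaps both, so region_add() below
 * extends the first region and frees the second, leaving {from = 0, to = 8}.
 */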
155 static long region_add(struct resv_map *resv, long f, long t)
156 {
157         struct list_head *head = &resv->regions;
158         struct file_region *rg, *nrg, *trg;
159
160         spin_lock(&resv->lock);
161         /* Locate the region we are either in or before. */
162         list_for_each_entry(rg, head, link)
163                 if (f <= rg->to)
164                         break;
165
166         /* Round our left edge to the current segment if it encloses us. */
167         if (f > rg->from)
168                 f = rg->from;
169
170         /* Check for and consume any regions we now overlap with. */
171         nrg = rg;
172         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
173                 if (&rg->link == head)
174                         break;
175                 if (rg->from > t)
176                         break;
177
178                 /* If this area reaches higher, then extend our area to
179                  * include it completely.  If this is not the first area
180                  * which we intend to reuse, free it. */
181                 if (rg->to > t)
182                         t = rg->to;
183                 if (rg != nrg) {
184                         list_del(&rg->link);
185                         kfree(rg);
186                 }
187         }
188         nrg->from = f;
189         nrg->to = t;
190         spin_unlock(&resv->lock);
191         return 0;
192 }
193
194 static long region_chg(struct resv_map *resv, long f, long t)
195 {
196         struct list_head *head = &resv->regions;
197         struct file_region *rg, *nrg = NULL;
198         long chg = 0;
199
200 retry:
201         spin_lock(&resv->lock);
202         /* Locate the region we are before or in. */
203         list_for_each_entry(rg, head, link)
204                 if (f <= rg->to)
205                         break;
206
207         /* If we are below the current region then a new region is required.
208          * Subtle: allocate a new region at the position but make it zero
209          * size such that we can guarantee to record the reservation. */
210         if (&rg->link == head || t < rg->from) {
211                 if (!nrg) {
212                         spin_unlock(&resv->lock);
213                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
214                         if (!nrg)
215                                 return -ENOMEM;
216
217                         nrg->from = f;
218                         nrg->to   = f;
219                         INIT_LIST_HEAD(&nrg->link);
220                         goto retry;
221                 }
222
223                 list_add(&nrg->link, rg->link.prev);
224                 chg = t - f;
225                 goto out_nrg;
226         }
227
228         /* Round our left edge to the current segment if it encloses us. */
229         if (f > rg->from)
230                 f = rg->from;
231         chg = t - f;
232
233         /* Check for and consume any regions we now overlap with. */
234         list_for_each_entry(rg, rg->link.prev, link) {
235                 if (&rg->link == head)
236                         break;
237                 if (rg->from > t)
238                         goto out;
239
240                 /* We overlap with this area, if it extends further than
241                  * us then we must extend ourselves.  Account for its
242                  * existing reservation. */
243                 if (rg->to > t) {
244                         chg += rg->to - t;
245                         t = rg->to;
246                 }
247                 chg -= rg->to - rg->from;
248         }
249
250 out:
251         spin_unlock(&resv->lock);
252         /*  We already know we raced and no longer need the new region */
253         kfree(nrg);
254         return chg;
255 out_nrg:
256         spin_unlock(&resv->lock);
257         return chg;
258 }
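
/*
 * Sketch of the two-phase protocol (values reuse the hypothetical example
 * above): with {0, 3} and {5, 8} already recorded, region_chg(resv, 2, 6)
 * returns 2, because only offsets 3 and 4 still need a reservation, and it
 * may pre-allocate a zero-sized region so that the later commit cannot fail.
 * Once the pages are instantiated, region_add(resv, 2, 6) makes the range
 * permanent.  vma_needs_reservation() and vma_commit_reservation() below use
 * exactly this pairing for a single huge-page index.
 */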
259
260 static long region_truncate(struct resv_map *resv, long end)
261 {
262         struct list_head *head = &resv->regions;
263         struct file_region *rg, *trg;
264         long chg = 0;
265
266         spin_lock(&resv->lock);
267         /* Locate the region we are either in or before. */
268         list_for_each_entry(rg, head, link)
269                 if (end <= rg->to)
270                         break;
271         if (&rg->link == head)
272                 goto out;
273
274         /* If we are in the middle of a region then adjust it. */
275         if (end > rg->from) {
276                 chg = rg->to - end;
277                 rg->to = end;
278                 rg = list_entry(rg->link.next, typeof(*rg), link);
279         }
280
281         /* Drop any remaining regions. */
282         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
283                 if (&rg->link == head)
284                         break;
285                 chg += rg->to - rg->from;
286                 list_del(&rg->link);
287                 kfree(rg);
288         }
289
290 out:
291         spin_unlock(&resv->lock);
292         return chg;
293 }
294
295 static long region_count(struct resv_map *resv, long f, long t)
296 {
297         struct list_head *head = &resv->regions;
298         struct file_region *rg;
299         long chg = 0;
300
301         spin_lock(&resv->lock);
302         /* Locate each segment we overlap with, and count that overlap. */
303         list_for_each_entry(rg, head, link) {
304                 long seg_from;
305                 long seg_to;
306
307                 if (rg->to <= f)
308                         continue;
309                 if (rg->from >= t)
310                         break;
311
312                 seg_from = max(rg->from, f);
313                 seg_to = min(rg->to, t);
314
315                 chg += seg_to - seg_from;
316         }
317         spin_unlock(&resv->lock);
318
319         return chg;
320 }
321
322 /*
323  * Convert the address within this vma to the page offset within
324  * the mapping, in pagecache page units; huge pages here.
325  */
326 static pgoff_t vma_hugecache_offset(struct hstate *h,
327                         struct vm_area_struct *vma, unsigned long address)
328 {
329         return ((address - vma->vm_start) >> huge_page_shift(h)) +
330                         (vma->vm_pgoff >> huge_page_order(h));
331 }
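
/*
 * Worked example (hypothetical values, 2 MB huge pages on a 4 KB base page
 * size, so huge_page_shift() == 21 and huge_page_order() == 9): for a VMA
 * with vm_start == 0x40000000 and vm_pgoff == 512, the address 0x40400000
 * maps to ((0x40400000 - 0x40000000) >> 21) + (512 >> 9) == 2 + 1 == 3,
 * i.e. the fourth huge page of the underlying file.
 */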
332
333 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
334                                      unsigned long address)
335 {
336         return vma_hugecache_offset(hstate_vma(vma), vma, address);
337 }
338
339 /*
340  * Return the size of the pages allocated when backing a VMA. In the majority
341  * of cases this will be the same size as used by the page table entries.
342  */
343 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
344 {
345         struct hstate *hstate;
346
347         if (!is_vm_hugetlb_page(vma))
348                 return PAGE_SIZE;
349
350         hstate = hstate_vma(vma);
351
352         return 1UL << huge_page_shift(hstate);
353 }
354 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
355
356 /*
357  * Return the page size being used by the MMU to back a VMA. In the majority
358  * of cases, the page size used by the kernel matches the MMU size. On
359  * architectures where it differs, an architecture-specific version of this
360  * function is required.
361  */
362 #ifndef vma_mmu_pagesize
363 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
364 {
365         return vma_kernel_pagesize(vma);
366 }
367 #endif
368
369 /*
370  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
371  * bits of the reservation map pointer, which are always clear due to
372  * alignment.
373  */
374 #define HPAGE_RESV_OWNER    (1UL << 0)
375 #define HPAGE_RESV_UNMAPPED (1UL << 1)
376 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
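
/*
 * Illustrative layout (the pointer value is hypothetical): for a MAP_PRIVATE
 * mapping whose creator owns the reservation, vm_private_data might hold
 *
 *	0xffff880012345600 | HPAGE_RESV_OWNER  ==  0xffff880012345601
 *
 * i.e. the resv_map pointer with the owner flag in its low bits.
 * vma_resv_map() below recovers the pointer by masking with ~HPAGE_RESV_MASK,
 * and is_vma_resv_set() tests the flag bits.
 */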
377
378 /*
379  * These helpers are used to track how many pages are reserved for
380  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
381  * is guaranteed to have its future faults succeed.
382  *
383  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
384  * the reserve counters are updated with the hugetlb_lock held. It is safe
385  * to reset the VMA at fork() time as it is not in use yet and there is no
386  * chance of the global counters getting corrupted as a result of the values.
387  *
388  * The private mapping reservation is represented in a subtly different
389  * manner to a shared mapping.  A shared mapping has a region map associated
390  * with the underlying file; this region map represents the backing file
391  * pages which have ever had a reservation assigned, and this persists even
392  * after the page is instantiated.  A private mapping has a region map
393  * associated with the original mmap which is attached to all VMAs which
394  * reference it; this region map represents those offsets which have consumed
395  * a reservation, i.e. where pages have been instantiated.
396  */
397 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
398 {
399         return (unsigned long)vma->vm_private_data;
400 }
401
402 static void set_vma_private_data(struct vm_area_struct *vma,
403                                                         unsigned long value)
404 {
405         vma->vm_private_data = (void *)value;
406 }
407
408 struct resv_map *resv_map_alloc(void)
409 {
410         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
411         if (!resv_map)
412                 return NULL;
413
414         kref_init(&resv_map->refs);
415         spin_lock_init(&resv_map->lock);
416         INIT_LIST_HEAD(&resv_map->regions);
417
418         return resv_map;
419 }
420
421 void resv_map_release(struct kref *ref)
422 {
423         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
424
425         /* Clear out any active regions before we release the map. */
426         region_truncate(resv_map, 0);
427         kfree(resv_map);
428 }
429
430 static inline struct resv_map *inode_resv_map(struct inode *inode)
431 {
432         return inode->i_mapping->private_data;
433 }
434
435 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
436 {
437         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
438         if (vma->vm_flags & VM_MAYSHARE) {
439                 struct address_space *mapping = vma->vm_file->f_mapping;
440                 struct inode *inode = mapping->host;
441
442                 return inode_resv_map(inode);
443
444         } else {
445                 return (struct resv_map *)(get_vma_private_data(vma) &
446                                                         ~HPAGE_RESV_MASK);
447         }
448 }
449
450 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
451 {
452         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
453         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
454
455         set_vma_private_data(vma, (get_vma_private_data(vma) &
456                                 HPAGE_RESV_MASK) | (unsigned long)map);
457 }
458
459 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
460 {
461         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
462         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
463
464         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
465 }
466
467 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
468 {
469         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
470
471         return (get_vma_private_data(vma) & flag) != 0;
472 }
473
474 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
475 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
476 {
477         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
478         if (!(vma->vm_flags & VM_MAYSHARE))
479                 vma->vm_private_data = (void *)0;
480 }
481
482 /* Returns true if the VMA has associated reserve pages */
483 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
484 {
485         if (vma->vm_flags & VM_NORESERVE) {
486                 /*
487                  * This address is already reserved by another process (chg == 0),
488                  * so we should decrement the reserved count. Without decrementing,
489                  * the reserve count remains after releasing the inode, because this
490                  * allocated page will go into the page cache and is regarded as
491                  * coming from the reserved pool in the releasing step.  Currently, we
492                  * don't have any other solution to deal with this situation
493                  * properly, so add a workaround here.
494                  */
495                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
496                         return 1;
497                 else
498                         return 0;
499         }
500
501         /* Shared mappings always use reserves */
502         if (vma->vm_flags & VM_MAYSHARE)
503                 return 1;
504
505         /*
506          * Only the process that called mmap() has reserves for
507          * private mappings.
508          */
509         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
510                 return 1;
511
512         return 0;
513 }
514
515 static void enqueue_huge_page(struct hstate *h, struct page *page)
516 {
517         int nid = page_to_nid(page);
518         list_move(&page->lru, &h->hugepage_freelists[nid]);
519         h->free_huge_pages++;
520         h->free_huge_pages_node[nid]++;
521 }
522
523 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
524 {
525         struct page *page;
526
527         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
528                 if (!is_migrate_isolate_page(page))
529                         break;
530         /*
531          * If no non-isolated free hugepage is found on the list,
532          * the allocation fails.
533          */
534         if (&h->hugepage_freelists[nid] == &page->lru)
535                 return NULL;
536         list_move(&page->lru, &h->hugepage_activelist);
537         set_page_refcounted(page);
538         h->free_huge_pages--;
539         h->free_huge_pages_node[nid]--;
540         return page;
541 }
542
543 /* Movability of hugepages depends on migration support. */
544 static inline gfp_t htlb_alloc_mask(struct hstate *h)
545 {
546         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
547                 return GFP_HIGHUSER_MOVABLE;
548         else
549                 return GFP_HIGHUSER;
550 }
551
552 static struct page *dequeue_huge_page_vma(struct hstate *h,
553                                 struct vm_area_struct *vma,
554                                 unsigned long address, int avoid_reserve,
555                                 long chg)
556 {
557         struct page *page = NULL;
558         struct mempolicy *mpol;
559         nodemask_t *nodemask;
560         struct zonelist *zonelist;
561         struct zone *zone;
562         struct zoneref *z;
563         unsigned int cpuset_mems_cookie;
564
565         /*
566          * A child process with MAP_PRIVATE mappings created by its parent
567          * has no page reserves. This check ensures that reservations are
568          * not "stolen". The child may still get SIGKILLed.
569          */
570         if (!vma_has_reserves(vma, chg) &&
571                         h->free_huge_pages - h->resv_huge_pages == 0)
572                 goto err;
573
574         /* If reserves cannot be used, ensure enough pages are in the pool */
575         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
576                 goto err;
577
578 retry_cpuset:
579         cpuset_mems_cookie = read_mems_allowed_begin();
580         zonelist = huge_zonelist(vma, address,
581                                         htlb_alloc_mask(h), &mpol, &nodemask);
582
583         for_each_zone_zonelist_nodemask(zone, z, zonelist,
584                                                 MAX_NR_ZONES - 1, nodemask) {
585                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
586                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
587                         if (page) {
588                                 if (avoid_reserve)
589                                         break;
590                                 if (!vma_has_reserves(vma, chg))
591                                         break;
592
593                                 SetPagePrivate(page);
594                                 h->resv_huge_pages--;
595                                 break;
596                         }
597                 }
598         }
599
600         mpol_cond_put(mpol);
601         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
602                 goto retry_cpuset;
603         return page;
604
605 err:
606         return NULL;
607 }
608
609 /*
610  * common helper functions for hstate_next_node_to_{alloc|free}.
611  * We may have allocated or freed a huge page based on a different
612  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
613  * be outside of *nodes_allowed.  Ensure that we use an allowed
614  * node for alloc or free.
615  */
616 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
617 {
618         nid = next_node(nid, *nodes_allowed);
619         if (nid == MAX_NUMNODES)
620                 nid = first_node(*nodes_allowed);
621         VM_BUG_ON(nid >= MAX_NUMNODES);
622
623         return nid;
624 }
625
626 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
627 {
628         if (!node_isset(nid, *nodes_allowed))
629                 nid = next_node_allowed(nid, nodes_allowed);
630         return nid;
631 }
632
633 /*
634  * Returns the previously saved node ["this node"] from which to
635  * allocate a persistent huge page for the pool and advances the
636  * next node from which to allocate, handling wrap at end of node
637  * mask.
638  */
639 static int hstate_next_node_to_alloc(struct hstate *h,
640                                         nodemask_t *nodes_allowed)
641 {
642         int nid;
643
644         VM_BUG_ON(!nodes_allowed);
645
646         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
647         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
648
649         return nid;
650 }
651
652 /*
653  * helper for free_pool_huge_page() - return the previously saved
654  * node ["this node"] from which to free a huge page.  Advance the
655  * next node id whether or not we find a free huge page to free so
656  * that the next attempt to free addresses the next node.
657  */
658 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
659 {
660         int nid;
661
662         VM_BUG_ON(!nodes_allowed);
663
664         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
665         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
666
667         return nid;
668 }
669
670 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
671         for (nr_nodes = nodes_weight(*mask);                            \
672                 nr_nodes > 0 &&                                         \
673                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
674                 nr_nodes--)
675
676 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
677         for (nr_nodes = nodes_weight(*mask);                            \
678                 nr_nodes > 0 &&                                         \
679                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
680                 nr_nodes--)
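
/*
 * Usage sketch (simplified from alloc_fresh_huge_page() below): each iterator
 * visits every node in the mask at most once, starting from the hstate's
 * saved next_nid_to_alloc/next_nid_to_free and wrapping at the end of the
 * nodemask, so repeated calls spread allocations and frees round-robin:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 */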
681
682 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
683 static void destroy_compound_gigantic_page(struct page *page,
684                                         unsigned int order)
685 {
686         int i;
687         int nr_pages = 1 << order;
688         struct page *p = page + 1;
689
690         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
691                 __ClearPageTail(p);
692                 set_page_refcounted(p);
693                 p->first_page = NULL;
694         }
695
696         set_compound_order(page, 0);
697         __ClearPageHead(page);
698 }
699
700 static void free_gigantic_page(struct page *page, unsigned int order)
701 {
702         free_contig_range(page_to_pfn(page), 1 << order);
703 }
704
705 static int __alloc_gigantic_page(unsigned long start_pfn,
706                                 unsigned long nr_pages)
707 {
708         unsigned long end_pfn = start_pfn + nr_pages;
709         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
710 }
711
712 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
713                                 unsigned long nr_pages)
714 {
715         unsigned long i, end_pfn = start_pfn + nr_pages;
716         struct page *page;
717
718         for (i = start_pfn; i < end_pfn; i++) {
719                 if (!pfn_valid(i))
720                         return false;
721
722                 page = pfn_to_page(i);
723
724                 if (PageReserved(page))
725                         return false;
726
727                 if (page_count(page) > 0)
728                         return false;
729
730                 if (PageHuge(page))
731                         return false;
732         }
733
734         return true;
735 }
736
737 static bool zone_spans_last_pfn(const struct zone *zone,
738                         unsigned long start_pfn, unsigned long nr_pages)
739 {
740         unsigned long last_pfn = start_pfn + nr_pages - 1;
741         return zone_spans_pfn(zone, last_pfn);
742 }
743
744 static struct page *alloc_gigantic_page(int nid, unsigned int order)
745 {
746         unsigned long nr_pages = 1 << order;
747         unsigned long ret, pfn, flags;
748         struct zone *z;
749
750         z = NODE_DATA(nid)->node_zones;
751         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
752                 spin_lock_irqsave(&z->lock, flags);
753
754                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
755                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
756                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
757                                 /*
758                                  * We release the zone lock here because
759                                  * alloc_contig_range() will also lock the zone
760                                  * at some point. If there's an allocation
761                                  * spinning on this lock, it may win the race
762                                  * and cause alloc_contig_range() to fail...
763                                  */
764                                 spin_unlock_irqrestore(&z->lock, flags);
765                                 ret = __alloc_gigantic_page(pfn, nr_pages);
766                                 if (!ret)
767                                         return pfn_to_page(pfn);
768                                 spin_lock_irqsave(&z->lock, flags);
769                         }
770                         pfn += nr_pages;
771                 }
772
773                 spin_unlock_irqrestore(&z->lock, flags);
774         }
775
776         return NULL;
777 }
778
779 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
780 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
781
782 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
783 {
784         struct page *page;
785
786         page = alloc_gigantic_page(nid, huge_page_order(h));
787         if (page) {
788                 prep_compound_gigantic_page(page, huge_page_order(h));
789                 prep_new_huge_page(h, page, nid);
790         }
791
792         return page;
793 }
794
795 static int alloc_fresh_gigantic_page(struct hstate *h,
796                                 nodemask_t *nodes_allowed)
797 {
798         struct page *page = NULL;
799         int nr_nodes, node;
800
801         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
802                 page = alloc_fresh_gigantic_page_node(h, node);
803                 if (page)
804                         return 1;
805         }
806
807         return 0;
808 }
809
810 static inline bool gigantic_page_supported(void) { return true; }
811 #else
812 static inline bool gigantic_page_supported(void) { return false; }
813 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
814 static inline void destroy_compound_gigantic_page(struct page *page,
815                                                 unsigned int order) { }
816 static inline int alloc_fresh_gigantic_page(struct hstate *h,
817                                         nodemask_t *nodes_allowed) { return 0; }
818 #endif
819
820 static void update_and_free_page(struct hstate *h, struct page *page)
821 {
822         int i;
823
824         if (hstate_is_gigantic(h) && !gigantic_page_supported())
825                 return;
826
827         h->nr_huge_pages--;
828         h->nr_huge_pages_node[page_to_nid(page)]--;
829         for (i = 0; i < pages_per_huge_page(h); i++) {
830                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
831                                 1 << PG_referenced | 1 << PG_dirty |
832                                 1 << PG_active | 1 << PG_private |
833                                 1 << PG_writeback);
834         }
835         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
836         set_compound_page_dtor(page, NULL);
837         set_page_refcounted(page);
838         if (hstate_is_gigantic(h)) {
839                 destroy_compound_gigantic_page(page, huge_page_order(h));
840                 free_gigantic_page(page, huge_page_order(h));
841         } else {
842                 arch_release_hugepage(page);
843                 __free_pages(page, huge_page_order(h));
844         }
845 }
846
847 struct hstate *size_to_hstate(unsigned long size)
848 {
849         struct hstate *h;
850
851         for_each_hstate(h) {
852                 if (huge_page_size(h) == size)
853                         return h;
854         }
855         return NULL;
856 }
857
858 /*
859  * Test to determine whether the hugepage is "active/in-use" (i.e. linked
860  * to hstate->hugepage_activelist).
861  *
862  * This function can be called for tail pages, but never returns true for them.
863  */
864 bool page_huge_active(struct page *page)
865 {
866         VM_BUG_ON_PAGE(!PageHuge(page), page);
867         return PageHead(page) && PagePrivate(&page[1]);
868 }
869
870 /* never called for tail page */
871 static void set_page_huge_active(struct page *page)
872 {
873         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
874         SetPagePrivate(&page[1]);
875 }
876
877 static void clear_page_huge_active(struct page *page)
878 {
879         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
880         ClearPagePrivate(&page[1]);
881 }
882
883 void free_huge_page(struct page *page)
884 {
885         /*
886          * Can't pass hstate in here because it is called from the
887          * compound page destructor.
888          */
889         struct hstate *h = page_hstate(page);
890         int nid = page_to_nid(page);
891         struct hugepage_subpool *spool =
892                 (struct hugepage_subpool *)page_private(page);
893         bool restore_reserve;
894
895         set_page_private(page, 0);
896         page->mapping = NULL;
897         BUG_ON(page_count(page));
898         BUG_ON(page_mapcount(page));
899         restore_reserve = PagePrivate(page);
900         ClearPagePrivate(page);
901
902         spin_lock(&hugetlb_lock);
903         clear_page_huge_active(page);
904         hugetlb_cgroup_uncharge_page(hstate_index(h),
905                                      pages_per_huge_page(h), page);
906         if (restore_reserve)
907                 h->resv_huge_pages++;
908
909         if (h->surplus_huge_pages_node[nid]) {
910                 /* remove the page from active list */
911                 list_del(&page->lru);
912                 update_and_free_page(h, page);
913                 h->surplus_huge_pages--;
914                 h->surplus_huge_pages_node[nid]--;
915         } else {
916                 arch_clear_hugepage_flags(page);
917                 enqueue_huge_page(h, page);
918         }
919         spin_unlock(&hugetlb_lock);
920         hugepage_subpool_put_pages(spool, 1);
921 }
922
923 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
924 {
925         INIT_LIST_HEAD(&page->lru);
926         set_compound_page_dtor(page, free_huge_page);
927         spin_lock(&hugetlb_lock);
928         set_hugetlb_cgroup(page, NULL);
929         h->nr_huge_pages++;
930         h->nr_huge_pages_node[nid]++;
931         spin_unlock(&hugetlb_lock);
932         put_page(page); /* free it into the hugepage allocator */
933 }
934
935 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
936 {
937         int i;
938         int nr_pages = 1 << order;
939         struct page *p = page + 1;
940
941         /* we rely on prep_new_huge_page to set the destructor */
942         set_compound_order(page, order);
943         __SetPageHead(page);
944         __ClearPageReserved(page);
945         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
946                 __SetPageTail(p);
947                 /*
948                  * For gigantic hugepages allocated through bootmem at
949                  * boot, it's safer to be consistent with the not-gigantic
950                  * hugepages and clear the PG_reserved bit from all tail pages
951                  * too.  Otherwise drivers using get_user_pages() to access tail
952                  * pages may get the reference counting wrong if they see
953                  * PG_reserved set on a tail page (despite the head page not
954                  * having PG_reserved set).  Enforcing this consistency between
955                  * head and tail pages allows drivers to optimize away a check
956                  * on the head page when they need to know if put_page() is needed
957                  * after get_user_pages().
958                  */
959                 __ClearPageReserved(p);
960                 set_page_count(p, 0);
961                 p->first_page = page;
962         }
963 }
964
965 /*
966  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
967  * transparent huge pages.  See the PageTransHuge() documentation for more
968  * details.
969  */
970 int PageHuge(struct page *page)
971 {
972         if (!PageCompound(page))
973                 return 0;
974
975         page = compound_head(page);
976         return get_compound_page_dtor(page) == free_huge_page;
977 }
978 EXPORT_SYMBOL_GPL(PageHuge);
979
980 /*
981  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
982  * normal or transparent huge pages.
983  */
984 int PageHeadHuge(struct page *page_head)
985 {
986         if (!PageHead(page_head))
987                 return 0;
988
989         return get_compound_page_dtor(page_head) == free_huge_page;
990 }
991
992 pgoff_t __basepage_index(struct page *page)
993 {
994         struct page *page_head = compound_head(page);
995         pgoff_t index = page_index(page_head);
996         unsigned long compound_idx;
997
998         if (!PageHuge(page_head))
999                 return page_index(page);
1000
1001         if (compound_order(page_head) >= MAX_ORDER)
1002                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1003         else
1004                 compound_idx = page - page_head;
1005
1006         return (index << compound_order(page_head)) + compound_idx;
1007 }
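
/*
 * Worked example (hypothetical values): for a 2 MB huge page, compound_order()
 * is 9 with 4 KB base pages.  If the head page has page_index() == 3 and the
 * passed-in page is the fifth tail page (compound_idx == 5), the base-page
 * index returned is (3 << 9) + 5 == 1541.
 */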
1008
1009 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1010 {
1011         struct page *page;
1012
1013         page = alloc_pages_exact_node(nid,
1014                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1015                                                 __GFP_REPEAT|__GFP_NOWARN,
1016                 huge_page_order(h));
1017         if (page) {
1018                 if (arch_prepare_hugepage(page)) {
1019                         __free_pages(page, huge_page_order(h));
1020                         return NULL;
1021                 }
1022                 prep_new_huge_page(h, page, nid);
1023         }
1024
1025         return page;
1026 }
1027
1028 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1029 {
1030         struct page *page;
1031         int nr_nodes, node;
1032         int ret = 0;
1033
1034         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1035                 page = alloc_fresh_huge_page_node(h, node);
1036                 if (page) {
1037                         ret = 1;
1038                         break;
1039                 }
1040         }
1041
1042         if (ret)
1043                 count_vm_event(HTLB_BUDDY_PGALLOC);
1044         else
1045                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1046
1047         return ret;
1048 }
1049
1050 /*
1051  * Free huge page from pool from next node to free.
1052  * Attempt to keep persistent huge pages more or less
1053  * balanced over allowed nodes.
1054  * Called with hugetlb_lock locked.
1055  */
1056 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1057                                                          bool acct_surplus)
1058 {
1059         int nr_nodes, node;
1060         int ret = 0;
1061
1062         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1063                 /*
1064                  * If we're returning unused surplus pages, only examine
1065                  * nodes with surplus pages.
1066                  */
1067                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1068                     !list_empty(&h->hugepage_freelists[node])) {
1069                         struct page *page =
1070                                 list_entry(h->hugepage_freelists[node].next,
1071                                           struct page, lru);
1072                         list_del(&page->lru);
1073                         h->free_huge_pages--;
1074                         h->free_huge_pages_node[node]--;
1075                         if (acct_surplus) {
1076                                 h->surplus_huge_pages--;
1077                                 h->surplus_huge_pages_node[node]--;
1078                         }
1079                         update_and_free_page(h, page);
1080                         ret = 1;
1081                         break;
1082                 }
1083         }
1084
1085         return ret;
1086 }
1087
1088 /*
1089  * Dissolve a given free hugepage into free buddy pages. This function does
1090  * nothing for in-use (including surplus) hugepages.
1091  */
1092 static void dissolve_free_huge_page(struct page *page)
1093 {
1094         spin_lock(&hugetlb_lock);
1095         if (PageHuge(page) && !page_count(page)) {
1096                 struct hstate *h = page_hstate(page);
1097                 int nid = page_to_nid(page);
1098                 list_del(&page->lru);
1099                 h->free_huge_pages--;
1100                 h->free_huge_pages_node[nid]--;
1101                 update_and_free_page(h, page);
1102         }
1103         spin_unlock(&hugetlb_lock);
1104 }
1105
1106 /*
1107  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1108  * make specified memory blocks removable from the system.
1109  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1110  */
1111 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1112 {
1113         unsigned int order = 8 * sizeof(void *);
1114         unsigned long pfn;
1115         struct hstate *h;
1116
1117         if (!hugepages_supported())
1118                 return;
1119
1120         /* Set scan step to minimum hugepage size */
1121         for_each_hstate(h)
1122                 if (order > huge_page_order(h))
1123                         order = huge_page_order(h);
1124         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
1125         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
1126                 dissolve_free_huge_page(pfn_to_page(pfn));
1127 }
1128
1129 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1130 {
1131         struct page *page;
1132         unsigned int r_nid;
1133
1134         if (hstate_is_gigantic(h))
1135                 return NULL;
1136
1137         /*
1138          * Assume we will successfully allocate the surplus page to
1139          * prevent racing processes from causing the surplus to exceed
1140          * overcommit.
1141          *
1142          * This however introduces a different race, where a process B
1143          * tries to grow the static hugepage pool while alloc_pages() is
1144          * called by process A. B will only examine the per-node
1145          * counters in determining if surplus huge pages can be
1146          * converted to normal huge pages in adjust_pool_surplus(). A
1147          * won't be able to increment the per-node counter, until the
1148          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1149          * no more huge pages can be converted from surplus to normal
1150          * state (and doesn't try to convert again). Thus, we have a
1151          * case where a surplus huge page exists, the pool is grown, and
1152          * the surplus huge page still exists after, even though it
1153          * should just have been converted to a normal huge page. This
1154          * does not leak memory, though, as the hugepage will be freed
1155          * once it is out of use. It also does not allow the counters to
1156          * go out of whack in adjust_pool_surplus() as we don't modify
1157          * the node values until we've gotten the hugepage and only the
1158          * per-node value is checked there.
1159          */
1160         spin_lock(&hugetlb_lock);
1161         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1162                 spin_unlock(&hugetlb_lock);
1163                 return NULL;
1164         } else {
1165                 h->nr_huge_pages++;
1166                 h->surplus_huge_pages++;
1167         }
1168         spin_unlock(&hugetlb_lock);
1169
1170         if (nid == NUMA_NO_NODE)
1171                 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1172                                    __GFP_REPEAT|__GFP_NOWARN,
1173                                    huge_page_order(h));
1174         else
1175                 page = alloc_pages_exact_node(nid,
1176                         htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1177                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1178
1179         if (page && arch_prepare_hugepage(page)) {
1180                 __free_pages(page, huge_page_order(h));
1181                 page = NULL;
1182         }
1183
1184         spin_lock(&hugetlb_lock);
1185         if (page) {
1186                 INIT_LIST_HEAD(&page->lru);
1187                 r_nid = page_to_nid(page);
1188                 set_compound_page_dtor(page, free_huge_page);
1189                 set_hugetlb_cgroup(page, NULL);
1190                 /*
1191                  * We incremented the global counters already
1192                  */
1193                 h->nr_huge_pages_node[r_nid]++;
1194                 h->surplus_huge_pages_node[r_nid]++;
1195                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1196         } else {
1197                 h->nr_huge_pages--;
1198                 h->surplus_huge_pages--;
1199                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1200         }
1201         spin_unlock(&hugetlb_lock);
1202
1203         return page;
1204 }
1205
1206 /*
1207  * This allocation function is useful in the context where vma is irrelevant.
1208  * E.g. soft-offlining uses this function because it only cares about the
1209  * physical address of the error page.
1210  */
1211 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1212 {
1213         struct page *page = NULL;
1214
1215         spin_lock(&hugetlb_lock);
1216         if (h->free_huge_pages - h->resv_huge_pages > 0)
1217                 page = dequeue_huge_page_node(h, nid);
1218         spin_unlock(&hugetlb_lock);
1219
1220         if (!page)
1221                 page = alloc_buddy_huge_page(h, nid);
1222
1223         return page;
1224 }
1225
1226 /*
1227  * Increase the hugetlb pool such that it can accommodate a reservation
1228  * of size 'delta'.
1229  */
1230 static int gather_surplus_pages(struct hstate *h, int delta)
1231 {
1232         struct list_head surplus_list;
1233         struct page *page, *tmp;
1234         int ret, i;
1235         int needed, allocated;
1236         bool alloc_ok = true;
1237
1238         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1239         if (needed <= 0) {
1240                 h->resv_huge_pages += delta;
1241                 return 0;
1242         }
1243
1244         allocated = 0;
1245         INIT_LIST_HEAD(&surplus_list);
1246
1247         ret = -ENOMEM;
1248 retry:
1249         spin_unlock(&hugetlb_lock);
1250         for (i = 0; i < needed; i++) {
1251                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1252                 if (!page) {
1253                         alloc_ok = false;
1254                         break;
1255                 }
1256                 list_add(&page->lru, &surplus_list);
1257         }
1258         allocated += i;
1259
1260         /*
1261          * After retaking hugetlb_lock, we need to recalculate 'needed'
1262          * because either resv_huge_pages or free_huge_pages may have changed.
1263          */
1264         spin_lock(&hugetlb_lock);
1265         needed = (h->resv_huge_pages + delta) -
1266                         (h->free_huge_pages + allocated);
1267         if (needed > 0) {
1268                 if (alloc_ok)
1269                         goto retry;
1270                 /*
1271                  * We were not able to allocate enough pages to
1272                  * satisfy the entire reservation so we free what
1273                  * we've allocated so far.
1274                  */
1275                 goto free;
1276         }
1277         /*
1278          * The surplus_list now contains _at_least_ the number of extra pages
1279          * needed to accommodate the reservation.  Add the appropriate number
1280          * of pages to the hugetlb pool and free the extras back to the buddy
1281          * allocator.  Commit the entire reservation here to prevent another
1282          * process from stealing the pages as they are added to the pool but
1283          * before they are reserved.
1284          */
1285         needed += allocated;
1286         h->resv_huge_pages += delta;
1287         ret = 0;
1288
1289         /* Free the needed pages to the hugetlb pool */
1290         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1291                 if ((--needed) < 0)
1292                         break;
1293                 /*
1294                  * This page is now managed by the hugetlb allocator and has
1295                  * no users -- drop the buddy allocator's reference.
1296                  */
1297                 put_page_testzero(page);
1298                 VM_BUG_ON_PAGE(page_count(page), page);
1299                 enqueue_huge_page(h, page);
1300         }
1301 free:
1302         spin_unlock(&hugetlb_lock);
1303
1304         /* Free unnecessary surplus pages to the buddy allocator */
1305         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1306                 put_page(page);
1307         spin_lock(&hugetlb_lock);
1308
1309         return ret;
1310 }
1311
1312 /*
1313  * When releasing a hugetlb pool reservation, any surplus pages that were
1314  * allocated to satisfy the reservation must be explicitly freed if they were
1315  * never used.
1316  * Called with hugetlb_lock held.
1317  */
1318 static void return_unused_surplus_pages(struct hstate *h,
1319                                         unsigned long unused_resv_pages)
1320 {
1321         unsigned long nr_pages;
1322
1323         /* Uncommit the reservation */
1324         h->resv_huge_pages -= unused_resv_pages;
1325
1326         /* Cannot return gigantic pages currently */
1327         if (hstate_is_gigantic(h))
1328                 return;
1329
1330         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1331
1332         /*
1333          * We want to release as many surplus pages as possible, spread
1334          * evenly across all nodes with memory. Iterate across these nodes
1335          * until we can no longer free unreserved surplus pages. This occurs
1336          * when the nodes with surplus pages have no free pages.
1337  * free_pool_huge_page() will balance the freed pages across the
1338          * on-line nodes with memory and will handle the hstate accounting.
1339          */
1340         while (nr_pages--) {
1341                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1342                         break;
1343                 cond_resched_lock(&hugetlb_lock);
1344         }
1345 }
1346
1347 /*
1348  * Determine if the huge page at addr within the vma has an associated
1349  * reservation.  Where it does not we will need to logically increase
1350  * reservation and actually increase subpool usage before an allocation
1351  * can occur.  Where any new reservation would be required the
1352  * reservation change is prepared, but not committed.  Once the page
1353  * has been allocated from the subpool and instantiated the change should
1354  * be committed via vma_commit_reservation.  No action is required on
1355  * failure.
1356  */
1357 static long vma_needs_reservation(struct hstate *h,
1358                         struct vm_area_struct *vma, unsigned long addr)
1359 {
1360         struct resv_map *resv;
1361         pgoff_t idx;
1362         long chg;
1363
1364         resv = vma_resv_map(vma);
1365         if (!resv)
1366                 return 1;
1367
1368         idx = vma_hugecache_offset(h, vma, addr);
1369         chg = region_chg(resv, idx, idx + 1);
1370
1371         if (vma->vm_flags & VM_MAYSHARE)
1372                 return chg;
1373         else
1374                 return chg < 0 ? chg : 0;
1375 }
1376 static void vma_commit_reservation(struct hstate *h,
1377                         struct vm_area_struct *vma, unsigned long addr)
1378 {
1379         struct resv_map *resv;
1380         pgoff_t idx;
1381
1382         resv = vma_resv_map(vma);
1383         if (!resv)
1384                 return;
1385
1386         idx = vma_hugecache_offset(h, vma, addr);
1387         region_add(resv, idx, idx + 1);
1388 }
1389
1390 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1391                                     unsigned long addr, int avoid_reserve)
1392 {
1393         struct hugepage_subpool *spool = subpool_vma(vma);
1394         struct hstate *h = hstate_vma(vma);
1395         struct page *page;
1396         long chg;
1397         int ret, idx;
1398         struct hugetlb_cgroup *h_cg;
1399
1400         idx = hstate_index(h);
1401         /*
1402          * Processes that did not create the mapping will have no
1403          * reserves and will not have accounted against the subpool
1404          * limit. Check that the subpool limit can be made before
1405          * satisfying the allocation.  MAP_NORESERVE mappings may also
1406          * need pages and subpool limit allocated if no reserve
1407          * mapping overlaps.
1408          */
1409         chg = vma_needs_reservation(h, vma, addr);
1410         if (chg < 0)
1411                 return ERR_PTR(-ENOMEM);
1412         if (chg || avoid_reserve)
1413                 if (hugepage_subpool_get_pages(spool, 1))
1414                         return ERR_PTR(-ENOSPC);
1415
1416         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1417         if (ret)
1418                 goto out_subpool_put;
1419
1420         spin_lock(&hugetlb_lock);
1421         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1422         if (!page) {
1423                 spin_unlock(&hugetlb_lock);
1424                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1425                 if (!page)
1426                         goto out_uncharge_cgroup;
1427
1428                 spin_lock(&hugetlb_lock);
1429                 list_move(&page->lru, &h->hugepage_activelist);
1430                 /* Fall through */
1431         }
1432         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1433         spin_unlock(&hugetlb_lock);
1434
1435         set_page_private(page, (unsigned long)spool);
1436
1437         vma_commit_reservation(h, vma, addr);
1438         return page;
1439
1440 out_uncharge_cgroup:
1441         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1442 out_subpool_put:
1443         if (chg || avoid_reserve)
1444                 hugepage_subpool_put_pages(spool, 1);
1445         return ERR_PTR(-ENOSPC);
1446 }
1447
1448 /*
1449  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1450  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1451  * where no ERR_VALUE is expected to be returned.
1452  */
1453 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1454                                 unsigned long addr, int avoid_reserve)
1455 {
1456         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1457         if (IS_ERR(page))
1458                 page = NULL;
1459         return page;
1460 }
1461
1462 int __weak alloc_bootmem_huge_page(struct hstate *h)
1463 {
1464         struct huge_bootmem_page *m;
1465         int nr_nodes, node;
1466
1467         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1468                 void *addr;
1469
1470                 addr = memblock_virt_alloc_try_nid_nopanic(
1471                                 huge_page_size(h), huge_page_size(h),
1472                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1473                 if (addr) {
1474                         /*
1475                          * Use the beginning of the huge page to store the
1476                          * huge_bootmem_page struct (until gather_bootmem
1477                          * puts them into the mem_map).
1478                          */
1479                         m = addr;
1480                         goto found;
1481                 }
1482         }
1483         return 0;
1484
1485 found:
1486         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1487         /* Put them into a private list first because mem_map is not up yet */
1488         list_add(&m->list, &huge_boot_pages);
1489         m->hstate = h;
1490         return 1;
1491 }
1492
1493 static void __init prep_compound_huge_page(struct page *page,
1494                 unsigned int order)
1495 {
1496         if (unlikely(order > (MAX_ORDER - 1)))
1497                 prep_compound_gigantic_page(page, order);
1498         else
1499                 prep_compound_page(page, order);
1500 }
1501
1502 /* Put bootmem huge pages into the standard lists after mem_map is up */
1503 static void __init gather_bootmem_prealloc(void)
1504 {
1505         struct huge_bootmem_page *m;
1506
1507         list_for_each_entry(m, &huge_boot_pages, list) {
1508                 struct hstate *h = m->hstate;
1509                 struct page *page;
1510
1511 #ifdef CONFIG_HIGHMEM
1512                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1513                 memblock_free_late(__pa(m),
1514                                    sizeof(struct huge_bootmem_page));
1515 #else
1516                 page = virt_to_page(m);
1517 #endif
1518                 WARN_ON(page_count(page) != 1);
1519                 prep_compound_huge_page(page, h->order);
1520                 WARN_ON(PageReserved(page));
1521                 prep_new_huge_page(h, page, page_to_nid(page));
1522                 /*
1523                  * If we had gigantic hugepages allocated at boot time, we need
1524                  * to restore the 'stolen' pages to totalram_pages in order to
1525                  * fix confusing memory reports from free(1) and other
1526                  * side-effects, like CommitLimit going negative.
1527                  */
1528                 if (hstate_is_gigantic(h))
1529                         adjust_managed_page_count(page, 1 << h->order);
1530         }
1531 }
1532
1533 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1534 {
1535         unsigned long i;
1536
1537         for (i = 0; i < h->max_huge_pages; ++i) {
1538                 if (hstate_is_gigantic(h)) {
1539                         if (!alloc_bootmem_huge_page(h))
1540                                 break;
1541                 } else if (!alloc_fresh_huge_page(h,
1542                                          &node_states[N_MEMORY]))
1543                         break;
1544         }
1545         h->max_huge_pages = i;
1546 }
1547
1548 static void __init hugetlb_init_hstates(void)
1549 {
1550         struct hstate *h;
1551
1552         for_each_hstate(h) {
1553                 /* oversize hugepages were init'ed in early boot */
1554                 if (!hstate_is_gigantic(h))
1555                         hugetlb_hstate_alloc_pages(h);
1556         }
1557 }
1558
1559 static char * __init memfmt(char *buf, unsigned long n)
1560 {
1561         if (n >= (1UL << 30))
1562                 sprintf(buf, "%lu GB", n >> 30);
1563         else if (n >= (1UL << 20))
1564                 sprintf(buf, "%lu MB", n >> 20);
1565         else
1566                 sprintf(buf, "%lu KB", n >> 10);
1567         return buf;
1568 }
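
/*
 * For example, a 2 MB huge page size reaches memfmt() as n = 2097152 and
 * is printed as "2 MB"; a 1 GB gigantic page is printed as "1 GB".
 */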
1569
1570 static void __init report_hugepages(void)
1571 {
1572         struct hstate *h;
1573
1574         for_each_hstate(h) {
1575                 char buf[32];
1576                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1577                         memfmt(buf, huge_page_size(h)),
1578                         h->free_huge_pages);
1579         }
1580 }
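
/*
 * Illustrative output: with a 2 MB default huge page size and
 * "hugepages=512" on the boot command line, the message above would read,
 * for instance:
 *   HugeTLB registered 2 MB page size, pre-allocated 512 pages
 */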
1581
1582 #ifdef CONFIG_HIGHMEM
1583 static void try_to_free_low(struct hstate *h, unsigned long count,
1584                                                 nodemask_t *nodes_allowed)
1585 {
1586         int i;
1587
1588         if (hstate_is_gigantic(h))
1589                 return;
1590
1591         for_each_node_mask(i, *nodes_allowed) {
1592                 struct page *page, *next;
1593                 struct list_head *freel = &h->hugepage_freelists[i];
1594                 list_for_each_entry_safe(page, next, freel, lru) {
1595                         if (count >= h->nr_huge_pages)
1596                                 return;
1597                         if (PageHighMem(page))
1598                                 continue;
1599                         list_del(&page->lru);
1600                         update_and_free_page(h, page);
1601                         h->free_huge_pages--;
1602                         h->free_huge_pages_node[page_to_nid(page)]--;
1603                 }
1604         }
1605 }
1606 #else
1607 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1608                                                 nodemask_t *nodes_allowed)
1609 {
1610 }
1611 #endif
1612
1613 /*
1614  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1615  * balanced by operating on them in a round-robin fashion.
1616  * Returns 1 if an adjustment was made.
1617  */
1618 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1619                                 int delta)
1620 {
1621         int nr_nodes, node;
1622
1623         VM_BUG_ON(delta != -1 && delta != 1);
1624
1625         if (delta < 0) {
1626                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1627                         if (h->surplus_huge_pages_node[node])
1628                                 goto found;
1629                 }
1630         } else {
1631                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1632                         if (h->surplus_huge_pages_node[node] <
1633                                         h->nr_huge_pages_node[node])
1634                                 goto found;
1635                 }
1636         }
1637         return 0;
1638
1639 found:
1640         h->surplus_huge_pages += delta;
1641         h->surplus_huge_pages_node[node] += delta;
1642         return 1;
1643 }
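
/*
 * The iterator choice above mirrors what the conversion means for a node:
 * clearing a page's surplus status (delta == -1) effectively adds a
 * persistent page to that node, so the "to_alloc" round-robin is used;
 * marking a page surplus (delta == +1) is the first step towards freeing
 * it, so the "to_free" round-robin is used.
 */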
1644
1645 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1646 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1647                                                 nodemask_t *nodes_allowed)
1648 {
1649         unsigned long min_count, ret;
1650
1651         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1652                 return h->max_huge_pages;
1653
1654         /*
1655          * Increase the pool size
1656          * First take pages out of surplus state.  Then make up the
1657          * remaining difference by allocating fresh huge pages.
1658          *
1659          * We might race with alloc_buddy_huge_page() here and be unable
1660          * to convert a surplus huge page to a normal huge page. That is
1661          * not critical, though, it just means the overall size of the
1662          * pool might be one hugepage larger than it needs to be, but
1663          * within all the constraints specified by the sysctls.
1664          */
1665         spin_lock(&hugetlb_lock);
1666         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1667                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1668                         break;
1669         }
1670
1671         while (count > persistent_huge_pages(h)) {
1672                 /*
1673                  * If this allocation races such that we no longer need the
1674                  * page, free_huge_page will handle it by freeing the page
1675                  * and reducing the surplus.
1676                  */
1677                 spin_unlock(&hugetlb_lock);
1678                 if (hstate_is_gigantic(h))
1679                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1680                 else
1681                         ret = alloc_fresh_huge_page(h, nodes_allowed);
1682                 spin_lock(&hugetlb_lock);
1683                 if (!ret)
1684                         goto out;
1685
1686                 /* Bail for signals. Probably ctrl-c from user */
1687                 if (signal_pending(current))
1688                         goto out;
1689         }
1690
1691         /*
1692          * Decrease the pool size
1693          * First return free pages to the buddy allocator (being careful
1694          * to keep enough around to satisfy reservations).  Then place
1695          * pages into surplus state as needed so the pool will shrink
1696          * to the desired size as pages become free.
1697          *
1698          * By placing pages into the surplus state independent of the
1699          * overcommit value, we are allowing the surplus pool size to
1700          * exceed overcommit. There are few sane options here. Since
1701          * alloc_buddy_huge_page() is checking the global counter,
1702          * though, we'll note that we're not allowed to exceed surplus
1703          * and won't grow the pool anywhere else. Not until one of the
1704          * sysctls is changed, or the surplus pages go out of use.
1705          */
1706         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1707         min_count = max(count, min_count);
1708         try_to_free_low(h, min_count, nodes_allowed);
1709         while (min_count < persistent_huge_pages(h)) {
1710                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1711                         break;
1712                 cond_resched_lock(&hugetlb_lock);
1713         }
1714         while (count < persistent_huge_pages(h)) {
1715                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1716                         break;
1717         }
1718 out:
1719         ret = persistent_huge_pages(h);
1720         spin_unlock(&hugetlb_lock);
1721         return ret;
1722 }
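
/*
 * Worked example for the shrink path: with nr_huge_pages = 10,
 * free_huge_pages = 4 and resv_huge_pages = 2, min_count becomes
 * 2 + 10 - 4 = 8, so even a request for count = 0 keeps the 6 pages that
 * are in use plus the 2 that are reserved.
 */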
1723
1724 #define HSTATE_ATTR_RO(_name) \
1725         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1726
1727 #define HSTATE_ATTR(_name) \
1728         static struct kobj_attribute _name##_attr = \
1729                 __ATTR(_name, 0644, _name##_show, _name##_store)
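
/*
 * For example, HSTATE_ATTR(nr_hugepages) below expands to a
 * nr_hugepages_attr kobj_attribute with mode 0644 whose ->show and ->store
 * hooks are nr_hugepages_show() and nr_hugepages_store().
 */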
1730
1731 static struct kobject *hugepages_kobj;
1732 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1733
1734 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1735
1736 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1737 {
1738         int i;
1739
1740         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1741                 if (hstate_kobjs[i] == kobj) {
1742                         if (nidp)
1743                                 *nidp = NUMA_NO_NODE;
1744                         return &hstates[i];
1745                 }
1746
1747         return kobj_to_node_hstate(kobj, nidp);
1748 }
1749
1750 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1751                                         struct kobj_attribute *attr, char *buf)
1752 {
1753         struct hstate *h;
1754         unsigned long nr_huge_pages;
1755         int nid;
1756
1757         h = kobj_to_hstate(kobj, &nid);
1758         if (nid == NUMA_NO_NODE)
1759                 nr_huge_pages = h->nr_huge_pages;
1760         else
1761                 nr_huge_pages = h->nr_huge_pages_node[nid];
1762
1763         return sprintf(buf, "%lu\n", nr_huge_pages);
1764 }
1765
1766 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1767                                            struct hstate *h, int nid,
1768                                            unsigned long count, size_t len)
1769 {
1770         int err;
1771         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1772
1773         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1774                 err = -EINVAL;
1775                 goto out;
1776         }
1777
1778         if (nid == NUMA_NO_NODE) {
1779                 /*
1780                  * global hstate attribute
1781                  */
1782                 if (!(obey_mempolicy &&
1783                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1784                         NODEMASK_FREE(nodes_allowed);
1785                         nodes_allowed = &node_states[N_MEMORY];
1786                 }
1787         } else if (nodes_allowed) {
1788                 /*
1789                  * per node hstate attribute: adjust count to global,
1790                  * but restrict alloc/free to the specified node.
1791                  */
1792                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1793                 init_nodemask_of_node(nodes_allowed, nid);
1794         } else
1795                 nodes_allowed = &node_states[N_MEMORY];
1796
1797         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1798
1799         if (nodes_allowed != &node_states[N_MEMORY])
1800                 NODEMASK_FREE(nodes_allowed);
1801
1802         return len;
1803 out:
1804         NODEMASK_FREE(nodes_allowed);
1805         return err;
1806 }
1807
1808 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1809                                          struct kobject *kobj, const char *buf,
1810                                          size_t len)
1811 {
1812         struct hstate *h;
1813         unsigned long count;
1814         int nid;
1815         int err;
1816
1817         err = kstrtoul(buf, 10, &count);
1818         if (err)
1819                 return err;
1820
1821         h = kobj_to_hstate(kobj, &nid);
1822         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1823 }
1824
1825 static ssize_t nr_hugepages_show(struct kobject *kobj,
1826                                        struct kobj_attribute *attr, char *buf)
1827 {
1828         return nr_hugepages_show_common(kobj, attr, buf);
1829 }
1830
1831 static ssize_t nr_hugepages_store(struct kobject *kobj,
1832                struct kobj_attribute *attr, const char *buf, size_t len)
1833 {
1834         return nr_hugepages_store_common(false, kobj, buf, len);
1835 }
1836 HSTATE_ATTR(nr_hugepages);
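
/*
 * Via hugetlb_sysfs_init() below, this attribute is exposed as
 * /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages.  On a system
 * with 2 MB huge pages, for example,
 *   echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * resizes the persistent pool to 64 pages.
 */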
1837
1838 #ifdef CONFIG_NUMA
1839
1840 /*
1841  * hstate attribute for an optional mempolicy-based constraint on persistent
1842  * huge page alloc/free.
1843  */
1844 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1845                                        struct kobj_attribute *attr, char *buf)
1846 {
1847         return nr_hugepages_show_common(kobj, attr, buf);
1848 }
1849
1850 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1851                struct kobj_attribute *attr, const char *buf, size_t len)
1852 {
1853         return nr_hugepages_store_common(true, kobj, buf, len);
1854 }
1855 HSTATE_ATTR(nr_hugepages_mempolicy);
1856 #endif
1857
1858
1859 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1860                                         struct kobj_attribute *attr, char *buf)
1861 {
1862         struct hstate *h = kobj_to_hstate(kobj, NULL);
1863         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1864 }
1865
1866 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1867                 struct kobj_attribute *attr, const char *buf, size_t count)
1868 {
1869         int err;
1870         unsigned long input;
1871         struct hstate *h = kobj_to_hstate(kobj, NULL);
1872
1873         if (hstate_is_gigantic(h))
1874                 return -EINVAL;
1875
1876         err = kstrtoul(buf, 10, &input);
1877         if (err)
1878                 return err;
1879
1880         spin_lock(&hugetlb_lock);
1881         h->nr_overcommit_huge_pages = input;
1882         spin_unlock(&hugetlb_lock);
1883
1884         return count;
1885 }
1886 HSTATE_ATTR(nr_overcommit_hugepages);
1887
1888 static ssize_t free_hugepages_show(struct kobject *kobj,
1889                                         struct kobj_attribute *attr, char *buf)
1890 {
1891         struct hstate *h;
1892         unsigned long free_huge_pages;
1893         int nid;
1894
1895         h = kobj_to_hstate(kobj, &nid);
1896         if (nid == NUMA_NO_NODE)
1897                 free_huge_pages = h->free_huge_pages;
1898         else
1899                 free_huge_pages = h->free_huge_pages_node[nid];
1900
1901         return sprintf(buf, "%lu\n", free_huge_pages);
1902 }
1903 HSTATE_ATTR_RO(free_hugepages);
1904
1905 static ssize_t resv_hugepages_show(struct kobject *kobj,
1906                                         struct kobj_attribute *attr, char *buf)
1907 {
1908         struct hstate *h = kobj_to_hstate(kobj, NULL);
1909         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1910 }
1911 HSTATE_ATTR_RO(resv_hugepages);
1912
1913 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1914                                         struct kobj_attribute *attr, char *buf)
1915 {
1916         struct hstate *h;
1917         unsigned long surplus_huge_pages;
1918         int nid;
1919
1920         h = kobj_to_hstate(kobj, &nid);
1921         if (nid == NUMA_NO_NODE)
1922                 surplus_huge_pages = h->surplus_huge_pages;
1923         else
1924                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1925
1926         return sprintf(buf, "%lu\n", surplus_huge_pages);
1927 }
1928 HSTATE_ATTR_RO(surplus_hugepages);
1929
1930 static struct attribute *hstate_attrs[] = {
1931         &nr_hugepages_attr.attr,
1932         &nr_overcommit_hugepages_attr.attr,
1933         &free_hugepages_attr.attr,
1934         &resv_hugepages_attr.attr,
1935         &surplus_hugepages_attr.attr,
1936 #ifdef CONFIG_NUMA
1937         &nr_hugepages_mempolicy_attr.attr,
1938 #endif
1939         NULL,
1940 };
1941
1942 static struct attribute_group hstate_attr_group = {
1943         .attrs = hstate_attrs,
1944 };
1945
1946 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1947                                     struct kobject **hstate_kobjs,
1948                                     struct attribute_group *hstate_attr_group)
1949 {
1950         int retval;
1951         int hi = hstate_index(h);
1952
1953         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1954         if (!hstate_kobjs[hi])
1955                 return -ENOMEM;
1956
1957         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1958         if (retval)
1959                 kobject_put(hstate_kobjs[hi]);
1960
1961         return retval;
1962 }
1963
1964 static void __init hugetlb_sysfs_init(void)
1965 {
1966         struct hstate *h;
1967         int err;
1968
1969         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1970         if (!hugepages_kobj)
1971                 return;
1972
1973         for_each_hstate(h) {
1974                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1975                                          hstate_kobjs, &hstate_attr_group);
1976                 if (err)
1977                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
1978         }
1979 }
1980
1981 #ifdef CONFIG_NUMA
1982
1983 /*
1984  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1985  * with node devices in node_devices[] using a parallel array.  The array
1986  * index of a node device or node_hstate is the node id.
1987  * This is here to avoid any static dependency of the node device driver, in
1988  * the base kernel, on the hugetlb module.
1989  */
1990 struct node_hstate {
1991         struct kobject          *hugepages_kobj;
1992         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1993 };
1994 struct node_hstate node_hstates[MAX_NUMNODES];
1995
1996 /*
1997  * A subset of global hstate attributes for node devices
1998  */
1999 static struct attribute *per_node_hstate_attrs[] = {
2000         &nr_hugepages_attr.attr,
2001         &free_hugepages_attr.attr,
2002         &surplus_hugepages_attr.attr,
2003         NULL,
2004 };
2005
2006 static struct attribute_group per_node_hstate_attr_group = {
2007         .attrs = per_node_hstate_attrs,
2008 };
2009
2010 /*
2011  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2012  * Returns node id via non-NULL nidp.
2013  */
2014 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2015 {
2016         int nid;
2017
2018         for (nid = 0; nid < nr_node_ids; nid++) {
2019                 struct node_hstate *nhs = &node_hstates[nid];
2020                 int i;
2021                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2022                         if (nhs->hstate_kobjs[i] == kobj) {
2023                                 if (nidp)
2024                                         *nidp = nid;
2025                                 return &hstates[i];
2026                         }
2027         }
2028
2029         BUG();
2030         return NULL;
2031 }
2032
2033 /*
2034  * Unregister hstate attributes from a single node device.
2035  * No-op if no hstate attributes attached.
2036  */
2037 static void hugetlb_unregister_node(struct node *node)
2038 {
2039         struct hstate *h;
2040         struct node_hstate *nhs = &node_hstates[node->dev.id];
2041
2042         if (!nhs->hugepages_kobj)
2043                 return;         /* no hstate attributes */
2044
2045         for_each_hstate(h) {
2046                 int idx = hstate_index(h);
2047                 if (nhs->hstate_kobjs[idx]) {
2048                         kobject_put(nhs->hstate_kobjs[idx]);
2049                         nhs->hstate_kobjs[idx] = NULL;
2050                 }
2051         }
2052
2053         kobject_put(nhs->hugepages_kobj);
2054         nhs->hugepages_kobj = NULL;
2055 }
2056
2057 /*
2058  * hugetlb module exit:  unregister hstate attributes from node devices
2059  * that have them.
2060  */
2061 static void hugetlb_unregister_all_nodes(void)
2062 {
2063         int nid;
2064
2065         /*
2066          * disable node device registrations.
2067          */
2068         register_hugetlbfs_with_node(NULL, NULL);
2069
2070         /*
2071          * remove hstate attributes from any nodes that have them.
2072          */
2073         for (nid = 0; nid < nr_node_ids; nid++)
2074                 hugetlb_unregister_node(node_devices[nid]);
2075 }
2076
2077 /*
2078  * Register hstate attributes for a single node device.
2079  * No-op if attributes already registered.
2080  */
2081 static void hugetlb_register_node(struct node *node)
2082 {
2083         struct hstate *h;
2084         struct node_hstate *nhs = &node_hstates[node->dev.id];
2085         int err;
2086
2087         if (nhs->hugepages_kobj)
2088                 return;         /* already allocated */
2089
2090         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2091                                                         &node->dev.kobj);
2092         if (!nhs->hugepages_kobj)
2093                 return;
2094
2095         for_each_hstate(h) {
2096                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2097                                                 nhs->hstate_kobjs,
2098                                                 &per_node_hstate_attr_group);
2099                 if (err) {
2100                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2101                                 h->name, node->dev.id);
2102                         hugetlb_unregister_node(node);
2103                         break;
2104                 }
2105         }
2106 }
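
/*
 * The directory created here appears under the node device, e.g.
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/, and carries
 * only the reduced attribute set from per_node_hstate_attrs[].
 */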
2107
2108 /*
2109  * hugetlb init time:  register hstate attributes for all registered node
2110  * devices of nodes that have memory.  All on-line nodes should have
2111  * registered their associated device by this time.
2112  */
2113 static void hugetlb_register_all_nodes(void)
2114 {
2115         int nid;
2116
2117         for_each_node_state(nid, N_MEMORY) {
2118                 struct node *node = node_devices[nid];
2119                 if (node->dev.id == nid)
2120                         hugetlb_register_node(node);
2121         }
2122
2123         /*
2124          * Let the node device driver know we're here so it can
2125          * [un]register hstate attributes on node hotplug.
2126          */
2127         register_hugetlbfs_with_node(hugetlb_register_node,
2128                                      hugetlb_unregister_node);
2129 }
2130 #else   /* !CONFIG_NUMA */
2131
2132 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2133 {
2134         BUG();
2135         if (nidp)
2136                 *nidp = -1;
2137         return NULL;
2138 }
2139
2140 static void hugetlb_unregister_all_nodes(void) { }
2141
2142 static void hugetlb_register_all_nodes(void) { }
2143
2144 #endif
2145
2146 static void __exit hugetlb_exit(void)
2147 {
2148         struct hstate *h;
2149
2150         hugetlb_unregister_all_nodes();
2151
2152         for_each_hstate(h) {
2153                 kobject_put(hstate_kobjs[hstate_index(h)]);
2154         }
2155
2156         kobject_put(hugepages_kobj);
2157         kfree(htlb_fault_mutex_table);
2158 }
2159 module_exit(hugetlb_exit);
2160
2161 static int __init hugetlb_init(void)
2162 {
2163         int i;
2164
2165         if (!hugepages_supported())
2166                 return 0;
2167
2168         if (!size_to_hstate(default_hstate_size)) {
2169                 default_hstate_size = HPAGE_SIZE;
2170                 if (!size_to_hstate(default_hstate_size))
2171                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2172         }
2173         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2174         if (default_hstate_max_huge_pages)
2175                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2176
2177         hugetlb_init_hstates();
2178         gather_bootmem_prealloc();
2179         report_hugepages();
2180
2181         hugetlb_sysfs_init();
2182         hugetlb_register_all_nodes();
2183         hugetlb_cgroup_file_init();
2184
2185 #ifdef CONFIG_SMP
2186         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2187 #else
2188         num_fault_mutexes = 1;
2189 #endif
2190         htlb_fault_mutex_table =
2191                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2192         BUG_ON(!htlb_fault_mutex_table);
2193
2194         for (i = 0; i < num_fault_mutexes; i++)
2195                 mutex_init(&htlb_fault_mutex_table[i]);
2196         return 0;
2197 }
2198 module_init(hugetlb_init);
2199
2200 /* Should be called on processing a hugepagesz=... option */
2201 void __init hugetlb_add_hstate(unsigned int order)
2202 {
2203         struct hstate *h;
2204         unsigned long i;
2205
2206         if (size_to_hstate(PAGE_SIZE << order)) {
2207                 pr_warning("hugepagesz= specified twice, ignoring\n");
2208                 return;
2209         }
2210         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2211         BUG_ON(order == 0);
2212         h = &hstates[hugetlb_max_hstate++];
2213         h->order = order;
2214         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2215         h->nr_huge_pages = 0;
2216         h->free_huge_pages = 0;
2217         for (i = 0; i < MAX_NUMNODES; ++i)
2218                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2219         INIT_LIST_HEAD(&h->hugepage_activelist);
2220         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2221         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2222         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2223                                         huge_page_size(h)/1024);
2224
2225         parsed_hstate = h;
2226 }
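
/*
 * Example: a 2 MB hstate on a 4 KB PAGE_SIZE system has order 9, so
 * h->mask = ~((1ULL << 21) - 1) and h->name is "hugepages-2048kB".
 */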
2227
2228 static int __init hugetlb_nrpages_setup(char *s)
2229 {
2230         unsigned long *mhp;
2231         static unsigned long *last_mhp;
2232
2233         /*
2234          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2235          * so this hugepages= parameter goes to the "default hstate".
2236          */
2237         if (!hugetlb_max_hstate)
2238                 mhp = &default_hstate_max_huge_pages;
2239         else
2240                 mhp = &parsed_hstate->max_huge_pages;
2241
2242         if (mhp == last_mhp) {
2243                 pr_warning("hugepages= specified twice without "
2244                            "interleaving hugepagesz=, ignoring\n");
2245                 return 1;
2246         }
2247
2248         if (sscanf(s, "%lu", mhp) <= 0)
2249                 *mhp = 0;
2250
2251         /*
2252          * Global state is always initialized later in hugetlb_init.
2253          * But we need to allocate pages for >= MAX_ORDER hstates here early
2254          * to still use the bootmem allocator.
2255          */
2256         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2257                 hugetlb_hstate_alloc_pages(parsed_hstate);
2258
2259         last_mhp = mhp;
2260
2261         return 1;
2262 }
2263 __setup("hugepages=", hugetlb_nrpages_setup);
2264
2265 static int __init hugetlb_default_setup(char *s)
2266 {
2267         default_hstate_size = memparse(s, &s);
2268         return 1;
2269 }
2270 __setup("default_hugepagesz=", hugetlb_default_setup);
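
/*
 * Boot command line example (the accepted sizes are architecture
 * dependent):
 *   default_hugepagesz=2M hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=256
 * pre-allocates two 1 GB pages and 256 2 MB pages.  A bare "hugepages=N"
 * that precedes any "hugepagesz=" applies to the default huge page size.
 */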
2271
2272 static unsigned int cpuset_mems_nr(unsigned int *array)
2273 {
2274         int node;
2275         unsigned int nr = 0;
2276
2277         for_each_node_mask(node, cpuset_current_mems_allowed)
2278                 nr += array[node];
2279
2280         return nr;
2281 }
2282
2283 #ifdef CONFIG_SYSCTL
2284 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2285                          struct ctl_table *table, int write,
2286                          void __user *buffer, size_t *length, loff_t *ppos)
2287 {
2288         struct hstate *h = &default_hstate;
2289         unsigned long tmp = h->max_huge_pages;
2290         int ret;
2291
2292         if (!hugepages_supported())
2293                 return -ENOTSUPP;
2294
2295         table->data = &tmp;
2296         table->maxlen = sizeof(unsigned long);
2297         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2298         if (ret)
2299                 goto out;
2300
2301         if (write)
2302                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2303                                                   NUMA_NO_NODE, tmp, *length);
2304 out:
2305         return ret;
2306 }
2307
2308 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2309                           void __user *buffer, size_t *length, loff_t *ppos)
2310 {
2311
2312         return hugetlb_sysctl_handler_common(false, table, write,
2313                                                         buffer, length, ppos);
2314 }
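
/*
 * This handler backs the vm.nr_hugepages sysctl (the table entry lives in
 * kernel/sysctl.c), so writing /proc/sys/vm/nr_hugepages is equivalent to
 * writing the global sysfs nr_hugepages attribute above.
 */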
2315
2316 #ifdef CONFIG_NUMA
2317 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2318                           void __user *buffer, size_t *length, loff_t *ppos)
2319 {
2320         return hugetlb_sysctl_handler_common(true, table, write,
2321                                                         buffer, length, ppos);
2322 }
2323 #endif /* CONFIG_NUMA */
2324
2325 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2326                         void __user *buffer,
2327                         size_t *length, loff_t *ppos)
2328 {
2329         struct hstate *h = &default_hstate;
2330         unsigned long tmp;
2331         int ret;
2332
2333         if (!hugepages_supported())
2334                 return -ENOTSUPP;
2335
2336         tmp = h->nr_overcommit_huge_pages;
2337
2338         if (write && hstate_is_gigantic(h))
2339                 return -EINVAL;
2340
2341         table->data = &tmp;
2342         table->maxlen = sizeof(unsigned long);
2343         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2344         if (ret)
2345                 goto out;
2346
2347         if (write) {
2348                 spin_lock(&hugetlb_lock);
2349                 h->nr_overcommit_huge_pages = tmp;
2350                 spin_unlock(&hugetlb_lock);
2351         }
2352 out:
2353         return ret;
2354 }
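
/*
 * Similarly, this handler backs vm.nr_overcommit_hugepages
 * (/proc/sys/vm/nr_overcommit_hugepages), the ceiling on surplus huge
 * pages that may be allocated from the buddy allocator on demand.
 */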
2355
2356 #endif /* CONFIG_SYSCTL */
2357
2358 void hugetlb_report_meminfo(struct seq_file *m)
2359 {
2360         struct hstate *h = &default_hstate;
2361         if (!hugepages_supported())
2362                 return;
2363         seq_printf(m,
2364                         "HugePages_Total:   %5lu\n"
2365                         "HugePages_Free:    %5lu\n"
2366                         "HugePages_Rsvd:    %5lu\n"
2367                         "HugePages_Surp:    %5lu\n"
2368                         "Hugepagesize:   %8lu kB\n",
2369                         h->nr_huge_pages,
2370                         h->free_huge_pages,
2371                         h->resv_huge_pages,
2372                         h->surplus_huge_pages,
2373                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2374 }
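
/*
 * These are the HugePages_* and Hugepagesize fields of /proc/meminfo; on a
 * freshly booted system with "hugepages=512" and 2 MB pages they would
 * read 512 total, 512 free, 0 reserved, 0 surplus and 2048 kB.
 */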
2375
2376 int hugetlb_report_node_meminfo(int nid, char *buf)
2377 {
2378         struct hstate *h = &default_hstate;
2379         if (!hugepages_supported())
2380                 return 0;
2381         return sprintf(buf,
2382                 "Node %d HugePages_Total: %5u\n"
2383                 "Node %d HugePages_Free:  %5u\n"
2384                 "Node %d HugePages_Surp:  %5u\n",
2385                 nid, h->nr_huge_pages_node[nid],
2386                 nid, h->free_huge_pages_node[nid],
2387                 nid, h->surplus_huge_pages_node[nid]);
2388 }
2389
2390 void hugetlb_show_meminfo(void)
2391 {
2392         struct hstate *h;
2393         int nid;
2394
2395         if (!hugepages_supported())
2396                 return;
2397
2398         for_each_node_state(nid, N_MEMORY)
2399                 for_each_hstate(h)
2400                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2401                                 nid,
2402                                 h->nr_huge_pages_node[nid],
2403                                 h->free_huge_pages_node[nid],
2404                                 h->surplus_huge_pages_node[nid],
2405                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2406 }
2407
2408 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2409 unsigned long hugetlb_total_pages(void)
2410 {
2411         struct hstate *h;
2412         unsigned long nr_total_pages = 0;
2413
2414         for_each_hstate(h)
2415                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2416         return nr_total_pages;
2417 }
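
/*
 * Example: with a 4 KB base page size, 100 huge pages of 2 MB contribute
 * 100 * 512 = 51200 PAGE_SIZE units to the returned total.
 */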
2418
2419 static int hugetlb_acct_memory(struct hstate *h, long delta)
2420 {
2421         int ret = -ENOMEM;
2422
2423         spin_lock(&hugetlb_lock);
2424         /*
2425          * When cpuset is configured, it breaks the strict hugetlb page
2426          * reservation as the accounting is done on a global variable. Such
2427          * reservation is completely rubbish in the presence of cpuset because
2428          * the reservation is not checked against page availability for the
2429          * current cpuset. An application can still potentially be OOM'ed by
2430          * the kernel for lack of free htlb pages in the cpuset that the task
2431          * is in. Attempting to enforce strict accounting with cpuset is
2432          * almost impossible (or too ugly) because cpuset is so fluid that
2433          * tasks or memory nodes can be dynamically moved between cpusets.
2434          *
2435          * The change of semantics for shared hugetlb mapping with cpuset is
2436          * undesirable. However, in order to preserve some of the semantics,
2437          * we fall back to check against current free page availability as
2438          * a best attempt and hopefully to minimize the impact of changing
2439          * semantics that cpuset has.
2440          */
2441         if (delta > 0) {
2442                 if (gather_surplus_pages(h, delta) < 0)
2443                         goto out;
2444
2445                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2446                         return_unused_surplus_pages(h, delta);
2447                         goto out;
2448                 }
2449         }
2450
2451         ret = 0;
2452         if (delta < 0)
2453                 return_unused_surplus_pages(h, (unsigned long) -delta);
2454
2455 out:
2456         spin_unlock(&hugetlb_lock);
2457         return ret;
2458 }
2459
2460 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2461 {
2462         struct resv_map *resv = vma_resv_map(vma);
2463
2464         /*
2465          * This new VMA should share its sibling's reservation map if present.
2466          * The VMA will only ever have a valid reservation map pointer where
2467          * it is being copied for another still existing VMA.  As that VMA
2468          * has a reference to the reservation map it cannot disappear until
2469          * after this open call completes.  It is therefore safe to take a
2470          * new reference here without additional locking.
2471          */
2472         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2473                 kref_get(&resv->refs);
2474 }
2475
2476 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2477 {
2478         struct hstate *h = hstate_vma(vma);
2479         struct resv_map *resv = vma_resv_map(vma);
2480         struct hugepage_subpool *spool = subpool_vma(vma);
2481         unsigned long reserve, start, end;
2482
2483         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2484                 return;
2485
2486         start = vma_hugecache_offset(h, vma, vma->vm_start);
2487         end = vma_hugecache_offset(h, vma, vma->vm_end);
2488
2489         reserve = (end - start) - region_count(resv, start, end);
2490
2491         kref_put(&resv->refs, resv_map_release);
2492
2493         if (reserve) {
2494                 hugetlb_acct_memory(h, -reserve);
2495                 hugepage_subpool_put_pages(spool, reserve);
2496         }
2497 }
2498
2499 /*
2500  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2501  * handle_mm_fault() to try to instantiate regular-sized pages in the
2502  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2503  * this far.
2504  */
2505 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2506 {
2507         BUG();
2508         return 0;
2509 }
2510
2511 const struct vm_operations_struct hugetlb_vm_ops = {
2512         .fault = hugetlb_vm_op_fault,
2513         .open = hugetlb_vm_op_open,
2514         .close = hugetlb_vm_op_close,
2515 };
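
/*
 * These vm_operations are installed on a VMA when a hugetlbfs file is
 * mmap()ed (see hugetlbfs_file_mmap() in fs/hugetlbfs/inode.c), which is
 * also how SysV SHM_HUGETLB and MAP_HUGETLB mappings end up here.
 */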
2516
2517 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2518                                 int writable)
2519 {
2520         pte_t entry;
2521
2522         if (writable) {
2523                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2524                                          vma->vm_page_prot)));
2525         } else {
2526                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2527                                            vma->vm_page_prot));
2528         }
2529         entry = pte_mkyoung(entry);
2530         entry = pte_mkhuge(entry);
2531         entry = arch_make_huge_pte(entry, vma, page, writable);
2532
2533         return entry;
2534 }
2535
2536 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2537                                    unsigned long address, pte_t *ptep)
2538 {
2539         pte_t entry;
2540
2541         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2542         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2543                 update_mmu_cache(vma, address, ptep);
2544 }
2545
2546 static int is_hugetlb_entry_migration(pte_t pte)
2547 {
2548         swp_entry_t swp;
2549
2550         if (huge_pte_none(pte) || pte_present(pte))
2551                 return 0;
2552         swp = pte_to_swp_entry(pte);
2553         if (non_swap_entry(swp) && is_migration_entry(swp))
2554                 return 1;
2555         else
2556                 return 0;
2557 }
2558
2559 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2560 {
2561         swp_entry_t swp;
2562
2563         if (huge_pte_none(pte) || pte_present(pte))
2564                 return 0;
2565         swp = pte_to_swp_entry(pte);
2566         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2567                 return 1;
2568         else
2569                 return 0;
2570 }
2571
2572 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2573                             struct vm_area_struct *vma)
2574 {
2575         pte_t *src_pte, *dst_pte, entry;
2576         struct page *ptepage;
2577         unsigned long addr;
2578         int cow;
2579         struct hstate *h = hstate_vma(vma);
2580         unsigned long sz = huge_page_size(h);
2581         unsigned long mmun_start;       /* For mmu_notifiers */
2582         unsigned long mmun_end;         /* For mmu_notifiers */
2583         int ret = 0;
2584
2585         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2586
2587         mmun_start = vma->vm_start;
2588         mmun_end = vma->vm_end;
2589         if (cow)
2590                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2591
2592         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2593                 spinlock_t *src_ptl, *dst_ptl;
2594                 src_pte = huge_pte_offset(src, addr);
2595                 if (!src_pte)
2596                         continue;
2597                 dst_pte = huge_pte_alloc(dst, addr, sz);
2598                 if (!dst_pte) {
2599                         ret = -ENOMEM;
2600                         break;
2601                 }
2602
2603                 /* If the pagetables are shared don't copy or take references */
2604                 if (dst_pte == src_pte)
2605                         continue;
2606
2607                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2608                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2609                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2610                 entry = huge_ptep_get(src_pte);
2611                 if (huge_pte_none(entry)) { /* skip none entry */
2612                         ;
2613                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2614                                     is_hugetlb_entry_hwpoisoned(entry))) {
2615                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2616
2617                         if (is_write_migration_entry(swp_entry) && cow) {
2618                                 /*
2619                                  * COW mappings require pages in both
2620                                  * parent and child to be set to read.
2621                                  */
2622                                 make_migration_entry_read(&swp_entry);
2623                                 entry = swp_entry_to_pte(swp_entry);
2624                                 set_huge_pte_at(src, addr, src_pte, entry);
2625                         }
2626                         set_huge_pte_at(dst, addr, dst_pte, entry);
2627                 } else {
2628                         if (cow)
2629                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2630                         entry = huge_ptep_get(src_pte);
2631                         ptepage = pte_page(entry);
2632                         get_page(ptepage);
2633                         page_dup_rmap(ptepage);
2634                         set_huge_pte_at(dst, addr, dst_pte, entry);
2635                 }
2636                 spin_unlock(src_ptl);
2637                 spin_unlock(dst_ptl);
2638         }
2639
2640         if (cow)
2641                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2642
2643         return ret;
2644 }
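
/*
 * copy_hugetlb_page_range() is the hugetlb counterpart of
 * copy_page_range(): fork() reaches it via copy_page_range() whenever the
 * VMA is a hugetlb mapping, and for private (COW) mappings the loop above
 * leaves the PTE write-protected in both parent and child.
 */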
2645
2646 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2647                             unsigned long start, unsigned long end,
2648                             struct page *ref_page)
2649 {
2650         int force_flush = 0;
2651         struct mm_struct *mm = vma->vm_mm;
2652         unsigned long address;
2653         pte_t *ptep;
2654         pte_t pte;
2655         spinlock_t *ptl;
2656         struct page *page;
2657         struct hstate *h = hstate_vma(vma);
2658         unsigned long sz = huge_page_size(h);
2659         const unsigned long mmun_start = start; /* For mmu_notifiers */
2660         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2661
2662         WARN_ON(!is_vm_hugetlb_page(vma));
2663         BUG_ON(start & ~huge_page_mask(h));
2664         BUG_ON(end & ~huge_page_mask(h));
2665
2666         tlb_start_vma(tlb, vma);
2667         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2668 again:
2669         for (address = start; address < end; address += sz) {
2670                 ptep = huge_pte_offset(mm, address);
2671                 if (!ptep)
2672                         continue;
2673
2674                 ptl = huge_pte_lock(h, mm, ptep);
2675                 if (huge_pmd_unshare(mm, &address, ptep))
2676                         goto unlock;
2677
2678                 pte = huge_ptep_get(ptep);
2679                 if (huge_pte_none(pte))
2680                         goto unlock;
2681
2682                 /*
2683                  * Migrating hugepage or HWPoisoned hugepage is already
2684                  * unmapped and its refcount is dropped, so just clear pte here.
2685                  */
2686                 if (unlikely(!pte_present(pte))) {
2687                         huge_pte_clear(mm, address, ptep);
2688                         goto unlock;
2689                 }
2690
2691                 page = pte_page(pte);
2692                 /*
2693                  * If a reference page is supplied, it is because a specific
2694                  * page is being unmapped, not a range. Ensure the page we
2695                  * are about to unmap is the actual page of interest.
2696                  */
2697                 if (ref_page) {
2698                         if (page != ref_page)
2699                                 goto unlock;
2700
2701                         /*
2702                          * Mark the VMA as having unmapped its page so that
2703                          * future faults in this VMA will fail rather than
2704                          * looking like data was lost
2705                          */
2706                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2707                 }
2708
2709                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2710                 tlb_remove_tlb_entry(tlb, ptep, address);
2711                 if (huge_pte_dirty(pte))
2712                         set_page_dirty(page);
2713
2714                 page_remove_rmap(page);
2715                 force_flush = !__tlb_remove_page(tlb, page);
2716                 if (force_flush) {
2717                         spin_unlock(ptl);
2718                         break;
2719                 }
2720                 /* Bail out after unmapping reference page if supplied */
2721                 if (ref_page) {
2722                         spin_unlock(ptl);
2723                         break;
2724                 }
2725 unlock:
2726                 spin_unlock(ptl);
2727         }
2728         /*
2729          * mmu_gather ran out of room to batch pages, so we break out of
2730          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2731          * and page-free while holding it.
2732          */
2733         if (force_flush) {
2734                 force_flush = 0;
2735                 tlb_flush_mmu(tlb);
2736                 if (address < end && !ref_page)
2737                         goto again;
2738         }
2739         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2740         tlb_end_vma(tlb, vma);
2741 }
2742
2743 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2744                           struct vm_area_struct *vma, unsigned long start,
2745                           unsigned long end, struct page *ref_page)
2746 {
2747         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2748
2749         /*
2750          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2751          * test will fail on a vma being torn down, and not grab a page table
2752          * on its way out.  We're lucky that the flag has such an appropriate
2753          * name, and can in fact be safely cleared here. We could clear it
2754          * before the __unmap_hugepage_range above, but all that's necessary
2755          * is to clear it before releasing the i_mmap_mutex. This works
2756          * because in the context this is called, the VMA is about to be
2757          * destroyed and the i_mmap_mutex is held.
2758          */
2759         vma->vm_flags &= ~VM_MAYSHARE;
2760 }
2761
2762 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2763                           unsigned long end, struct page *ref_page)
2764 {
2765         struct mm_struct *mm;
2766         struct mmu_gather tlb;
2767
2768         mm = vma->vm_mm;
2769
2770         tlb_gather_mmu(&tlb, mm, start, end);
2771         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2772         tlb_finish_mmu(&tlb, start, end);
2773 }
2774
2775 /*
2776  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2777  * mapping it owns the reserve page for. The intention is to unmap the page
2778  * from other VMAs and let the children be SIGKILLed if they are faulting the
2779  * same region.
2780  */
2781 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2782                               struct page *page, unsigned long address)
2783 {
2784         struct hstate *h = hstate_vma(vma);
2785         struct vm_area_struct *iter_vma;
2786         struct address_space *mapping;
2787         pgoff_t pgoff;
2788
2789         /*
2790          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2791          * from page cache lookup which is in HPAGE_SIZE units.
2792          */
2793         address = address & huge_page_mask(h);
2794         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2795                         vma->vm_pgoff;
2796         mapping = file_inode(vma->vm_file)->i_mapping;
2797
2798         /*
2799          * Take the mapping lock for the duration of the table walk. As
2800          * this mapping should be shared between all the VMAs,
2801          * __unmap_hugepage_range() is called as the lock is already held
2802          */
2803         mutex_lock(&mapping->i_mmap_mutex);
2804         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2805                 /* Do not unmap the current VMA */
2806                 if (iter_vma == vma)
2807                         continue;
2808
2809                 /*
2810                  * Shared VMAs have their own reserves and do not affect
2811                  * MAP_PRIVATE accounting but it is possible that a shared
2812                  * VMA is using the same page so check and skip such VMAs.
2813                  */
2814                 if (iter_vma->vm_flags & VM_MAYSHARE)
2815                         continue;
2816
2817                 /*
2818                  * Unmap the page from other VMAs without their own reserves.
2819                  * They get marked to be SIGKILLed if they fault in these
2820                  * areas. This is because a future no-page fault on this VMA
2821                  * could insert a zeroed page instead of the data existing
2822                  * from the time of fork. This would look like data corruption
2823                  */
2824                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2825                         unmap_hugepage_range(iter_vma, address,
2826                                              address + huge_page_size(h), page);
2827         }
2828         mutex_unlock(&mapping->i_mmap_mutex);
2829 }
2830
2831 /*
2832  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2833  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2834  * cannot race with other handlers or page migration.
2835  * Keep the pte_same checks anyway to make transition from the mutex easier.
2836  */
2837 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2838                         unsigned long address, pte_t *ptep, pte_t pte,
2839                         struct page *pagecache_page, spinlock_t *ptl)
2840 {
2841         struct hstate *h = hstate_vma(vma);
2842         struct page *old_page, *new_page;
2843         int ret = 0, outside_reserve = 0;
2844         unsigned long mmun_start;       /* For mmu_notifiers */
2845         unsigned long mmun_end;         /* For mmu_notifiers */
2846
2847         old_page = pte_page(pte);
2848
2849 retry_avoidcopy:
2850         /* If no-one else is actually using this page, avoid the copy
2851          * and just make the page writable */
2852         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2853                 page_move_anon_rmap(old_page, vma, address);
2854                 set_huge_ptep_writable(vma, address, ptep);
2855                 return 0;
2856         }
2857
2858         /*
2859          * If the process that created a MAP_PRIVATE mapping is about to
2860          * perform a COW due to a shared page count, attempt to satisfy
2861          * the allocation without using the existing reserves. The pagecache
2862          * page is used to determine if the reserve at this address was
2863          * consumed or not. If reserves were used, a partial faulted mapping
2864          * at the time of fork() could consume its reserves on COW instead
2865          * of the full address range.
2866          */
2867         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2868                         old_page != pagecache_page)
2869                 outside_reserve = 1;
2870
2871         page_cache_get(old_page);
2872
2873         /*
2874          * Drop page table lock as buddy allocator may be called. It will
2875          * be acquired again before returning to the caller, as expected.
2876          */
2877         spin_unlock(ptl);
2878         new_page = alloc_huge_page(vma, address, outside_reserve);
2879
2880         if (IS_ERR(new_page)) {
2881                 /*
2882                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2883                  * it is due to references held by a child and an insufficient
2884                  * huge page pool. To guarantee the original mapper's
2885                  * reliability, unmap the page from child processes. The child
2886                  * may get SIGKILLed if it later faults.
2887                  */
2888                 if (outside_reserve) {
2889                         page_cache_release(old_page);
2890                         BUG_ON(huge_pte_none(pte));
2891                         unmap_ref_private(mm, vma, old_page, address);
2892                         BUG_ON(huge_pte_none(pte));
2893                         spin_lock(ptl);
2894                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2895                         if (likely(ptep &&
2896                                    pte_same(huge_ptep_get(ptep), pte)))
2897                                 goto retry_avoidcopy;
2898                         /*
2899                          * A race occurred while re-acquiring the page
2900                          * table lock, and our job is done.
2901                          */
2902                         return 0;
2903                 }
2904
2905                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
2906                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
2907                 goto out_release_old;
2908         }
2909
2910         /*
2911          * When the original hugepage is a shared one, it does not have
2912          * an anon_vma prepared.
2913          */
2914         if (unlikely(anon_vma_prepare(vma))) {
2915                 ret = VM_FAULT_OOM;
2916                 goto out_release_all;
2917         }
2918
2919         copy_user_huge_page(new_page, old_page, address, vma,
2920                             pages_per_huge_page(h));
2921         __SetPageUptodate(new_page);
2922         set_page_huge_active(new_page);
2923
2924         mmun_start = address & huge_page_mask(h);
2925         mmun_end = mmun_start + huge_page_size(h);
2926         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2927
2928         /*
2929          * Retake the page table lock to check for racing updates
2930          * before the page tables are altered
2931          */
2932         spin_lock(ptl);
2933         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2934         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2935                 ClearPagePrivate(new_page);
2936
2937                 /* Break COW */
2938                 huge_ptep_clear_flush(vma, address, ptep);
2939                 set_huge_pte_at(mm, address, ptep,
2940                                 make_huge_pte(vma, new_page, 1));
2941                 page_remove_rmap(old_page);
2942                 hugepage_add_new_anon_rmap(new_page, vma, address);
2943                 /* Make the old page be freed below */
2944                 new_page = old_page;
2945         }
2946         spin_unlock(ptl);
2947         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2948 out_release_all:
2949         page_cache_release(new_page);
2950 out_release_old:
2951         page_cache_release(old_page);
2952
2953         spin_lock(ptl); /* Caller expects lock to be held */
2954         return ret;
2955 }
2956
2957 /* Return the pagecache page at a given address within a VMA */
2958 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2959                         struct vm_area_struct *vma, unsigned long address)
2960 {
2961         struct address_space *mapping;
2962         pgoff_t idx;
2963
2964         mapping = vma->vm_file->f_mapping;
2965         idx = vma_hugecache_offset(h, vma, address);
2966
2967         return find_lock_page(mapping, idx);
2968 }
2969
2970 /*
2971  * Return whether there is a pagecache page to back the given address within the VMA.
2972  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2973  */
2974 static bool hugetlbfs_pagecache_present(struct hstate *h,
2975                         struct vm_area_struct *vma, unsigned long address)
2976 {
2977         struct address_space *mapping;
2978         pgoff_t idx;
2979         struct page *page;
2980
2981         mapping = vma->vm_file->f_mapping;
2982         idx = vma_hugecache_offset(h, vma, address);
2983
2984         page = find_get_page(mapping, idx);
2985         if (page)
2986                 put_page(page);
2987         return page != NULL;
2988 }
2989
2990 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2991                            struct address_space *mapping, pgoff_t idx,
2992                            unsigned long address, pte_t *ptep, unsigned int flags)
2993 {
2994         struct hstate *h = hstate_vma(vma);
2995         int ret = VM_FAULT_SIGBUS;
2996         int anon_rmap = 0;
2997         unsigned long size;
2998         struct page *page;
2999         pte_t new_pte;
3000         spinlock_t *ptl;
3001
3002         /*
3003          * Currently, we are forced to kill the process in the event the
3004          * original mapper has unmapped pages from the child due to a failed
3005          * COW. Warn that such a situation has occurred, as it may not be obvious.
3006          */
3007         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3008                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3009                            current->pid);
3010                 return ret;
3011         }
3012
3013         /*
3014          * Use page lock to guard against racing truncation
3015          * before we get page_table_lock.
3016          */
3017 retry:
3018         page = find_lock_page(mapping, idx);
3019         if (!page) {
3020                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3021                 if (idx >= size)
3022                         goto out;
3023                 page = alloc_huge_page(vma, address, 0);
3024                 if (IS_ERR(page)) {
3025                         ret = PTR_ERR(page);
3026                         if (ret == -ENOMEM)
3027                                 ret = VM_FAULT_OOM;
3028                         else
3029                                 ret = VM_FAULT_SIGBUS;
3030                         goto out;
3031                 }
3032                 clear_huge_page(page, address, pages_per_huge_page(h));
3033                 __SetPageUptodate(page);
3034                 set_page_huge_active(page);
3035
3036                 if (vma->vm_flags & VM_MAYSHARE) {
3037                         int err;
3038                         struct inode *inode = mapping->host;
3039
3040                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3041                         if (err) {
3042                                 put_page(page);
3043                                 if (err == -EEXIST)
3044                                         goto retry;
3045                                 goto out;
3046                         }
3047                         ClearPagePrivate(page);
3048
3049                         spin_lock(&inode->i_lock);
3050                         inode->i_blocks += blocks_per_huge_page(h);
3051                         spin_unlock(&inode->i_lock);
3052                 } else {
3053                         lock_page(page);
3054                         if (unlikely(anon_vma_prepare(vma))) {
3055                                 ret = VM_FAULT_OOM;
3056                                 goto backout_unlocked;
3057                         }
3058                         anon_rmap = 1;
3059                 }
3060         } else {
3061                 /*
3062                  * If a memory error occurs between mmap() and the fault, a process
3063                  * may not have a hwpoisoned swap entry for the errored virtual address,
3064                  * so we need to block the hugepage fault with a PG_hwpoison bit check.
3065                  */
3066                 if (unlikely(PageHWPoison(page))) {
3067                         ret = VM_FAULT_HWPOISON |
3068                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3069                         goto backout_unlocked;
3070                 }
3071         }
3072
3073         /*
3074          * If we are going to COW a private mapping later, we examine the
3075          * pending reservations for this page now. This will ensure that
3076          * any allocations necessary to record that reservation occur outside
3077          * the spinlock.
3078          */
3079         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3080                 if (vma_needs_reservation(h, vma, address) < 0) {
3081                         ret = VM_FAULT_OOM;
3082                         goto backout_unlocked;
3083                 }
3084
3085         ptl = huge_pte_lockptr(h, mm, ptep);
3086         spin_lock(ptl);
3087         size = i_size_read(mapping->host) >> huge_page_shift(h);
3088         if (idx >= size)
3089                 goto backout;
3090
3091         ret = 0;
3092         if (!huge_pte_none(huge_ptep_get(ptep)))
3093                 goto backout;
3094
3095         if (anon_rmap) {
3096                 ClearPagePrivate(page);
3097                 hugepage_add_new_anon_rmap(page, vma, address);
3098         } else
3099                 page_dup_rmap(page);
3100         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3101                                 && (vma->vm_flags & VM_SHARED)));
3102         set_huge_pte_at(mm, address, ptep, new_pte);
3103
3104         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3105                 /* Optimization, do the COW without a second fault */
3106                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3107         }
3108
3109         spin_unlock(ptl);
3110         unlock_page(page);
3111 out:
3112         return ret;
3113
3114 backout:
3115         spin_unlock(ptl);
3116 backout_unlocked:
3117         unlock_page(page);
3118         put_page(page);
3119         goto out;
3120 }
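/*
 * Worked note on the pte installed above (illustrative only): the "writable"
 * argument passed to make_huge_pte() is
 *
 *	(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED)
 *
 * so a MAP_PRIVATE, PROT_WRITE mapping evaluates to true && false == false
 * and gets a write-protected huge pte on its first fault; the
 * FAULT_FLAG_WRITE check above then breaks COW via hugetlb_cow() right away
 * instead of waiting for a second fault.
 */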
3121
3122 #ifdef CONFIG_SMP
3123 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3124                             struct vm_area_struct *vma,
3125                             struct address_space *mapping,
3126                             pgoff_t idx, unsigned long address)
3127 {
3128         unsigned long key[2];
3129         u32 hash;
3130
3131         if (vma->vm_flags & VM_SHARED) {
3132                 key[0] = (unsigned long) mapping;
3133                 key[1] = idx;
3134         } else {
3135                 key[0] = (unsigned long) mm;
3136                 key[1] = address >> huge_page_shift(h);
3137         }
3138
3139         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3140
3141         return hash & (num_fault_mutexes - 1);
3142 }
3143 #else
3144 /*
3145  * For uniprocessor systems we always use a single mutex, so just
3146  * return 0 and avoid the hashing overhead.
3147  */
3148 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3149                             struct vm_area_struct *vma,
3150                             struct address_space *mapping,
3151                             pgoff_t idx, unsigned long address)
3152 {
3153         return 0;
3154 }
3155 #endif
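/*
 * Minimal usage sketch for the fault mutex table, mirroring what
 * hugetlb_fault() does below ("mm", "vma", "mapping", "idx" and "address"
 * stand for a caller's own values).  The mask in the SMP fault_mutex_hash()
 * relies on num_fault_mutexes being a power of two, which the setup code in
 * this file arranges:
 *
 *	u32 hash;
 *
 *	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&htlb_fault_mutex_table[hash]);
 *	... look up or instantiate the huge page for idx ...
 *	mutex_unlock(&htlb_fault_mutex_table[hash]);
 */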
3156
3157 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3158                         unsigned long address, unsigned int flags)
3159 {
3160         pte_t *ptep, entry;
3161         spinlock_t *ptl;
3162         int ret;
3163         u32 hash;
3164         pgoff_t idx;
3165         struct page *page = NULL;
3166         struct page *pagecache_page = NULL;
3167         struct hstate *h = hstate_vma(vma);
3168         struct address_space *mapping;
3169         int need_wait_lock = 0;
3170
3171         address &= huge_page_mask(h);
3172
3173         ptep = huge_pte_offset(mm, address);
3174         if (ptep) {
3175                 entry = huge_ptep_get(ptep);
3176                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3177                         migration_entry_wait_huge(vma, mm, ptep);
3178                         return 0;
3179                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3180                         return VM_FAULT_HWPOISON_LARGE |
3181                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3182         }
3183
3184         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3185         if (!ptep)
3186                 return VM_FAULT_OOM;
3187
3188         mapping = vma->vm_file->f_mapping;
3189         idx = vma_hugecache_offset(h, vma, address);
3190
3191         /*
3192          * Serialize hugepage allocation and instantiation, so that we don't
3193          * get spurious allocation failures if two CPUs race to instantiate
3194          * the same page in the page cache.
3195          */
3196         hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3197         mutex_lock(&htlb_fault_mutex_table[hash]);
3198
3199         entry = huge_ptep_get(ptep);
3200         if (huge_pte_none(entry)) {
3201                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3202                 goto out_mutex;
3203         }
3204
3205         ret = 0;
3206
3207         /*
3208          * entry could be a migration/hwpoison entry at this point, so this
3209          * check prevents the kernel from proceeding below on the assumption that
3210          * we have an active hugepage in the pagecache. This goto relies on a second
3211          * page fault, where the is_hugetlb_entry_(migration|hwpoisoned) checks
3212          * will handle the entry properly.
3213          */
3214         if (!pte_present(entry))
3215                 goto out_mutex;
3216
3217         /*
3218          * If we are going to COW the mapping later, we examine the pending
3219          * reservations for this page now. This will ensure that any
3220          * allocations necessary to record that reservation occur outside the
3221          * spinlock. For private mappings, we also lookup the pagecache
3222          * page now as it is used to determine if a reservation has been
3223          * consumed.
3224          */
3225         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3226                 if (vma_needs_reservation(h, vma, address) < 0) {
3227                         ret = VM_FAULT_OOM;
3228                         goto out_mutex;
3229                 }
3230
3231                 if (!(vma->vm_flags & VM_MAYSHARE))
3232                         pagecache_page = hugetlbfs_pagecache_page(h,
3233                                                                 vma, address);
3234         }
3235
3236         ptl = huge_pte_lock(h, mm, ptep);
3237
3238         /* Check for a racing update before calling hugetlb_cow */
3239         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3240                 goto out_ptl;
3241
3242         /*
3243          * hugetlb_cow() requires page locks of pte_page(entry) and
3244          * pagecache_page, so here we need to take the former one
3245          * when page != pagecache_page or !pagecache_page.
3246          */
3247         page = pte_page(entry);
3248         if (page != pagecache_page)
3249                 if (!trylock_page(page)) {
3250                         need_wait_lock = 1;
3251                         goto out_ptl;
3252                 }
3253
3254         get_page(page);
3255
3256         if (flags & FAULT_FLAG_WRITE) {
3257                 if (!huge_pte_write(entry)) {
3258                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3259                                         pagecache_page, ptl);
3260                         goto out_put_page;
3261                 }
3262                 entry = huge_pte_mkdirty(entry);
3263         }
3264         entry = pte_mkyoung(entry);
3265         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3266                                                 flags & FAULT_FLAG_WRITE))
3267                 update_mmu_cache(vma, address, ptep);
3268 out_put_page:
3269         if (page != pagecache_page)
3270                 unlock_page(page);
3271         put_page(page);
3272 out_ptl:
3273         spin_unlock(ptl);
3274
3275         if (pagecache_page) {
3276                 unlock_page(pagecache_page);
3277                 put_page(pagecache_page);
3278         }
3279 out_mutex:
3280         mutex_unlock(&htlb_fault_mutex_table[hash]);
3281         /*
3282          * Generally it's safe to hold a refcount while waiting for a page lock. But
3283          * here we only wait to defer the next page fault and avoid a busy loop, and
3284          * the page is not used after it is unlocked and before the current page
3285          * fault returns. So we are safe from accessing a freed page, even though we
3286          * wait here without taking a refcount.
3287          */
3288         if (need_wait_lock)
3289                 wait_on_page_locked(page);
3290         return ret;
3291 }
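/*
 * Sketch of how the generic fault path reaches hugetlb_fault() (a simplified
 * view of the mm/memory.c dispatch, not a verbatim copy): hugetlb VMAs bypass
 * the normal pte/pmd handlers entirely and are resolved here under the
 * per-page fault mutex.
 *
 *	if (unlikely(is_vm_hugetlb_page(vma)))
 *		return hugetlb_fault(mm, vma, address, flags);
 */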
3292
3293 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3294                          struct page **pages, struct vm_area_struct **vmas,
3295                          unsigned long *position, unsigned long *nr_pages,
3296                          long i, unsigned int flags)
3297 {
3298         unsigned long pfn_offset;
3299         unsigned long vaddr = *position;
3300         unsigned long remainder = *nr_pages;
3301         struct hstate *h = hstate_vma(vma);
3302
3303         while (vaddr < vma->vm_end && remainder) {
3304                 pte_t *pte;
3305                 spinlock_t *ptl = NULL;
3306                 int absent;
3307                 struct page *page;
3308
3309                 /*
3310                  * Some archs (sparc64, sh*) have multiple pte_t entries
3311                  * per hugepage.  We have to make sure we get the
3312                  * first, for the page indexing below to work.
3313                  *
3314                  * Note that the page table lock is not held when pte is NULL.
3315                  */
3316                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3317                 if (pte)
3318                         ptl = huge_pte_lock(h, mm, pte);
3319                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3320
3321                 /*
3322                  * When coredumping, it suits get_dump_page if we just return
3323                  * an error where there's an empty slot with no huge pagecache
3324                  * to back it.  This way, we avoid allocating a hugepage, and
3325                  * the sparse dumpfile avoids allocating disk blocks, but its
3326                  * huge holes still show up with zeroes where they need to be.
3327                  */
3328                 if (absent && (flags & FOLL_DUMP) &&
3329                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3330                         if (pte)
3331                                 spin_unlock(ptl);
3332                         remainder = 0;
3333                         break;
3334                 }
3335
3336                 /*
3337                  * We need to call hugetlb_fault for both hugepages under migration
3338                  * (in which case hugetlb_fault waits for the migration) and
3339                  * hwpoisoned hugepages (in which case we need to prevent the
3340                  * caller from accessing them). To do this, we use is_swap_pte
3341                  * here instead of is_hugetlb_entry_migration and
3342                  * is_hugetlb_entry_hwpoisoned, because it simply covers
3343                  * both cases and because we can't follow correct pages
3344                  * directly from any kind of swap entry.
3345                  */
3346                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3347                     ((flags & FOLL_WRITE) &&
3348                       !huge_pte_write(huge_ptep_get(pte)))) {
3349                         int ret;
3350
3351                         if (pte)
3352                                 spin_unlock(ptl);
3353                         ret = hugetlb_fault(mm, vma, vaddr,
3354                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3355                         if (!(ret & VM_FAULT_ERROR))
3356                                 continue;
3357
3358                         remainder = 0;
3359                         break;
3360                 }
3361
3362                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3363                 page = pte_page(huge_ptep_get(pte));
3364 same_page:
3365                 if (pages) {
3366                         pages[i] = mem_map_offset(page, pfn_offset);
3367                         get_page_foll(pages[i]);
3368                 }
3369
3370                 if (vmas)
3371                         vmas[i] = vma;
3372
3373                 vaddr += PAGE_SIZE;
3374                 ++pfn_offset;
3375                 --remainder;
3376                 ++i;
3377                 if (vaddr < vma->vm_end && remainder &&
3378                                 pfn_offset < pages_per_huge_page(h)) {
3379                         /*
3380                          * We use pfn_offset to avoid touching the pageframes
3381                          * of this compound page.
3382                          */
3383                         goto same_page;
3384                 }
3385                 spin_unlock(ptl);
3386         }
3387         *nr_pages = remainder;
3388         *position = vaddr;
3389
3390         return i ? i : -EFAULT;
3391 }
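/*
 * Usage sketch: the get_user_pages() slow path hands whole hugetlb VMAs to
 * follow_hugetlb_page() instead of walking them pte by pte (simplified from
 * the mm/gup.c call site; the variable names are the caller's own):
 *
 *	if (is_vm_hugetlb_page(vma)) {
 *		i = follow_hugetlb_page(mm, vma, pages, vmas,
 *					&start, &nr_pages, i, gup_flags);
 *		continue;
 *	}
 */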
3392
3393 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3394                 unsigned long address, unsigned long end, pgprot_t newprot)
3395 {
3396         struct mm_struct *mm = vma->vm_mm;
3397         unsigned long start = address;
3398         pte_t *ptep;
3399         pte_t pte;
3400         struct hstate *h = hstate_vma(vma);
3401         unsigned long pages = 0;
3402
3403         BUG_ON(address >= end);
3404         flush_cache_range(vma, address, end);
3405
3406         mmu_notifier_invalidate_range_start(mm, start, end);
3407         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3408         for (; address < end; address += huge_page_size(h)) {
3409                 spinlock_t *ptl;
3410                 ptep = huge_pte_offset(mm, address);
3411                 if (!ptep)
3412                         continue;
3413                 ptl = huge_pte_lock(h, mm, ptep);
3414                 if (huge_pmd_unshare(mm, &address, ptep)) {
3415                         pages++;
3416                         spin_unlock(ptl);
3417                         continue;
3418                 }
3419                 pte = huge_ptep_get(ptep);
3420                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3421                         spin_unlock(ptl);
3422                         continue;
3423                 }
3424                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3425                         swp_entry_t entry = pte_to_swp_entry(pte);
3426
3427                         if (is_write_migration_entry(entry)) {
3428                                 pte_t newpte;
3429
3430                                 make_migration_entry_read(&entry);
3431                                 newpte = swp_entry_to_pte(entry);
3432                                 set_huge_pte_at(mm, address, ptep, newpte);
3433                                 pages++;
3434                         }
3435                         spin_unlock(ptl);
3436                         continue;
3437                 }
3438                 if (!huge_pte_none(pte)) {
3439                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3440                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3441                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3442                         set_huge_pte_at(mm, address, ptep, pte);
3443                         pages++;
3444                 }
3445                 spin_unlock(ptl);
3446         }
3447         /*
3448          * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3449          * may have cleared our pud entry and done put_page on the page table:
3450          * once we release i_mmap_mutex, another task can do the final put_page
3451          * and that page table can be reused and filled with junk.
3452          */
3453         flush_tlb_range(vma, start, end);
3454         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3455         mmu_notifier_invalidate_range_end(mm, start, end);
3456
3457         return pages << h->order;
3458 }
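/*
 * The return value above is in base pages (each huge page counts for
 * 1 << h->order of them), so a caller accounting in KiB would do something
 * like this (sketch; "vma", "start", "end" and "newprot" are the caller's
 * own):
 *
 *	unsigned long pages;
 *
 *	pages = hugetlb_change_protection(vma, start, end, newprot);
 *	pr_debug("changed protection on %lu KiB\n",
 *		 pages << (PAGE_SHIFT - 10));
 */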
3459
3460 int hugetlb_reserve_pages(struct inode *inode,
3461                                         long from, long to,
3462                                         struct vm_area_struct *vma,
3463                                         vm_flags_t vm_flags)
3464 {
3465         long ret, chg;
3466         struct hstate *h = hstate_inode(inode);
3467         struct hugepage_subpool *spool = subpool_inode(inode);
3468         struct resv_map *resv_map;
3469
3470         /*
3471          * Only apply hugepage reservation if asked. At fault time, an
3472          * attempt will be made for VM_NORESERVE mappings to allocate a page
3473          * without using reserves.
3474          */
3475         if (vm_flags & VM_NORESERVE)
3476                 return 0;
3477
3478         /*
3479          * Shared mappings base their reservation on the number of pages that
3480          * are already allocated on behalf of the file. Private mappings need
3481          * to reserve the full area even if read-only, as mprotect() may be
3482          * called to make the mapping read-write. Assume !vma is a shm mapping.
3483          */
3484         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3485                 resv_map = inode_resv_map(inode);
3486
3487                 chg = region_chg(resv_map, from, to);
3488
3489         } else {
3490                 resv_map = resv_map_alloc();
3491                 if (!resv_map)
3492                         return -ENOMEM;
3493
3494                 chg = to - from;
3495
3496                 set_vma_resv_map(vma, resv_map);
3497                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3498         }
3499
3500         if (chg < 0) {
3501                 ret = chg;
3502                 goto out_err;
3503         }
3504
3505         /* There must be enough pages in the subpool for the mapping */
3506         if (hugepage_subpool_get_pages(spool, chg)) {
3507                 ret = -ENOSPC;
3508                 goto out_err;
3509         }
3510
3511         /*
3512          * Check that enough hugepages are available for the reservation.
3513          * Hand the pages back to the subpool if there are not.
3514          */
3515         ret = hugetlb_acct_memory(h, chg);
3516         if (ret < 0) {
3517                 hugepage_subpool_put_pages(spool, chg);
3518                 goto out_err;
3519         }
3520
3521         /*
3522          * Account for the reservations made. Shared mappings record regions
3523          * that have reservations as they are shared by multiple VMAs.
3524          * When the last VMA disappears, the region map says how much
3525          * the reservation was and the page cache tells how much of
3526          * the reservation was consumed. Private mappings are per-VMA and
3527          * only the consumed reservations are tracked. When the VMA
3528          * disappears, the original reservation is the VMA size and the
3529          * consumed reservations are stored in the map. Hence, nothing
3530          * else has to be done for private mappings here.
3531          */
3532         if (!vma || vma->vm_flags & VM_MAYSHARE)
3533                 region_add(resv_map, from, to);
3534         return 0;
3535 out_err:
3536         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3537                 kref_put(&resv_map->refs, resv_map_release);
3538         return ret;
3539 }
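/*
 * Usage sketch: hugetlbfs reserves pages when a file is mapped, roughly like
 * this (simplified from the fs/hugetlbfs mmap handler; "h" is the file's
 * hstate and "len" the mapping length in bytes):
 *
 *	ret = hugetlb_reserve_pages(inode,
 *				    vma->vm_pgoff >> huge_page_order(h),
 *				    len >> huge_page_shift(h),
 *				    vma, vma->vm_flags);
 *	if (ret)
 *		goto out;
 *
 * For a 10 * 2MB mapping of an empty file this reserves 10 huge pages up
 * front, unless VM_NORESERVE is set.
 */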
3540
3541 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3542 {
3543         struct hstate *h = hstate_inode(inode);
3544         struct resv_map *resv_map = inode_resv_map(inode);
3545         long chg = 0;
3546         struct hugepage_subpool *spool = subpool_inode(inode);
3547
3548         if (resv_map)
3549                 chg = region_truncate(resv_map, offset);
3550         spin_lock(&inode->i_lock);
3551         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3552         spin_unlock(&inode->i_lock);
3553
3554         hugepage_subpool_put_pages(spool, (chg - freed));
3555         hugetlb_acct_memory(h, -(chg - freed));
3556 }
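/*
 * Worked example of the accounting above (hypothetical numbers): if a
 * truncation drops 5 reserved pages from the region map (chg = 5) but only
 * 2 of them had ever been faulted in and freed from the page cache
 * (freed = 2), then chg - freed = 3 reservations were never consumed and are
 * handed back to the subpool and to the global pool via
 * hugetlb_acct_memory(h, -3).
 */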
3557
3558 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3559 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3560                                 struct vm_area_struct *vma,
3561                                 unsigned long addr, pgoff_t idx)
3562 {
3563         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3564                                 svma->vm_start;
3565         unsigned long sbase = saddr & PUD_MASK;
3566         unsigned long s_end = sbase + PUD_SIZE;
3567
3568         /* Allow segments to share if only one is marked locked */
3569         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3570         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3571
3572         /*
3573          * Match the virtual addresses, permissions and the alignment of the
3574          * page table page.
3575          */
3576         if (pmd_index(addr) != pmd_index(saddr) ||
3577             vm_flags != svm_flags ||
3578             sbase < svma->vm_start || svma->vm_end < s_end)
3579                 return 0;
3580
3581         return saddr;
3582 }
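/*
 * Worked example (hypothetical addresses; x86_64 with 2MB huge pages, so one
 * PUD covers 1GB): two tasks map the same file from offset 0, svma at
 * 0x7f5500000000 and vma at 0x7f6600000000, both 1GB aligned and at least
 * 1GB long.  For addr = vma->vm_start + 6MB the file index translates to
 * saddr = svma->vm_start + 6MB, pmd_index() is 3 in both mappings, the flags
 * match, and [sbase, s_end) lies inside svma, so that 1GB slot can share one
 * pmd page.  Any mismatch makes this return 0 and the caller falls back to a
 * private pmd.
 */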
3583
3584 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3585 {
3586         unsigned long base = addr & PUD_MASK;
3587         unsigned long end = base + PUD_SIZE;
3588
3589         /*
3590          * Check for proper vm_flags and page table alignment.
3591          */
3592         if (vma->vm_flags & VM_MAYSHARE &&
3593             vma->vm_start <= base && end <= vma->vm_end)
3594                 return 1;
3595         return 0;
3596 }
3597
3598 /*
3599  * Search for a shareable pmd page for hugetlb. In any case this calls
3600  * pmd_alloc() and returns the corresponding pte. While this is not necessary for the
3601  * !shared pmd case because we can allocate the pmd later as well, it makes the
3602  * code much cleaner. pmd allocation is essential for the shared case because
3603  * pud has to be populated inside the same i_mmap_mutex section - otherwise
3604  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3605  * bad pmd for sharing.
3606  */
3607 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3608 {
3609         struct vm_area_struct *vma = find_vma(mm, addr);
3610         struct address_space *mapping = vma->vm_file->f_mapping;
3611         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3612                         vma->vm_pgoff;
3613         struct vm_area_struct *svma;
3614         unsigned long saddr;
3615         pte_t *spte = NULL;
3616         pte_t *pte;
3617         spinlock_t *ptl;
3618
3619         if (!vma_shareable(vma, addr))
3620                 return (pte_t *)pmd_alloc(mm, pud, addr);
3621
3622         mutex_lock(&mapping->i_mmap_mutex);
3623         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3624                 if (svma == vma)
3625                         continue;
3626
3627                 saddr = page_table_shareable(svma, vma, addr, idx);
3628                 if (saddr) {
3629                         spte = huge_pte_offset(svma->vm_mm, saddr);
3630                         if (spte) {
3631                                 get_page(virt_to_page(spte));
3632                                 break;
3633                         }
3634                 }
3635         }
3636
3637         if (!spte)
3638                 goto out;
3639
3640         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3641         spin_lock(ptl);
3642         if (pud_none(*pud))
3643                 pud_populate(mm, pud,
3644                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3645         else
3646                 put_page(virt_to_page(spte));
3647         spin_unlock(ptl);
3648 out:
3649         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3650         mutex_unlock(&mapping->i_mmap_mutex);
3651         return pte;
3652 }
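/*
 * Sketch of the resulting sharing (illustrative): after a successful share,
 * both mm's pud entries point at the same pmd page, and the page count of
 * that pmd page tracks the number of sharers:
 *
 *	allocation by the first task:     page_count(pmd page) == 1
 *	huge_pmd_share() by a second:     get_page()  -> count == 2
 *	huge_pmd_unshare() by either:     put_page()  -> count == 1
 *
 * which is the count that huge_pmd_unshare() below tests against.
 */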
3653
3654 /*
3655  * Unmap a huge page backed by a shared pte.
3656  *
3657  * The hugetlb pte page is ref counted at the time of mapping.  If the pte is
3658  * shared, indicated by page_count > 1, unmapping is achieved by clearing the
3659  * pud and decrementing the ref count. If count == 1, the pte page is not shared.
3660  *
3661  * Called with the page table lock held.
3662  *
3663  * returns: 1 successfully unmapped a shared pte page
3664  *          0 the underlying pte page is not shared, or it is the last user
3665  */
3666 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3667 {
3668         pgd_t *pgd = pgd_offset(mm, *addr);
3669         pud_t *pud = pud_offset(pgd, *addr);
3670
3671         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3672         if (page_count(virt_to_page(ptep)) == 1)
3673                 return 0;
3674
3675         pud_clear(pud);
3676         put_page(virt_to_page(ptep));
3677         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3678         return 1;
3679 }
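/*
 * Worked example for the *addr adjustment above (x86_64 with 2MB huge pages,
 * so HPAGE_SIZE * PTRS_PER_PTE == 1GB): if *addr was 0x40200000 when the
 * shared pmd page was unhooked, it becomes
 * ALIGN(0x40200000, 1GB) - 2MB == 0x7fe00000, so a caller that next does
 * "address += huge_page_size(h)" resumes exactly at the 1GB boundary
 * 0x80000000 and skips the rest of the range that was backed by the
 * now-unshared pmd page.
 */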
3680 #define want_pmd_share()        (1)
3681 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3682 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3683 {
3684         return NULL;
3685 }
3686 #define want_pmd_share()        (0)
3687 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3688
3689 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3690 pte_t *huge_pte_alloc(struct mm_struct *mm,
3691                         unsigned long addr, unsigned long sz)
3692 {
3693         pgd_t *pgd;
3694         pud_t *pud;
3695         pte_t *pte = NULL;
3696
3697         pgd = pgd_offset(mm, addr);
3698         pud = pud_alloc(mm, pgd, addr);
3699         if (pud) {
3700                 if (sz == PUD_SIZE) {
3701                         pte = (pte_t *)pud;
3702                 } else {
3703                         BUG_ON(sz != PMD_SIZE);
3704                         if (want_pmd_share() && pud_none(*pud))
3705                                 pte = huge_pmd_share(mm, addr, pud);
3706                         else
3707                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3708                 }
3709         }
3710         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3711
3712         return pte;
3713 }
3714
3715 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3716 {
3717         pgd_t *pgd;
3718         pud_t *pud;
3719         pmd_t *pmd = NULL;
3720
3721         pgd = pgd_offset(mm, addr);
3722         if (pgd_present(*pgd)) {
3723                 pud = pud_offset(pgd, addr);
3724                 if (pud_present(*pud)) {
3725                         if (pud_huge(*pud))
3726                                 return (pte_t *)pud;
3727                         pmd = pmd_offset(pud, addr);
3728                 }
3729         }
3730         return (pte_t *) pmd;
3731 }
3732
3733 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
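/*
 * Usage sketch for the generic walkers above: the size argument selects the
 * level, so on x86_64 a 2MB page allocates down to the pmd while a 1GB page
 * stops at the pud (illustrative; "mm" and "addr" are the caller's own):
 *
 *	pte_t *pmd_level = huge_pte_alloc(mm, addr, PMD_SIZE);
 *	pte_t *pud_level = huge_pte_alloc(mm, addr, PUD_SIZE);
 *
 * huge_pte_offset() mirrors this on lookup, returning the pud when it is
 * mapped huge and the pmd slot otherwise.
 */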
3734
3735 /*
3736  * These functions are overridable if your architecture needs its own
3737  * behavior.
3738  */
3739 struct page * __weak
3740 follow_huge_addr(struct mm_struct *mm, unsigned long address,
3741                               int write)
3742 {
3743         return ERR_PTR(-EINVAL);
3744 }
3745
3746 struct page * __weak
3747 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3748                 pmd_t *pmd, int flags)
3749 {
3750         struct page *page = NULL;
3751         spinlock_t *ptl;
3752 retry:
3753         ptl = pmd_lockptr(mm, pmd);
3754         spin_lock(ptl);
3755         /*
3756          * Make sure that the address range covered by this pmd is not
3757          * unmapped by other threads.
3758          */
3759         if (!pmd_huge(*pmd))
3760                 goto out;
3761         if (pmd_present(*pmd)) {
3762                 page = pte_page(*(pte_t *)pmd) +
3763                         ((address & ~PMD_MASK) >> PAGE_SHIFT);
3764                 if (flags & FOLL_GET)
3765                         get_page(page);
3766         } else {
3767                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3768                         spin_unlock(ptl);
3769                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3770                         goto retry;
3771                 }
3772                 /*
3773                  * hwpoisoned entry is treated as no_page_table in
3774                  * follow_page_mask().
3775                  */
3776         }
3777 out:
3778         spin_unlock(ptl);
3779         return page;
3780 }
3781
3782 struct page * __weak
3783 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3784                 pud_t *pud, int flags)
3785 {
3786         if (flags & FOLL_GET)
3787                 return NULL;
3788
3789         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
3790 }
3791
3792 #ifdef CONFIG_MEMORY_FAILURE
3793
3794 /* Should be called with hugetlb_lock held */
3795 static int is_hugepage_on_freelist(struct page *hpage)
3796 {
3797         struct page *page;
3798         struct page *tmp;
3799         struct hstate *h = page_hstate(hpage);
3800         int nid = page_to_nid(hpage);
3801
3802         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3803                 if (page == hpage)
3804                         return 1;
3805         return 0;
3806 }
3807
3808 /*
3809  * This function is called from memory failure code.
3810  * The caller is assumed to hold the page lock of the head page.
3811  */
3812 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3813 {
3814         struct hstate *h = page_hstate(hpage);
3815         int nid = page_to_nid(hpage);
3816         int ret = -EBUSY;
3817
3818         spin_lock(&hugetlb_lock);
3819         if (is_hugepage_on_freelist(hpage)) {
3820                 /*
3821                  * A hwpoisoned hugepage isn't linked to the activelist or freelist,
3822                  * but dangling hpage->lru can trigger list-debug warnings
3823                  * (this happens when we call unpoison_memory() on it),
3824                  * so let it point to itself with list_del_init().
3825                  */
3826                 list_del_init(&hpage->lru);
3827                 set_page_refcounted(hpage);
3828                 h->free_huge_pages--;
3829                 h->free_huge_pages_node[nid]--;
3830                 ret = 0;
3831         }
3832         spin_unlock(&hugetlb_lock);
3833         return ret;
3834 }
3835 #endif
3836
3837 bool isolate_huge_page(struct page *page, struct list_head *list)
3838 {
3839         bool ret = true;
3840
3841         VM_BUG_ON_PAGE(!PageHead(page), page);
3842         spin_lock(&hugetlb_lock);
3843         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3844                 ret = false;
3845                 goto unlock;
3846         }
3847         clear_page_huge_active(page);
3848         list_move_tail(&page->lru, list);
3849 unlock:
3850         spin_unlock(&hugetlb_lock);
3851         return ret;
3852 }
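/*
 * Usage sketch for isolate_huge_page() above and putback_active_hugepage()
 * below (hypothetical migration caller; "migrate_somehow" stands in for the
 * real migration step):
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist)) {
 *		if (migrate_somehow(&pagelist))
 *			putback_active_hugepage(page);
 *	}
 *
 * isolate_huge_page() takes a reference and moves the page off the active
 * list; putback_active_hugepage() re-activates it and drops that reference.
 */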
3853
3854 void putback_active_hugepage(struct page *page)
3855 {
3856         VM_BUG_ON_PAGE(!PageHead(page), page);
3857         spin_lock(&hugetlb_lock);
3858         set_page_huge_active(page);
3859         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3860         spin_unlock(&hugetlb_lock);
3861         put_page(page);
3862 }
3863
3864 bool is_hugepage_active(struct page *page)
3865 {
3866         VM_BUG_ON_PAGE(!PageHuge(page), page);
3867         /*
3868          * This function can be called for a tail page because the caller,
3869          * scan_movable_pages, scans through a given pfn-range which typically
3870          * covers one memory block. In systems using gigantic hugepages (1GB
3871          * on x86_64), a hugepage is larger than a memory block, and we don't
3872          * support migrating such large hugepages for now, so return false
3873          * when called for tail pages.
3874          */
3875         if (PageTail(page))
3876                 return false;
3877         /*
3878          * The refcount of a hwpoisoned hugepage is 1, but such pages are not
3879          * active, so we should return false for them.
3880          */
3881         if (unlikely(PageHWPoison(page)))
3882                 return false;
3883         return page_count(page) > 0;
3884 }