/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H

#include <asm/firmware.h>

/*
 * For radix we want generic code to handle hugetlb. But then if we want
 * both hash and radix to be enabled together we need to work around the
 * limitations.
 */
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags);

extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte);

static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}
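
/*
 * Illustrative mapping on a typical configuration (the real shifts come from
 * mmu_psize_defs[], which platform setup fills in from firmware):
 *	shift 21 (2^21 = 2M)  -> MMU_PAGE_2M	(radix)
 *	shift 30 (2^30 = 1G)  -> MMU_PAGE_1G	(radix)
 *	shift 24 (2^24 = 16M) -> MMU_PAGE_16M	(hash)
 *	shift 34 (2^34 = 16G) -> MMU_PAGE_16G	(hash)
 * Any other shift is unexpected and falls back to mmu_virtual_psize with a
 * warning.
 */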

#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
static inline bool gigantic_page_runtime_supported(void)
{
	/*
	 * We rely on gigantic page reservation with hypervisor assist in
	 * some cases, so we cannot use runtime allocation of gigantic pages
	 * on those platforms. This applies to hash translation mode LPARs.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return false;

	return true;
}

/* hugepd entry valid bit */
#define HUGEPD_VAL_BITS		(0x8000000000000000UL)
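
/*
 * Layout sketch of a hugepd entry, derived from hugepd_populate(),
 * hugepd_mmu_psize() and hugepd_page() below (a reading aid, not a
 * separately documented ABI):
 *	bit 63         : HUGEPD_VAL_BITS, marks the entry as pointing to a
 *	                 huge page directory
 *	bits 5..2      : MMU page size index (at most four bits, see the
 *	                 BUILD_BUG_ON() in hugepd_page())
 *	remaining bits : physical address of the huge PTE page, recovered
 *	                 with HUGEPD_ADDR_MASK
 */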

#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);
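
/*
 * Rough shape of the start/commit protocol as generic hugetlb code is
 * expected to drive it (illustrative sketch only; locking, TLB flushing and
 * error handling omitted):
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */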

/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned int pdshift)
{
	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);

	return hugepd_page(hpd) + idx;
}
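
/*
 * Worked example with made-up numbers: for a directory entry covering a 1G
 * region (pdshift = 30) that holds 16M huge pages (hugepd_shift() = 24), an
 * address 80M into that region (offset 0x5000000) gives
 * idx = 0x5000000 >> 24 = 5, i.e. the sixth huge PTE in the page returned
 * by hugepd_page().
 */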

static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
	*hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2));
}
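
/*
 * Round-trip sketch (illustrative only, assuming pshift is a supported huge
 * page shift and "new" points to a suitably aligned huge PTE page): after
 *	hugepd_populate(&hpd, new, pshift);
 * the decoders above give back
 *	hugepd_mmu_psize(hpd) == shift_to_mmu_psize(pshift)
 *	hugepd_shift(hpd)     == pshift
 *	hugepd_page(hpd)      == new
 */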
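
/*
 * The arch hugetlb setup code is expected to use this roughly as follows for
 * each page size shift reported by firmware (illustrative sketch only):
 *
 *	int psize = check_and_get_huge_psize(shift);
 *
 *	if (psize < 0)
 *		return -EINVAL;		(the size is not usable for hugetlb)
 */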
static inline int check_and_get_huge_psize(int shift)
{
	int mmu_psize;

	if (shift > SLICE_HIGH_SHIFT)
		return -EINVAL;

	mmu_psize = shift_to_mmu_psize(shift);

	/*
	 * We need to make sure that for the different page sizes reported by
	 * firmware we only add hugetlb support for page sizes that can be
	 * supported by the Linux page table layout. For now that is:
	 *	Radix: 2M and 1G
	 *	Hash:  16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
			return -EINVAL;
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
	return mmu_psize;
}

#endif