/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/tlbflush.h>

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/* Note due to the way vm flags are laid out, the bits are XWR */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
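
/*
 * Example (sketch, not part of this header): the generic mm code gathers
 * the entries above into protection_map[16] and indexes that table with
 * the low vm_flags bits, roughly:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * which is why the ordering above must track the XWR bit layout.
 */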

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
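
/*
 * Usage sketch (illustrative only): build a PTE for a freshly faulted-in
 * page using the VMA's protections:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 */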

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
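
/*
 * Usage sketch (illustrative only): keep the protection bits of an
 * existing PTE while retargeting it at a different physical page:
 *
 *	pte_t new = pfn_pte(new_pfn, pte_pgprot(old_pte));
 */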

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
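
/*
 * Note: the vaddr argument is ignored here; powerpc keeps a single zero
 * page rather than per-colour copies. Illustrative use from a read fault
 * on an anonymous mapping:
 *
 *	struct page *zp = ZERO_PAGE(address);
 */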

extern pgd_t swapper_pg_dir[];

int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
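
/*
 * Call-site sketch (illustrative only): the generic fault path invokes
 * this right after installing the new PTE, roughly:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 */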

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
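
/*
 * Usage sketch (illustrative only): translate a vmalloc'ed buffer to the
 * physical address backing it, e.g. before handing it to hardware:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long phys = vmalloc_to_phys(buf);
 */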

void pgtable_cache_add(unsigned int shift);
void pgtable_cache_init(void);
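
/*
 * Usage sketch (illustrative only; the shift value is subarch-specific):
 * init code registers a kmem cache for each page-table size it needs,
 * keyed by the index shift, e.g.:
 *
 *	pgtable_cache_add(PGD_INDEX_SIZE);
 */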

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
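
/*
 * Allocation sketch (illustrative, heavily simplified; next_frag() below
 * is a hypothetical helper standing in for the real refcount and offset
 * bookkeeping): the per-mm cached fragment is handed out PTE_FRAG_SIZE
 * at a time until its backing page is exhausted:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (frag) {
 *		pte_frag_set(&mm->context, next_frag(frag));
 *		return (pte_t *)frag;
 *	}
 */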

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */