// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];
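
/*
 * Allocate a size-aligned page-table block directly from memblock,
 * for use before the slab allocator is up. Failure is fatal: the
 * kernel cannot continue without its initial page tables.
 */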
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}
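
/*
 * Boot-time counterpart of pte_alloc_kernel(): if the PMD entry is
 * empty, back it with a freshly allocated PTE fragment, then return
 * the PTE pointer for va.
 */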
static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}
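
/*
 * Establish a kernel mapping of the single page at virtual address va
 * onto physical address pa with protection prot. Marked __ref because
 * the __init allocator above is only reachable before slab_is_available().
 */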
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory from offset up to top, where both
 * are offsets from the base of lowmem.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
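
/*
 * Map all of lowmem into the kernel's linear mapping. mmu_mapin_ram()
 * first covers what it can with block mappings (e.g. BATs); the rest is
 * mapped page by page. With CONFIG_BDI_SWITCH the whole region is mapped
 * in the page tables as well, so the Abatron BDI debugger can walk them.
 */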
void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		if (IS_ENABLED(CONFIG_BDI_SWITCH))
			__mapin_ram_chunk(reg->base, top);
		else
			__mapin_ram_chunk(base, top);
	}
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}
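
/*
 * Change the protection of a single page in the kernel linear mapping.
 * Pages covered by a block mapping are left untouched; the caller is
 * responsible for the TLB flush.
 */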
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}
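
/*
 * Make the freed init text non-executable. If the region is covered by
 * a block mapping, the MMU-specific handler takes care of it instead.
 */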
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
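/*
 * Write-protect kernel text and rodata once boot is complete. A
 * block-mapped kernel delegates to mmu_mark_rodata_ro(); otherwise the
 * protection is changed page by page via change_page_attr().
 */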
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
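/*
 * DEBUG_PAGEALLOC hook: clear (or restore) linear-mapping PTEs so that
 * stray accesses to free pages fault. Highmem pages have no permanent
 * mapping, so there is nothing to do for them.
 */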
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */