// SPDX-License-Identifier: GPL-2.0+
 * David Feng <fenghua@phytium.com.cn>
 * Alexander Graf <agraf@suse.de>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
DECLARE_GLOBAL_DATA_PTR;
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *    _______________________________________________
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 * Lv0: FF8000000000       --
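 *
 * For example, the virtual address 0x40201000 decomposes into lookup
 * indices Lv0 = 0x000, Lv1 = 0x001, Lv2 = 0x001, Lv3 = 0x001 and a page
 * offset of 0x000, since each index is (va >> level2shift(level)) & 0x1FF.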
u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
	} else if (max_addr > (1ULL << 42)) {
	} else if (max_addr > (1ULL << 40)) {
	} else if (max_addr > (1ULL << 36)) {
	} else if (max_addr > (1ULL << 32)) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
		tcr = TCR_EL2_RSVD | (ips << 16);
		tcr = TCR_EL3_RSVD | (ips << 16);
	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);
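	/*
	 * Example: TCR_T0SZ() encodes the input address range as
	 * 64 - va_bits, so a 40-bit virtual address space is programmed
	 * as T0SZ = 24.
	 */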
#define MAX_PTE_ENTRIES 512
static int pte_type(u64 *pte)
	return *pte & PTE_TYPE_MASK;
/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
	/* The page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
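/*
 * level2shift() thus yields 12 for level 3 (4 KiB pages), 21 for level 2
 * (2 MiB blocks), 30 for level 1 (1 GiB blocks) and 39 for level 0
 * (512 GiB per Lv0 entry).
 */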
static u64 *find_pte(u64 addr, int level)
	debug("addr=%llx level=%d\n", addr, level);
	get_tcr(0, NULL, &va_bits);
	if (level < start_level)
	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);
		/* PTE is not a table (either invalid or a block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	/* Should never reach here */
/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);
	/* Allocate MAX_PTE_ENTRIES PTEs */
	gd->arch.tlb_fillptr += pt_len;
	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);
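	/*
	 * An all-zero descriptor has type PTE_TYPE_FAULT, so the new table
	 * maps nothing until its entries are filled in.
	 */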
static void set_pte_table(u64 *pte, u64 *table)
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);
	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);
	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);
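	/*
	 * Each new entry keeps the old block's attributes and only advances
	 * the output address: e.g. when a 1 GiB level 1 block is split,
	 * levelshift is 21 and entry i maps the old address plus i * 2 MiB.
	 */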
	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);
		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;
		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	/* Set the new table into effect */
	set_pte_table(pte, new_table);
/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	pte = find_pte(virt, 0);
	if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
		debug("Creating table for virt 0x%llx\n", virt);
		new_table = create_table();
		set_pte_table(pte, new_table);
	for (level = 1; level < 4; level++) {
		pte = find_pte(virt, level);
			panic("pte not found\n");
		blocksize = 1ULL << level2shift(level);
		debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
		      virt, size, blocksize);
		if (size >= blocksize && !(virt & (blocksize - 1))) {
			/* Page fits, create block PTE */
			debug("Setting PTE %p to block virt=%llx\n",
			*pte = phys | attrs | PTE_TYPE_PAGE;
		} else if (pte_type(pte) == PTE_TYPE_FAULT) {
			/* Page doesn't fit, create subpages */
			debug("Creating subtable for virt 0x%llx blksize=%llx\n",
			new_table = create_table();
			set_pte_table(pte, new_table);
		} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
			debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
			split_block(pte, level);
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
static int count_required_pts(u64 addr, int level, u64 maxaddr)
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	enum pte_type pte_type = PTE_INVAL;
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;
		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);
			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
			/* Lv0 cannot do block PTEs, so do levels here too */
				pte_type = PTE_LEVEL;
			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);
		/* Account for the new sub page table ... */
		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;
			if (addr >= maxaddr) {
				 * We reached the end of address space, no need
				 * to look any further.
/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
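	/* 512 entries of 8 bytes each, i.e. 4 KiB per page table */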
	get_tcr(0, NULL, &va_bits);
	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
void setup_pgtables(void)
	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not set up.");
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
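/*
 * The mem_map walked above is provided by board or SoC code. A minimal,
 * purely illustrative definition (names, addresses and sizes here are
 * hypothetical): 2 GiB of cacheable DRAM followed by an uncached,
 * execute-never device window, terminated by an empty entry:
 *
 *	static struct mm_region example_mem_map[] = {
 *		{
 *			.virt = 0x0UL,
 *			.phys = 0x0UL,
 *			.size = 0x80000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *				 PTE_BLOCK_INNER_SHARE
 *		}, {
 *			.virt = 0x80000000UL,
 *			.phys = 0x80000000UL,
 *			.size = 0x40000000UL,
 *			.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
 *				 PTE_BLOCK_NON_SHARE |
 *				 PTE_BLOCK_PXN | PTE_BLOCK_UXN
 *		}, {
 *			0,
 *		}
 *	};
 *	struct mm_region *mem_map = example_mem_map;
 */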
static void setup_all_pgtables(void)
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;
	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;
	/* Create normal system page tables */
	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
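	/*
	 * tlb_addr/tlb_size now describe the primary tables again, while
	 * tlb_emerg points at the second, identical set that
	 * mmu_set_region_dcache_behaviour() temporarily switches to while
	 * it rewrites the primary tables.
	 */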
/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
	set_sctlr(get_sctlr() | CR_M);
 * Performs an invalidation of the entire data cache at all levels
void invalidate_dcache_all(void)
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status
inline void flush_dcache_all(void)
	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
		debug("flushing dcache returns 0x%x\n", ret);
		debug("flushed dcache successfully.\n");
#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
 * Invalidates range in all levels of D-cache/unified cache
void invalidate_dcache_range(unsigned long start, unsigned long stop)
	__asm_invalidate_dcache_range(start, stop);
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
void flush_dcache_range(unsigned long start, unsigned long stop)
	__asm_flush_dcache_range(start, stop);
void invalidate_dcache_range(unsigned long start, unsigned long stop)
void flush_dcache_range(unsigned long start, unsigned long stop)
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */
void dcache_enable(void)
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
	set_sctlr(get_sctlr() | CR_C);
void dcache_disable(void)
	/* if the cache isn't enabled there is no need to disable it */
	set_sctlr(sctlr & ~(CR_C|CR_M));
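	/*
	 * CR_M is cleared along with CR_C: with the MMU off, AArch64 treats
	 * all data accesses as Device memory, so the D-cache could not be
	 * used anyway.
	 */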
	__asm_invalidate_tlb_all();
int dcache_status(void)
	return (get_sctlr() & CR_C) != 0;
u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");
static bool is_aligned(u64 addr, u64 size, u64 align)
	return !(addr & (align - 1)) && !(size & (align - 1));
/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);
	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		*pte &= ~PMD_ATTRMASK;
		*pte |= attrs & PMD_ATTRMASK;
		*pte &= ~PMD_ATTRINDX_MASK;
		*pte |= attrs & PMD_ATTRINDX_MASK;
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);
	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);
	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);
	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
	/* Roll on to the next page table level */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;
	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not set up.");
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	__asm_switch_ttbr(gd->arch.tlb_emerg);
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	for (level = 1; level < 4; level++) {
		/* Set d-cache attributes only */
		r = set_one_region(start, size, attrs, false, level);
		/* PTE successfully replaced */
	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	flush_dcache_range(real_start, real_start + real_size);
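/*
 * A typical (illustrative) use of mmu_set_region_dcache_behaviour() is a
 * driver marking a DMA or frame buffer region uncached; the names below
 * are hypothetical and the region must already be covered by mem_map:
 *
 *	mmu_set_region_dcache_behaviour(fb_base, fb_size, DCACHE_OFF);
 */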
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked as
 * invalid while it is being changed.
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	for (level = 1; level < 4; level++) {
		/* Set PTE to fault */
		r = set_one_region(start, size, PTE_TYPE_FAULT, true,
		/* PTE successfully invalidated */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	for (level = 1; level < 4; level++) {
		/* Set PTE to new attributes */
		r = set_one_region(start, size, attrs, true, level);
		/* PTE successfully updated */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
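/*
 * An illustrative call to mmu_change_region_attr() (base and size are
 * hypothetical; the attrs carry the PXN/UXN/memory type/valid bits
 * mentioned above) remapping a window as normal, inner-shareable memory:
 *
 *	mmu_change_region_attr(base, size,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE |
 *			       PTE_BLOCK_AF | PTE_TYPE_BLOCK);
 */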
#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
void invalidate_dcache_all(void)
void flush_dcache_all(void)
void dcache_enable(void)
void dcache_disable(void)
int dcache_status(void)
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
void icache_enable(void)
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
void icache_disable(void)
	set_sctlr(get_sctlr() & ~CR_I);
int icache_status(void)
	return (get_sctlr() & CR_I) != 0;
void invalidate_icache_all(void)
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
#else /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */
void icache_enable(void)
void icache_disable(void)
int icache_status(void)
void invalidate_icache_all(void)
#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */
 * Enable dCache & iCache; whether a cache is actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
void __weak enable_caches(void)