X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=arch%2Farm%2Flib%2Fcache-cp15.c;h=f0c1b03728be3bca6ff16e0ded0abb4eeb19abae;hb=c1c597e8a8abfe16938ce8a1d792c703c1a6c79b;hp=1121dc3a936c8ce5f1ee10314240136516d30b94;hpb=0badc648dcb0d0e10db959ffe8ab1b1e156b3724;p=oweals%2Fu-boot.git

diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
index 1121dc3a93..f0c1b03728 100644
--- a/arch/arm/lib/cache-cp15.c
+++ b/arch/arm/lib/cache-cp15.c
@@ -22,16 +22,6 @@ __weak void arm_init_domains(void)
 {
 }
 
-static void cp_delay (void)
-{
-	volatile int i;
-
-	/* copro seems to need some delay between reading and writing */
-	for (i = 0; i < 100; i++)
-		nop();
-	asm volatile("" : : : "memory");
-}
-
 void set_section_dcache(int section, enum dcache_option option)
 {
 #ifdef CONFIG_ARMV7_LPAE
@@ -61,16 +51,37 @@ __weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
 void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 				     enum dcache_option option)
 {
+#ifdef CONFIG_ARMV7_LPAE
+	u64 *page_table = (u64 *)gd->arch.tlb_addr;
+#else
 	u32 *page_table = (u32 *)gd->arch.tlb_addr;
+#endif
+	unsigned long startpt, stoppt;
 	unsigned long upto, end;
 
 	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
 	start = start >> MMU_SECTION_SHIFT;
-	debug("%s: start=%pa, size=%zu, option=%d\n", __func__, &start, size,
+#ifdef CONFIG_ARMV7_LPAE
+	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
+	      option);
+#else
+	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
 	      option);
+#endif
 	for (upto = start; upto < end; upto++)
 		set_section_dcache(upto, option);
-	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
+
+	/*
+	 * Make sure range is cache line aligned
+	 * Only CPU maintains page tables, hence it is safe to always
+	 * flush complete cache lines...
+	 */
+
+	startpt = (unsigned long)&page_table[start];
+	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
+	stoppt = (unsigned long)&page_table[end];
+	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
+	mmu_page_table_flush(startpt, stoppt);
 }
 
 __weak void dram_bank_mmu_setup(int bank)
@@ -108,7 +119,7 @@
 		dram_bank_mmu_setup(i);
 	}
 
-#ifdef CONFIG_ARMV7_LPAE
+#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
 	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
 	for (i = 0; i < 4; i++) {
 		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
@@ -126,7 +137,7 @@ static inline void mmu_setup(void)
 #endif
 
 	if (is_hyp()) {
-		/* Set HCTR to enable LPAE */
+		/* Set HTCR to enable LPAE */
 		asm volatile("mcr p15, 4, %0, c2, c0, 2"
 			: : "r" (reg) : "memory");
 		/* Set HTTBR0 */
@@ -151,6 +162,15 @@ static inline void mmu_setup(void)
 			: : "r" (MEMORY_ATTRIBUTES) : "memory");
 	}
 #elif defined(CONFIG_CPU_V7)
+	if (is_hyp()) {
+		/* Set HTCR to disable LPAE */
+		asm volatile("mcr p15, 4, %0, c2, c0, 2"
+			: : "r" (0) : "memory");
+	} else {
+		/* Set TTBCR to disable LPAE */
+		asm volatile("mcr p15, 0, %0, c2, c0, 2"
+			: : "r" (0) : "memory");
+	}
 	/* Set TTBR0 */
 	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
 #if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
@@ -175,7 +195,6 @@ static inline void mmu_setup(void)
 
 	/* and enable the mmu */
 	reg = get_cr();	/* get control reg. */
-	cp_delay();
 	set_cr(reg | CR_M);
 }
 
@@ -193,7 +212,6 @@ static void cache_enable(uint32_t cache_bit)
 	if ((cache_bit == CR_C) && !mmu_enabled())
 		mmu_setup();
 	reg = get_cr();	/* get control reg. */
-	cp_delay();
 	set_cr(reg | cache_bit);
 }
 
@@ -203,7 +221,6 @@ static void cache_disable(uint32_t cache_bit)
 	uint32_t reg;
 
 	reg = get_cr();
-	cp_delay();
 
 	if (cache_bit == CR_C) {
 		/* if cache isn;t enabled no need to disable */
@@ -213,7 +230,7 @@ static void cache_disable(uint32_t cache_bit)
 		cache_bit |= CR_M;
 	}
 	reg = get_cr();
-	cp_delay();
+
 	if (cache_bit == (CR_C | CR_M))
 		flush_dcache_all();
 	set_cr(reg & ~cache_bit);
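
For reference (illustration only, not part of the patch): the new tail of mmu_set_region_dcache_behaviour() rounds the page-table flush range out to whole cache lines before calling mmu_page_table_flush(). Below is a minimal standalone C sketch of that rounding; CACHELINE_SIZE and the example addresses are assumptions chosen for illustration, where U-Boot itself uses CONFIG_SYS_CACHELINE_SIZE and the real page-table entry addresses.

#include <stdio.h>

#define CACHELINE_SIZE	64				/* assumed line size */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* round up to multiple of a */

int main(void)
{
	/* hypothetical addresses of the first and one-past-last PTE touched */
	unsigned long startpt = 0x4000f004UL;
	unsigned long stoppt  = 0x4000f07cUL;

	startpt &= ~(CACHELINE_SIZE - 1UL);		/* round start down to a line */
	stoppt = ALIGN(stoppt, CACHELINE_SIZE);		/* round stop up to a line */

	/* prints: flush 0x4000f000..0x4000f080 */
	printf("flush 0x%lx..0x%lx\n", startpt, stoppt);
	return 0;
}

Rounding outward is safe here because, as the comment added by the patch notes, only the CPU maintains the page tables, so flushing the extra bytes at either end of the range cannot corrupt anything.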