#include <linux/linkage.h>
/*
- * void __asm_flush_dcache_level(level)
+ * void __asm_dcache_level(level)
*
- * clean and invalidate one level cache.
+ * flush or invalidate one level of cache.
*
* x0: cache level
- * x1: 0 flush & invalidate, 1 invalidate only
+ * x1: 0 clean & invalidate, 1 invalidate only
* x2~x9: clobbered
*/
-ENTRY(__asm_flush_dcache_level)
+.pushsection .text.__asm_dcache_level, "ax"
+ENTRY(__asm_dcache_level)
lsl x12, x0, #1 /* csselr_el1: level << 1, bit 0 (InD) = 0 for data/unified */
msr csselr_el1, x12 /* select cache level */
isb /* sync change of csselr_el1 before reading ccsidr_el1 */
b.ge loop_set
ret
-ENDPROC(__asm_flush_dcache_level)
+ENDPROC(__asm_dcache_level)
+.popsection
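+/*
+ * Note: set/way maintenance (dc isw and friends) acts only on the
+ * caches of the core executing it and is not broadcast to other
+ * cores, so it is only suitable for whole-cache flushes around init;
+ * memory ranges are maintained by va (dc civac/ivac) instead.
+ */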
/*
* void __asm_flush_dcache_all(int invalidate_only)
*
- * x0: 0 flush & invalidate, 1 invalidate only
+ * x0: 0 clean & invalidate, 1 invalidate only
*
- * clean and invalidate all data cache by SET/WAY.
+ * flush or invalidate all levels of data cache by SET/WAY.
*/
+.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
mov x1, x0 /* x1 <- invalidate_only flag */
dsb sy /* ensure ordering with prior memory accesses */
and x12, x12, #7 /* x12 <- cache type */
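+/*
+ * clidr_el1 ctype values: 0 no cache, 1 icache only, 2 dcache only,
+ * 3 separate i/d, 4 unified; below 2 there is no data cache to clean.
+ */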
cmp x12, #2
b.lt skip /* skip if no cache or icache */
- bl __asm_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
+ bl __asm_dcache_level /* x1 = 0 clean & invalidate, 1 invalidate only */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0 /* stop when level reaches loc (x11) */
finished:
ret
ENDPROC(__asm_dcache_all)
+.popsection
+.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
mov x0, #0
b __asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
+.popsection
+.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
mov x0, #0x1
b __asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
+.popsection
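+/*
+ * For reference, a minimal sketch of the C-side callers, assuming the
+ * generic wrapper names used elsewhere in u-boot (the exact wrappers
+ * may differ):
+ *
+ *	void flush_dcache_all(void)
+ *	{
+ *		__asm_flush_dcache_all();
+ *		__asm_flush_l3_dcache();
+ *	}
+ *
+ *	void invalidate_dcache_all(void)
+ *	{
+ *		__asm_invalidate_dcache_all();
+ *	}
+ */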
/*
* void __asm_flush_dcache_range(start, end)
* x0: start address
* x1: end address
*/
+.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
mrs x3, ctr_el0 /* read cache type register */
lsr x3, x3, #16 /* move ctr_el0.dminline (bits 19:16) down */
dsb sy /* wait for the clean & invalidate to complete */
ret
ENDPROC(__asm_flush_dcache_range)
+.popsection
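+/*
+ * Worked example of the line-size math: ctr_el0.dminline is log2 of
+ * the smallest d-cache line in words, so for a 64-byte line dminline
+ * is 4 and the loop step is 4 bytes/word << 4 = 64 bytes.
+ */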
+/*
+ * void __asm_invalidate_dcache_range(start, end)
+ *
+ * invalidate data cache in the range
+ *
+ * x0: start address
+ * x1: end address
+ */
+.pushsection .text.__asm_invalidate_dcache_range, "ax"
+ENTRY(__asm_invalidate_dcache_range)
+ mrs x3, ctr_el0
+ ubfm x3, x3, #16, #19 /* x3 <- ctr_el0.dminline: log2(words per line) */
+ mov x2, #4 /* bytes per word */
+ lsl x2, x2, x3 /* cache line size in bytes */
+
+ /* x2 <- minimal cache line size in cache system */
+ sub x3, x2, #1 /* x3 <- line size mask */
+ bic x0, x0, x3 /* align start address down to a line boundary */
+1: dc ivac, x0 /* invalidate data or unified cache */
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy /* wait for the invalidates to complete */
+ ret
+ENDPROC(__asm_invalidate_dcache_range)
+.popsection
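+/*
+ * Note: dc ivac discards whole lines, so if start/end are not
+ * line-aligned, dirty data sharing the first or last line is silently
+ * lost; callers must pass cache-line-aligned boundaries.
+ */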
/*
* void __asm_invalidate_icache_all(void)
*
* invalidate all icache entries.
*/
+.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
ic ialluis
isb sy
ret
ENDPROC(__asm_invalidate_icache_all)
+.popsection
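+/*
+ * ic ialluis invalidates all instruction caches to the point of
+ * unification for the inner shareable domain; the isb ensures later
+ * instruction fetches cannot hit stale entries.
+ */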
+
+.pushsection .text.__asm_invalidate_l3_dcache, "ax"
+ENTRY(__asm_invalidate_l3_dcache)
+ mov x0, #0 /* return status as success */
+ ret
+ENDPROC(__asm_invalidate_l3_dcache)
+ .weak __asm_invalidate_l3_dcache
+.popsection
+
+.pushsection .text.__asm_flush_l3_dcache, "ax"
+ENTRY(__asm_flush_l3_dcache)
+ mov x0, #0 /* return status as success */
+ ret
+ENDPROC(__asm_flush_l3_dcache)
+ .weak __asm_flush_l3_dcache
+.popsection
-ENTRY(__asm_flush_l3_cache)
+.pushsection .text.__asm_invalidate_l3_icache, "ax"
+ENTRY(__asm_invalidate_l3_icache)
mov x0, #0 /* return status as success */
ret
-ENDPROC(__asm_flush_l3_cache)
- .weak __asm_flush_l3_cache
+ENDPROC(__asm_invalidate_l3_icache)
+ .weak __asm_invalidate_l3_icache
+.popsection
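+/*
+ * The three l3 helpers above are weak stubs that simply report
+ * success; platforms with an external l3 cache are expected to
+ * override them in soc-specific code.
+ */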
/*
* void __asm_switch_ttbr(ulong new_ttbr)
*
* Safely switches to a new page table.
*/
+.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
/* x2 = SCTLR (alive throughout the function) */
switch_el x4, 3f, 2f, 1f /* dispatch on the current exception level */
ret x3 /* return via the address stashed in x3 */
ENDPROC(__asm_switch_ttbr)
+.popsection