/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 */
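/*
 * The decode below uses the 32-bit ccsidr_el1 layout selected via
 * csselr_el1:
 *   LineSize      ccsidr[2:0]   = log2(line size in bytes) - 4
 *   Associativity ccsidr[12:3]  = number of ways - 1
 *   NumSets       ccsidr[27:13] = number of sets - 1
 * hence the #7, #0x3ff and #0x7fff masks in the code that follows.
 */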
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1		/* x12 <- cache level << 1 */
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */
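	/*
	 * The set/way loop below builds each dc operand as the ARMv8 ARM
	 * describes for DC CISW/ISW:
	 *   bits [3:1]     cache level << 1                  (x12)
	 *   bits [31:32-A] way number, A = log2(#ways)        (x6 << x5)
	 *   bits [L+S-1:L] set number, L = line length offset (x4 << x2)
	 */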
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
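/*
 * An illustrative direct call (the wrappers below are the normal entry
 * points; the immediate is only an example):
 *
 *	mov	x0, #0			// 0: clean & invalidate
 *	bl	__asm_dcache_all
 */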
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0			/* x1 <- 0 clean & invalidate, 1 invalidate only */
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* preserve the return address */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */
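	/*
	 * clidr_el1 describes each level in a 3-bit Ctype field (level n
	 * at bits [3n+2:3n]): 0 = no cache, 1 = I-cache only, 2 = D-cache
	 * only, 3 = separate I and D, 4 = unified.  Only values >= 2 hold
	 * data and need cleaning, hence the "cmp #2 / b.lt skip" below.
	 */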
loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level
	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15
finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 * x0: start address, x1: end address
 */
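/*
 * Illustrative call only (the symbol and size are hypothetical, not
 * defined in this file): clean+invalidate a buffer before a device
 * reads it.
 *
 *	ldr	x0, =dma_buf		// start address (hypothetical)
 *	add	x1, x0, #0x1000		// end = start + buffer size
 *	bl	__asm_flush_dcache_range
 */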
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- log2(words per cache line) */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
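	/*
	 * ctr_el0[19:16] (DminLine) is log2 of the smallest D-cache line
	 * in words, so the "4 << DminLine" computed above is that line
	 * size in bytes, used to step through the range.
	 */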
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a cache line */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 * x0: start address, x1: end address
 */
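/*
 * Note: a pure invalidate discards any dirty data in the range, so this
 * is only safe for buffers that are about to be overwritten, e.g. by
 * inbound DMA; use __asm_flush_dcache_range when the data must survive.
 */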
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19	/* x3 <- log2(words per cache line) */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a cache line */
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
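/*
 * ic ialluis invalidates all instruction caches to the point of
 * unification across the Inner Shareable domain; the following isb
 * ensures later instruction fetches see the effect.
 */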
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection

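/*
 * The three L3 routines above are weak stubs that just report success;
 * platforms with an external L3 / system cache are expected to provide
 * their own implementations.
 */
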
/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
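/*
 * Sequence: save the current SCTLR, clear M/C/I to turn the MMU and
 * caches off, invalidate the TLBs, install the new TTBR0, then restore
 * the saved SCTLR.  Each system-register write is followed by an isb so
 * it takes effect before execution continues.
 */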
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */
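	/*
	 * With SCTLR.M/C/I clear the core performs no translation table
	 * walks and no new cache allocations, so ttbr0 can be replaced
	 * without the MMU speculatively walking a stale or half-installed
	 * page table.
	 */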
	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection