// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-data-cache (L1 D$ or SL$) invalidate operation or a data cache
 * disable operation may result in unexpected behavior and data loss even if
 * we flush the data cache right before the invalidation. That may happen if
 * we store any context on the stack (as we do with the BLINK register before
 * a function call).
 * BLINK is the register where the return address is automatically saved when
 * we make a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 * }
 *
 * ...
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * ...
 *
 * Now let's see what really happens during execution of that code:
 *
 * |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         return [jump to BLINK]
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 * |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                    ![point 3]
 *         // We lose the return address of invalidate_dcache_all:
 *         // we saved it on the stack and invalidated the L1 D$ after that!
 *         return [jump to BLINK]
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We no longer have this data in the L1 dcache as we invalidated it
 *     // in [point 3], so we fetch it from the next memory level (for
 *     // example, DDR memory). But in memory we have the value saved in
 *     // [point 1], which is the return address of flush_dcache_all
 *     // (instead of the address inside the current invalidate_dcache_all
 *     // invocation, which we saved in [point 2]!)
 *     return [jump to BLINK]
 * // As BLINK now points back into invalidate_dcache_all, we call it again
 * // and loop forever.
 *
 * Fortunately we can fix that by doing the flush & invalidation of the D$
 * with a single instruction (instead of a separate flush and invalidate
 * pair) and by forcing the functions to be inlined with the
 * '__attribute__((always_inline))' gcc attribute, so that there is no
 * function call (and hence no BLINK store) between the cache flush and the
 * cache disable.
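 *
 * A minimal sketch of the safe pattern (illustrative only; the helper name
 * dc_flush_n_inv is made up, it is not the exact code used below):
 *
 *   // always_inline: no call, so no BLINK spill onto a line we invalidate
 *   static inline __attribute__((always_inline)) void dc_flush_n_inv(void)
 *   {
 *       __dc_entire_op(OP_FLUSH_N_INV); // flush & invalidate in one go
 *   }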
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW but we don't support them in SW.
 *
 * Configuration 1: L1 I$ and L1 D$ sit on top of the CPU and talk directly
 * to main memory (no SLC, no IOC).
 *
 * Configuration 2: L1 I$ and L1 D$ are backed by an L2 (SL$) in front of
 * main memory. The SL$ is always on for ARCv2 HS < 3.0 and may be turned
 * on/off for ARCv2 HS >= 3.0.
 *
 * Configuration 3: as configuration 2, but with an IOC attached next to the
 * L2 (SL$). The SL$ always must be on; the IOC may be turned on/off.
 */

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)
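
/* The OP_* values above are passed as the 'op'/'cacheop' argument of the cache helpers below */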

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200
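
/*
 * For example (assuming a 64-byte L1 line, i.e. l1_line_sz == 64):
 * 0x80000043 & CACHE_LINE_MASK == 0x80000040, the start of the cache line
 * that contains the address.
 */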
#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))

/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to a plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for more details about why we need always_inline functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
static inlined_cachefunc void __slc_entire_op(const int op);
static inlined_cachefunc bool ioc_enabled(void);

static inline bool pae_exists(void)
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

static inlined_cachefunc bool icache_exists(void)
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;

static inlined_cachefunc bool icache_enabled(void)
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);

static inlined_cachefunc bool dcache_exists(void)
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;

static inlined_cachefunc bool dcache_enabled(void)
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);

static inlined_cachefunc bool slc_exists(void)
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;

enum slc_dis_status {
	ST_SLC_MISSING,
	ST_SLC_NO_DISABLE_CTRL,
	ST_SLC_DISABLE_CTRL,
};

/*
 * ARCv1                                    -> ST_SLC_MISSING
 * ARCv2 && SLC absent                      -> ST_SLC_MISSING
 * ARCv2 && SLC exists && SLC version <= 2  -> ST_SLC_NO_DISABLE_CTRL
 * ARCv2 && SLC exists && SLC version > 2   -> ST_SLC_DISABLE_CTRL
 */
static inlined_cachefunc enum slc_dis_status slc_disable_supported(void)
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		if (sbcr.fields.ver == 0)
			return ST_SLC_MISSING;
		else if (sbcr.fields.ver <= 2)
			return ST_SLC_NO_DISABLE_CTRL;
		else
			return ST_SLC_DISABLE_CTRL;
	}

	return ST_SLC_MISSING;

static inlined_cachefunc bool __slc_enabled(void)
	return !(read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_DIS);

static inlined_cachefunc void __slc_enable(void)
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl &= ~SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

static inlined_cachefunc void __slc_disable(void)
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl |= SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

static inlined_cachefunc bool slc_enabled(void)
	enum slc_dis_status slc_status = slc_disable_supported();

	if (slc_status == ST_SLC_MISSING)
		return false;
	else if (slc_status == ST_SLC_NO_DISABLE_CTRL)
		return true;

	return __slc_enabled();

static inlined_cachefunc bool slc_data_bypass(void)
	/*
	 * If L1 data cache is disabled SL$ is bypassed and all load/store
	 * requests are sent directly to main memory.
	 */
	return !dcache_enabled();

void slc_enable(void)
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	__slc_enable();

/* TODO: warn if we are not able to disable SLC */
void slc_disable(void)
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	/* we don't support SLC disabling if we use IOC */
	if (ioc_enabled())
		return;

	if (!__slc_enabled())
		return;

	/*
	 * We need to flush the L1 D$ to guarantee that we won't have any
	 * writeback operations during SLC disabling.
	 */
	__dc_entire_op(OP_FLUSH);
	__slc_entire_op(OP_FLUSH_N_INV);
	__slc_disable();

static inlined_cachefunc bool ioc_exists(void)
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;

static inlined_cachefunc bool ioc_enabled(void)
	/*
	 * We only check the CONFIG option instead of the IOC HW state because
	 * the IOC must be disabled by default.
	 */
	if (is_ioc_enabled())

static inlined_cachefunc void __slc_entire_op(const int op)
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

static void slc_upper_region_init(void)
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
#ifdef CONFIG_ISA_ARCV2
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;
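
	/*
	 * Worked example (illustrative numbers): with a 128-byte SLC line,
	 * flushing sz = 1 byte at paddr = 0x80000000 gives
	 * end = 0x80000000 + 1 + 127 = 0x80000080, so END always lands in a
	 * line different from START.
	 */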

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
#endif /* CONFIG_ISA_ARCV2 */

static void arc_ioc_setup(void)
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	if (!slc_enabled())
		panic("Trying to enable IOC but SLC is disabled");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be a power of 2 and at least 4KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");
	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KiB,
	 * so setting 0x11 implies 512 MiB, 0x12 implies 1 GiB, ...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);
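
	/*
	 * Worked example (illustrative numbers): for ap_size = 512 MiB,
	 * ap_size / 1024 = 2^19 KiB, order_base_2() of that is 19 and
	 * 19 - 2 = 17 = 0x11, matching the encoding described above.
	 */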

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
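	/* Only once the aperture is fully described is the IOC switched on */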
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);

static void read_decode_cache_bcr_arcv2(void)
#ifdef CONFIG_ISA_ARCV2
	union bcr_slc_cfg slc_cfg;

	slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
	gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

	/*
	 * We don't support configurations where L1 I$ or L1 D$ is absent but
	 * SL$ exists. See [ NOTE 2 ] for more details.
	 */
	if (!icache_exists() || !dcache_exists())
		panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
#endif /* CONFIG_ISA_ARCV2 */

void read_decode_cache_bcr(void)
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length as there are no
	 * per-line ops on the I$; we only do a full invalidation of it on
	 * relocation and right before jumping to the OS.
	 * Still, we check for the insane configuration of a zero-encoded
	 * line length in the presence of a version field in the I$ BCR,
	 * just in case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

void cache_init(void)
	read_decode_cache_bcr();

	read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();

int icache_status(void)
	return icache_enabled();

void icache_enable(void)
	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
		      ~IC_CTRL_CACHE_DISABLE);

void icache_disable(void)
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
	if (!icache_enabled())
		return;

	/* Any write to the IC_IVIC register triggers invalidation of the entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);

	/*
	 * As per the ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */

	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */

void invalidate_icache_all(void)
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);

int dcache_status(void)
	return dcache_enabled();

void dcache_enable(void)
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));

void dcache_disable(void)
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data once the L1 D$ is disabled,
	 * we need to flush it first. We also invalidate the SLC to avoid any
	 * inconsistent-data problems after enabling the L1 D$ again with the
	 * dcache_enable function.
	 */
	__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);
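
	/*
	 * For example (illustrative numbers): with a 64-byte line, a 4-byte
	 * region starting at 0x8000003e straddles a line boundary; the
	 * adjustment above turns that into sz = 0x3e + 4 = 66 starting at
	 * 0x80000000, and DIV_ROUND_UP(66, 64) = 2 lines are operated on.
	 */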

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}

static inlined_cachefunc void __before_dc_op(const int op)
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (!(op & OP_FLUSH))	/* i.e. OP_INV */
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);

static inlined_cachefunc void __after_dc_op(const int op)
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

static inlined_cachefunc void __dc_entire_op(const int cacheop)
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
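
/*
 * __dc_line_op() is the per-range counterpart of __dc_entire_op() and is
 * used by invalidate_dcache_range()/flush_dcache_range() below.
 */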

void invalidate_dcache_range(unsigned long start, unsigned long end)
	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);

void flush_dcache_range(unsigned long start, unsigned long end)
	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);

void flush_cache(unsigned long start, unsigned long size)
	flush_dcache_range(start, start + size);
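
/*
 * Typical usage from driver code (an illustrative sketch, not code from this
 * file; 'buf' and 'len' are hypothetical): push a freshly filled DMA buffer
 * out to memory before handing it to a device, e.g.
 *
 *	flush_cache((unsigned long)buf, roundup(len, ARCH_DMA_MINALIGN));
 */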

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we don't
 * need a pure invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) instead, because it's much safer. See [ NOTE 1 ] for more
 * details.
 */
void flush_n_invalidate_dcache_all(void)
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);

void flush_dcache_all(void)
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);

/*
 * This function cleans up all caches (and therefore syncs the I$ and D$);
 * it can be used for cleanup before launching Linux or to sync the caches
 * during U-Boot relocation.
 */
void sync_n_cleanup_cache_all(void)
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If the SL$ is bypassed for data it is used only for instructions
	 * and we shouldn't flush it, so invalidate it instead of
	 * flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();