brcm47xx: rename target to bcm47xx
target/linux/bcm47xx/patches-4.14/159-cpu_fixes.patch (oweals/openwrt.git)
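
This patch carries the long-standing BCM4710 cache workarounds. The
underlying hardware erratum is not publicly documented, so the rationale
given here is inferred from what the code does:

- Hit-type cache operations appear to be unreliable on the BCM4710A0
  (BMIPS32 rev 0) core. BCM4710_DUMMY_RREG() issues an uncached dummy
  read from the SSB enumeration space through KSEG1 before each
  affected cacheop.
- BCM4710_FILL_TLB() and BCM4710_PROTECTED_FILL_TLB() touch the target
  address first, so the cacheop itself cannot raise a TLB refill
  exception; the protected variant uses get_dbe() to survive a bus
  error.
- The unrolled blast_*_range loops are replaced by a plain per-line
  loop, which allows the dummy read to be issued before every cacheop.
- Extra nops pad the slots in front of eret in stackframe.h, genex.S
  and the generated TLB handlers, apparently to avoid a hazard on this
  core.
- The workarounds are enabled at runtime in r4k_cache_init() only when
  a BCM4710A0 CPU is detected. coherency_setup() is then called through
  its uncached KSEG1 alias; the BMIPS3300 case additionally enables the
  I- and D-caches via the CP0 diagnostic register.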
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -26,6 +26,38 @@
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
 
+#if defined(CONFIG_BCM47XX) && !defined(CONFIG_CPU_MIPS32_R2)
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() bcm4710_dummy_rreg()
+
+static inline unsigned long bcm4710_dummy_rreg(void)
+{
+      return *(volatile unsigned long *)(KSEG1ADDR(SSB_ENUM_BASE));
+}
+
+#define BCM4710_FILL_TLB(addr) bcm4710_fill_tlb((void *)(addr))
+
+static inline unsigned long bcm4710_fill_tlb(void *addr)
+{
+      return *(unsigned long *)addr;
+}
+
+#define BCM4710_PROTECTED_FILL_TLB(addr) bcm4710_protected_fill_tlb((void *)(addr))
+
+static inline void bcm4710_protected_fill_tlb(void *addr)
+{
+      unsigned long x;
+      get_dbe(x, (unsigned long *)addr);
+}
+
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
@@ -99,6 +131,7 @@ static inline void flush_icache_line_ind
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
        __dflush_prologue
+       BCM4710_DUMMY_RREG();
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
 }
@@ -126,6 +159,7 @@ static inline void flush_icache_line(uns
 static inline void flush_dcache_line(unsigned long addr)
 {
        __dflush_prologue
+       BCM4710_DUMMY_RREG();
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
 }
@@ -133,6 +167,7 @@ static inline void flush_dcache_line(uns
 static inline void invalidate_dcache_line(unsigned long addr)
 {
        __dflush_prologue
+       BCM4710_DUMMY_RREG();
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
 }
@@ -206,6 +241,7 @@ static inline int protected_flush_icache
 #ifdef CONFIG_EVA
                return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
+               BCM4710_DUMMY_RREG();
                return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
        }
@@ -219,6 +255,7 @@ static inline int protected_flush_icache
  */
 static inline int protected_writeback_dcache_line(unsigned long addr)
 {
+       BCM4710_DUMMY_RREG();
 #ifdef CONFIG_EVA
        return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
@@ -576,8 +613,51 @@ static inline void invalidate_tcache_pag
                : "r" (base),                                           \
                  "i" (op));
 
+static inline void blast_dcache(void)
+{
+       unsigned long start = KSEG0;
+       unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+       unsigned long end = (start + dcache_size);
+
+       do {
+               BCM4710_DUMMY_RREG();
+               cache_op(Index_Writeback_Inv_D, start);
+               start += current_cpu_data.dcache.linesz;
+       } while (start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+       unsigned long start = page;
+       unsigned long end = start + PAGE_SIZE;
+
+       BCM4710_FILL_TLB(start);
+       do {
+               BCM4710_DUMMY_RREG();
+               cache_op(Hit_Writeback_Inv_D, start);
+               start += current_cpu_data.dcache.linesz;
+       } while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+       unsigned long start = page;
+       unsigned long end = start + PAGE_SIZE;
+       unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+       unsigned long ws_end = current_cpu_data.dcache.ways <<
+                              current_cpu_data.dcache.waybit;
+       unsigned long ws, addr;
+       for (ws = 0; ws < ws_end; ws += ws_inc) {
+               start = page + ws;
+               for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+                       BCM4710_DUMMY_RREG();
+                       cache_op(Index_Writeback_Inv_D, addr);
+               }
+       }
+}
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)   \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra, war) \
 static inline void extra##blast_##pfx##cache##lsize(void)              \
 {                                                                      \
        unsigned long start = INDEX_BASE;                               \
@@ -589,6 +669,7 @@ static inline void extra##blast_##pfx##c
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
+       war                                                             \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
@@ -603,6 +684,7 @@ static inline void extra##blast_##pfx##c
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
+       war                                                             \
        do {                                                            \
                cache##lsize##_unroll32(start, hitop);                  \
                start += lsize * 32;                                    \
@@ -621,6 +703,8 @@ static inline void extra##blast_##pfx##c
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
+       war                                                             \
+                                                                       \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
@@ -630,26 +714,26 @@ static inline void extra##blast_##pfx##c
        __##pfx##flush_epilogue                                         \
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, , )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, , )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, , )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, , )
 
 #define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
@@ -678,53 +762,23 @@ __BUILD_BLAST_USER_CACHE(d, dcache, Inde
 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)       \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra, war, war2)    \
 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
 {                                                                      \
        unsigned long lsize = cpu_##desc##_line_size();                 \
-       unsigned long lsize_2 = lsize * 2;                              \
-       unsigned long lsize_3 = lsize * 3;                              \
-       unsigned long lsize_4 = lsize * 4;                              \
-       unsigned long lsize_5 = lsize * 5;                              \
-       unsigned long lsize_6 = lsize * 6;                              \
-       unsigned long lsize_7 = lsize * 7;                              \
-       unsigned long lsize_8 = lsize * 8;                              \
        unsigned long addr = start & ~(lsize - 1);                      \
-       unsigned long aend = (end + lsize - 1) & ~(lsize - 1);          \
-       int lines = (aend - addr) / lsize;                              \
+       unsigned long aend = (end - 1) & ~(lsize - 1);                  \
+       war                                                             \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
-       while (lines >= 8) {                                            \
-               prot##cache_op(hitop, addr);                            \
-               prot##cache_op(hitop, addr + lsize);                    \
-               prot##cache_op(hitop, addr + lsize_2);                  \
-               prot##cache_op(hitop, addr + lsize_3);                  \
-               prot##cache_op(hitop, addr + lsize_4);                  \
-               prot##cache_op(hitop, addr + lsize_5);                  \
-               prot##cache_op(hitop, addr + lsize_6);                  \
-               prot##cache_op(hitop, addr + lsize_7);                  \
-               addr += lsize_8;                                        \
-               lines -= 8;                                             \
-       }                                                               \
-                                                                       \
-       if (lines & 0x4) {                                              \
-               prot##cache_op(hitop, addr);                            \
-               prot##cache_op(hitop, addr + lsize);                    \
-               prot##cache_op(hitop, addr + lsize_2);                  \
-               prot##cache_op(hitop, addr + lsize_3);                  \
-               addr += lsize_4;                                        \
-       }                                                               \
-                                                                       \
-       if (lines & 0x2) {                                              \
-               prot##cache_op(hitop, addr);                            \
-               prot##cache_op(hitop, addr + lsize);                    \
-               addr += lsize_2;                                        \
-       }                                                               \
-                                                                       \
-       if (lines & 0x1) {                                              \
+       while (1) {                                                     \
+               war2                                                    \
                prot##cache_op(hitop, addr);                            \
+               if (addr == aend)                                       \
+                       break;                                          \
+               addr += lsize;                                          \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
@@ -732,8 +786,8 @@ static inline void prot##extra##blast_##
 
 #ifndef CONFIG_EVA
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, , BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, , , )
 
 #else
 
@@ -770,15 +824,15 @@ __BUILD_PROT_BLAST_CACHE_RANGE(d, dcache
 __BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
 
 #endif
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, , , )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
-       protected_, loongson2_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
+       protected_, loongson2_, , )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , , BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , , , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , , , )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , , , BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , , , )
 
 /* Currently, this is very specific to Loongson-3 */
 #define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)     \
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -428,6 +428,10 @@
                eretnc
 #else
                .set    arch=r4000
+#ifdef CONFIG_BCM47XX
+               nop
+               nop
+#endif
                eret
                .set    mips0
 #endif
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -21,6 +21,19 @@
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_BCM47XX
+# ifdef eret
+#  undef eret
+# endif
+# define eret                                  \
+       .set push;                              \
+       .set noreorder;                         \
+        nop;                                   \
+        nop;                                   \
+        eret;                                  \
+       .set pop;
+#endif
+
        __INIT
 
 /*
@@ -32,6 +45,9 @@
 NESTED(except_vec3_generic, 0, sp)
        .set    push
        .set    noat
+#ifdef CONFIG_BCM47XX
+       nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
        mfc0    k0, CP0_INDEX
 #endif
@@ -55,6 +71,9 @@ NESTED(except_vec3_r4000, 0, sp)
        .set    push
        .set    arch=r4000
        .set    noat
+#ifdef CONFIG_BCM47XX
+       nop
+#endif
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
        andi    k1, k1, 0x7c
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -39,6 +39,9 @@
 #include <asm/dma-coherence.h>
 #include <asm/mips-cps.h>
 
+/* For enabling BCM4710 cache workarounds */
+static int bcm4710;
+
 /*
  * Bits describing what cache ops an SMP callback function may perform.
  *
@@ -190,6 +193,9 @@ static void r4k_blast_dcache_user_page_s
 {
        unsigned long  dc_lsize = cpu_dcache_line_size();
 
+       if (bcm4710)
+               r4k_blast_dcache_page = blast_dcache_page;
+       else
        if (dc_lsize == 0)
                r4k_blast_dcache_user_page = (void *)cache_noop;
        else if (dc_lsize == 16)
@@ -208,6 +214,9 @@ static void r4k_blast_dcache_page_indexe
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
+       if (bcm4710)
+               r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+       else
        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
@@ -227,6 +236,9 @@ static void r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
+       if (bcm4710)
+               r4k_blast_dcache = blast_dcache;
+       else
        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
@@ -986,6 +998,8 @@ static void local_r4k_flush_cache_sigtra
        }
 
        R4600_HIT_CACHEOP_WAR_IMPL;
+       BCM4710_PROTECTED_FILL_TLB(addr);
+       BCM4710_PROTECTED_FILL_TLB(addr + 4);
        if (!cpu_has_ic_fills_f_dc) {
                if (dc_lsize)
                        vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
@@ -1880,6 +1894,17 @@ static void coherency_setup(void)
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
+       case CPU_BMIPS3300:
+               {
+                       u32 cm;
+                       cm = read_c0_diag();
+                       /* Enable icache */
+                       cm |= (1U << 31);
+                       /* Enable dcache */
+                       cm |= (1U << 30);
+                       write_c0_diag(cm);
+               }
+               break;
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
@@ -1926,6 +1951,15 @@ void r4k_cache_init(void)
        extern void build_copy_page(void);
        struct cpuinfo_mips *c = &current_cpu_data;
 
+       /* Check if special workarounds are required */
+#if defined(CONFIG_BCM47XX) && !defined(CONFIG_CPU_MIPS32_R2)
+       if (current_cpu_data.cputype == CPU_BMIPS32 && (current_cpu_data.processor_id & 0xff) == 0) {
+               printk(KERN_INFO "Enabling BCM4710A0 cache workarounds.\n");
+               bcm4710 = 1;
+       } else
+#endif
+               bcm4710 = 0;
+
        probe_pcache();
        probe_vcache();
        setup_scache();
@@ -2004,7 +2038,15 @@ void r4k_cache_init(void)
         */
        local_r4k___flush_cache_all(NULL);
 
+#ifdef CONFIG_BCM47XX
+       {
+               static void (*_coherency_setup)(void);
+               _coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+               _coherency_setup();
+       }
+#else
        coherency_setup();
+#endif
        board_cache_error_setup = r4k_cache_error_setup;
 
        /*
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -983,6 +983,9 @@ void build_get_pgde32(u32 **p, unsigned
                uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
                uasm_i_addu(p, ptr, tmp, ptr);
 #else
+#ifdef CONFIG_BCM47XX
+               uasm_i_nop(p);
+#endif
                UASM_i_LA_mostly(p, ptr, pgdc);
 #endif
                uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
@@ -1344,6 +1347,9 @@ static void build_r4000_tlb_refill_handl
 #ifdef CONFIG_64BIT
                build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
+# ifdef CONFIG_BCM47XX
+               uasm_i_nop(&p);
+# endif
                build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
@@ -1355,6 +1361,9 @@ static void build_r4000_tlb_refill_handl
                build_update_entries(&p, K0, K1);
                build_tlb_write_entry(&p, &l, &r, tlb_random);
                uasm_l_leave(&l, p);
+#ifdef CONFIG_BCM47XX
+               uasm_i_nop(&p);
+#endif
                uasm_i_eret(&p); /* return from trap */
        }
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2064,6 +2073,9 @@ build_r4000_tlbchange_handler_head(u32 *
 #ifdef CONFIG_64BIT
        build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
+# ifdef CONFIG_BCM47XX
+       uasm_i_nop(p);
+# endif
        build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
@@ -2110,6 +2122,9 @@ build_r4000_tlbchange_handler_tail(u32 *
        build_tlb_write_entry(p, l, r, tlb_indexed);
        uasm_l_leave(l, *p);
        build_restore_work_registers(p);
+#ifdef CONFIG_BCM47XX
+       uasm_i_nop(p);
+#endif
        uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT