/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
/*
 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		/* Bit 2 of the Loongson diagnostic register clears the ITLB. */
		write_c0_diag(4);
		break;
	default:
		break;
	}
}
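
/*
 * Only executable mappings need the ITLB flushed when a translation
 * changes; data-side translations are covered by the ordinary (d)TLB
 * operations below, hence the VM_EXEC check in the helper that follows.
 */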
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
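
/*
 * A note on UNIQUE_ENTRYHI(): "clearing" an entry really means rewriting
 * it with a VPN2 that can never match. The macro (see <asm/tlb.h>) derives
 * a distinct unmapped address from the entry index, roughly
 *
 *	UNIQUE_ENTRYHI(idx) ~ CKSEG0 + ((idx) << (PAGE_SHIFT + 1))
 *
 * CKSEG0 addresses never go through the TLB, and shifting the index by
 * PAGE_SHIFT + 1 keeps each entry's fake VPN2 distinct, so the dummy
 * entries can conflict neither with real translations nor with each other.
 */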
/* All entries common to a mm share an asid. To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
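
/*
 * Bumping the ASID is O(1): drop_mmu_context() hands the mm a fresh ASID,
 * flushing the whole TLB only when the ASID space wraps. Entries tagged
 * with the stale ASID can no longer match and are recycled naturally.
 */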
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}
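
/*
 * The size check above is a cost cutoff: invalidating costs one probe and
 * one indexed write per even/odd page pair, so once a range covers more
 * than half the TLB (or an eighth of the total when FTLB sets are
 * present), simply switching the mm to a new ASID is cheaper.
 */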
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}
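
/*
 * Unlike the ranged flush above, no ASID is or'ed into EntryHi here:
 * kernel mappings of interest are expected to carry the global bit, so
 * tlb_probe() matches them on VPN2 alone regardless of the current ASID.
 */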
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx >= 0) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			tlbw_use_hazard();
		}
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	flush_itlb();
	local_irq_restore(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}
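
/*
 * Why lo + (HPAGE_SIZE >> 7) for EntryLo1 in the huge page path: on the
 * standard R4k-style layout the PFN sits at bit 6 of EntryLo, i.e.
 * entrylo = (paddr >> 12) << 6 = paddr >> 6. EntryLo1 maps the second half
 * of the huge page, a byte offset of HPAGE_SIZE / 2, which therefore adds
 * (HPAGE_SIZE / 2) >> 6 = HPAGE_SIZE >> 7 to the EntryLo value.
 */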
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
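
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * platform code that needs a translation pinned for the lifetime of the
 * system could do, roughly,
 *
 *	add_wired_entry(lo0, lo1, vaddr & (PAGE_MASK << 1), PM_DEFAULT_MASK);
 *
 * where lo0/lo1 carry the PFN and cache attributes for the even and odd
 * page of the pair and vaddr is the virtual base. The entry is written at
 * the current wired index and c0_wired is raised past it, so random
 * replacement (tlbwr) can never evict it.
 */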
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
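
/*
 * The probe above relies on c0_pagemask only latching page sizes the CPU
 * implements: unsupported mask bits read back as zero, so reading back
 * exactly PM_HUGE_MASK proves the huge page size is available.
 */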
static int ntlb;

static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
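
/*
 * Example: booting with "ntlb=16" on a 64-entry TLB wires entries 0..47,
 * leaving only 16 entries for random replacement; see tlb_init() below.
 * This is primarily a debugging/benchmarking knob.
 */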
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);
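
/*
 * Entering a low-power CPU state can clobber coprocessor 0 TLB state, so
 * the notifier reprograms the TLB from scratch on CPU_PM_EXIT (or after a
 * failed entry). Note that r4k_tlb_configure() resets c0_wired to zero, so
 * wired entries added later via add_wired_entry() are not replayed here.
 */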