#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
{
        struct { u64 d[2]; } desc = { { pcid, addr } };

        /*
         * The memory clobber is because the whole point is to invalidate
         * stale TLB entries and, especially if we're flushing global
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         *
         * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
         * invpcid (%rcx), %rax in long mode.
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

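/*
 * For reference: the descriptor built above is the 16-byte INVPCID memory
 * operand, with the PCID in bits 11:0 of the first quadword (the remaining
 * bits must be zero) and the linear address in the second quadword; the
 * register operand selects one of the four invalidation types below.
 */
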
#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

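/*
 * The single-address and single-context types consume the descriptor's
 * PCID/address fields; the two "all" variants apply to every PCID, which
 * is why the helpers below simply pass zeroes for them.
 */
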
/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
                                     unsigned long addr)
{
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
        struct mm_struct *active_mm;
        int state;

        /* last user mm's ctx id */
        u64 last_ctx_id;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

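/* Toggle (XOR) the given bits in this cpu's CR4 and its shadow. */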
static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

/*
 * Declare a couple of kaiser interfaces here for convenience,
 * to avoid the need for asm/kaiser.h in unexpected places.
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern int kaiser_enabled;
extern void kaiser_setup_pcid(void);
extern void kaiser_flush_tlb_on_return_to_user(void);
#else
#define kaiser_enabled 0
static inline void kaiser_setup_pcid(void)
{
}
static inline void kaiser_flush_tlb_on_return_to_user(void)
{
}
#endif

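/*
 * Flush the TLB for the current address space by rewriting CR3. This
 * drops non-global entries only; global kernel mappings survive.
 */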
static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change during a
         * task switch and therefore we must not be preempted while we write CR3
         * back:
         */
        preempt_disable();
        if (kaiser_enabled && this_cpu_has(X86_FEATURE_PCID))
                kaiser_flush_tlb_on_return_to_user();
        native_write_cr3(native_read_cr3());
        preempt_enable();
}

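/*
 * Flush everything, including global pages, either by toggling CR4.PGE or,
 * when PGE is off, by falling back to a CR3 write. The caller must have
 * interrupts disabled.
 */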
static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if (cr4 & X86_CR4_PGE) {
                /* clear PGE and flush TLB of all entries */
                native_write_cr4(cr4 & ~X86_CR4_PGE);
                /* restore PGE as it was before */
                native_write_cr4(cr4);
        } else {
                /* do it with cr3, letting kaiser flush user PCID */
                __native_flush_tlb();
        }
}

static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;

        if (this_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 *
                 * Note, this works with CR4.PCIDE=0 or 1.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);
        __native_flush_tlb_global_irq_disabled();
        raw_local_irq_restore(flags);
}

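/*
 * Flush a single address. When KAISER provides separate kernel and user
 * ASIDs, the address has to be invalidated in both of them.
 */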
static inline void __native_flush_tlb_single(unsigned long addr)
{
        /*
         * SIMICS #GP's if you run INVPCID with type 2/3
         * and X86_CR4_PCIDE clear.  Shame!
         *
         * The ASIDs used below are hard-coded.  But, we must not
         * call invpcid(type=1/2) before CR4.PCIDE=1.  Just call
         * invlpg in the case we are called early.
         */
        if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
                if (kaiser_enabled && this_cpu_has(X86_FEATURE_PCID))
                        kaiser_flush_tlb_on_return_to_user();
                asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
                return;
        }
        /* Flush the address out of both PCIDs. */
        /*
         * An optimization here might be to determine addresses
         * that are only kernel-mapped and only flush the kernel
         * ASID.  But, userspace flushes are probably much more
         * important performance-wise.
         *
         * Make sure to do only a single invpcid when KAISER is
         * disabled and we have only a single ASID.
         */
        if (kaiser_enabled)
                invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
        invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
}

static inline void __flush_tlb_all(void)
{
        __flush_tlb_global();
        /*
         * Note: if we somehow had PCID but not PGE, then this wouldn't work --
         * we'd end up flushing kernel translations for the current ASID but
         * we might fail to flush kernel translations for other cached ASIDs.
         *
         * To avoid this issue, we force PCID off if PGE is off.
         */
}

static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

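/*
 * Illustrative usage only (not part of the original header): after a
 * driver rewrites PTEs covering a user VMA, the conventional call is
 *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 * which expands to flush_tlb_mm_range() declared below.
 */
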
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm,
                             unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

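/*
 * cpu_tlbstate.state: TLBSTATE_OK means this CPU is actively using its mm
 * and must honour flush IPIs; TLBSTATE_LAZY means it is merely borrowing
 * active_mm (lazy TLB mode) and can opt out of further flushes for it.
 */
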
static inline void reset_lazy_tlbstate(void)
{
        this_cpu_write(cpu_tlbstate.state, 0);
        this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */