// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);
void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;

	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, such as Atom processors without SMEP, but no such
	 * hardware is known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
/*
 * Processors which have self-snooping capability can handle conflicting
 * memory type across CPUs by snooping their own cache. However, there
 * are CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}
static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
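/*
 * HWCAP2_RING3MWAIT reaches user space through the AT_HWCAP2 auxiliary
 * vector entry, so applications can test for ring 3 MONITOR/MWAIT
 * support with getauxval(AT_HWCAP2).
 */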
/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * Hypervisors are known to lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/*
	 * If any of the speculation control features are set, check the
	 * blacklist and clear the lot.
	 */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}
	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif
	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;
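	/*
	 * Those steppings advertise more physical address bits via CPUID
	 * than the 36 they actually implement, hence the fixup above.
	 */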
	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means that the TSC runs at
	 * a constant rate across P/T states and does not stop in deep
	 * C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets - we turn it off in that case explicitly).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}
	/* Penwell and Cloverview have a TSC which doesn't sleep during S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}
	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs:
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}
	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
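	/*
	 * Worked example (hypothetical CPUID values): with HTT set and
	 * EBX[23:16] = 16 reserved APIC IDs per package,
	 * get_count_order(16) = 4, i.e. the four low APIC-ID bits address
	 * CPUs within a package.
	 */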
	check_mpx_erratum(c);
	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}
#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Called from identify_secondary_cpu()? The boot CPU has cpu_index 0. */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
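	/*
	 * Worked example: a Pentium Pro (family 6, model 1, stepping 9)
	 * packs to 0x619 < 0x633, so SEP is cleared; a Pentium II Klamath
	 * at family 6, model 3, stepping 3 packs to exactly 0x633 and
	 * keeps it.
	 */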
	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}
	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}
	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;

	case 5:		/* Old Pentia: untested */
		break;

	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;

	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
#define X86_VMX_FEATURE_EPT_CAP_AD		0x00200000

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
	u32 msr_vpid_cap, msr_ept_cap;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);
	clear_cpu_cap(c, X86_FEATURE_EPT_AD);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
			set_cpu_cap(c, X86_FEATURE_EPT);
			rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
			      msr_ept_cap, msr_vpid_cap);
			if (msr_ept_cap & X86_VMX_FEATURE_EPT_CAP_AD)
				set_cpu_cap(c, X86_FEATURE_EPT_AD);
		}
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
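/*
 * The X86_FEATURE_* bits set above are software-defined capabilities;
 * they surface in /proc/cpuinfo as flags such as "ept" and "vpid", so
 * user space can see which VMX features the part implements.
 */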
#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
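/*
 * Worked example (hypothetical MSR value): tme_activate =
 * 0x0001000400000003 decodes as LOCKED = 1, ENABLED = 1, POLICY = 0
 * (AES-XTS-128), KEYID_BITS = 4 and CRYPTO_ALGS = 0x1, i.e. TME is on
 * and (1 << 4) - 1 = 15 MKTME KeyIDs are available.
 */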
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");

	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
		       tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}

detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
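	/*
	 * The "- 1" accounts for KeyID 0, which is reserved for TME
	 * itself rather than handed out for MKTME use.
	 */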
608 pr_info_once("x86/mktme: enabled by BIOS\n");
609 pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
611 pr_info_once("x86/mktme: disabled by BIOS\n");
614 if (mktme_status == MKTME_UNINITIALIZED) {
615 /* MKTME is usable */
616 mktme_status = MKTME_ENABLED;
620 * KeyID bits effectively lower the number of physical address
621 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
623 c->x86_phys_bits -= keyid_bits;
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Use the legacy cpuid leaves 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}
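	/*
	 * In CPUID leaf 0xa, EAX[7:0] is the architectural PMU version
	 * and EAX[15:8] the number of general-purpose counters per
	 * logical CPU, so the check above requires a versioned PMU with
	 * at least two counters.
	 */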
	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif
	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme(c);

	init_intel_misc_features(c);

	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
		tsx_enable();
	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
		tsx_disable();
}
#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif
#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
		    intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
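/*
 * Worked example (hypothetical leaf 2 output): EAX = 0x665b5001 means
 * AL = 0x01 (run the query once) and descriptors 0x50, 0x5b and 0x66;
 * 0x50 and 0x5b update the instruction and data TLB sizes above, while
 * 0x66 is a cache descriptor that intel_tlb_lookup() ignores.
 */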
static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);