// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"
struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};
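
/*
 * The three blocks above describe all 11 fixed-range MTRR MSRs. Each
 * MSR packs eight one-byte memory types, so together they hold the 88
 * fixed entries covering the first 1MB: 8 x 64KB, 16 x 16KB and
 * 64 x 4KB sub-ranges.
 */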
static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}
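
/*
 * Example: with 36 address bits, a PhysMask of 0xfff800000 (bits 23-35
 * set) gives -((0xfff800000 >> PAGE_SHIFT) | size_or_mask) << PAGE_SHIFT
 * = 0x800000, i.e. an 8MB contiguous range.
 */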
/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}
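
/*
 * These rules mirror the SDM's precedence for overlapping variable
 * ranges: UC wins over everything, WB combined with WT degrades to WT,
 * and any other conflicting pair (e.g. WC over WB) is treated as UC.
 */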
/**
 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
 *
 * Return the MTRR fixed memory type of 'start'.
 *
 * MTRR fixed entries are divided into the following ways:
 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
 *
 * Return Values:
 * MTRR_TYPE_(type)  - Matched memory type
 * MTRR_TYPE_INVALID - Unmatched
 */
static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
{
	int idx;

	if (start >= 0x100000)
		return MTRR_TYPE_INVALID;

	/* 0x0 - 0x7FFFF */
	if (start < 0x80000) {
		idx = 0;
		idx += (start >> 16);
		return mtrr_state.fixed_ranges[idx];
	/* 0x80000 - 0xBFFFF */
	} else if (start < 0xC0000) {
		idx = 1 * 8;
		idx += ((start - 0x80000) >> 14);
		return mtrr_state.fixed_ranges[idx];
	}

	/* 0xC0000 - 0xFFFFF */
	idx = 3 * 8;
	idx += ((start - 0xC0000) >> 12);
	return mtrr_state.fixed_ranges[idx];
}
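
/*
 * Worked example: start = 0x90000 lands in the 16KB block, so
 * idx = 1 * 8 + ((0x90000 - 0x80000) >> 14) = 12, i.e. the fifth 16KB
 * entry, because fixed_ranges[] stores the eight 64KB entries first.
 */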
/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across MTRR range and type
 *	    returned corresponds only to [start:*partial_end]. Caller has
 *	    to lookup again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	/* Make end inclusive instead of exclusive */
	end--;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		/* Skip ranges whose valid bit (bit 11 of PhysMask) is clear */
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 * (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 * (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}
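
/*
 * Example of the repeat protocol: with a single WB MTRR covering
 * [1GB, 2GB) and a UC default type, a lookup for [1.5GB, 2.5GB) first
 * returns WB with *repeat = 1 and *partial_end = 2GB. The caller then
 * re-queries [2GB, 2.5GB), gets the UC default, and check_type_overlap()
 * folds the two answers into an effective UC for the whole span.
 */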
/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 *
 * Output Argument:
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type, prev_type, is_uniform = 1, dummy;
	int repeat;
	u64 partial_end;

	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/*
	 * Look up the fixed ranges first, which take priority over
	 * the variable ranges.
	 */
	if ((start < 0x100000) &&
	    (mtrr_state.have_fixed) &&
	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
		/* Fixed-range lookups are never considered uniform */
		is_uniform = 0;
		type = mtrr_type_lookup_fixed(start, end);
		goto out;
	}

	/*
	 * Look up the variable ranges. Look for multiple ranges matching
	 * this address and pick the type as per MTRR precedence.
	 */
	type = mtrr_type_lookup_variable(start, end, &partial_end,
					 &repeat, &is_uniform);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR ranges and/or the default type. Do repeated lookups for
	 * that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		is_uniform = 0;
		type = mtrr_type_lookup_variable(start, end, &partial_end,
						 &repeat, &dummy);

		if (check_type_overlap(&prev_type, &type))
			goto out;
	}

	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
		type = MTRR_TYPE_WRBACK;

out:
	*uniform = is_uniform;
	return type;
}
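
/*
 * Typical use (a sketch, not a verbatim caller): the PAT code queries
 * the effective type of a physical range and uses 'uniform' to decide
 * whether a huge-page mapping would straddle differently-typed MTRRs:
 *
 *	u8 uniform;
 *	u8 type = mtrr_type_lookup(paddr, paddr + PMD_SIZE, &uniform);
 *	if (!uniform)
 *		... fall back to 4K mappings ...
 */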
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
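
/*
 * Layout note: frs[] is 88 mtrr_type bytes read as 32-bit words, two
 * words per MSR: the 64K MSR fills p[0..1], the two 16K MSRs p[2..5]
 * and the eight 4K MSRs p[6..21].
 */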
void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug("  %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			  (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
/* PAT setup for BP. We need to go through sync steps here */
void __init mtrr_bp_pat_init(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	/* Bit 10 is the fixed-range enable, bit 11 the global enable */
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
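
/*
 * Note: TOM2 (top of memory 2) marks the end of DRAM above 4GB on
 * AMD K8 and later; the WB shortcut for [4GB, TOM2) in
 * mtrr_type_lookup() relies on the value cached here.
 */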
/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
	pr_info("mtrr: corrected configuration.\n");
}
/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *		     differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *		      differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}
/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}

	return changed;
}
static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
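
/*
 * The ~0xcff mask above clears exactly the MTRRdefType fields this
 * code owns: bits 0-7 (default type), bit 10 (fixed-range enable) and
 * bit 11 (global MTRR enable); all other bits are preserved.
 */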
static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);

	/*
	 * Cache flushing is the most time-consuming step when programming
	 * the MTRRs. Fortunately, as per the Intel Software Development
	 * Manual, we can skip it if the processor supports cache self-
	 * snooping.
	 */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);

	/* Again, only flush caches if we have to. */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof(mask) * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
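
/*
 * Mask arithmetic note: base and size are in pages, and for a
 * power-of-two size "-size << PAGE_SHIFT" yields the PhysMask bit
 * pattern. E.g. a 64MB region (size = 0x4000 pages) gives
 * mask_lo = 0xfc000000 | 0x800 (the valid bit).
 */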
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
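
/*
 * Alignment check example: base = 0x100 pages, size = 0x40 pages is
 * valid because last = 0x13F; shifting both right while base ends in 0
 * and last ends in 1 strips six bits and leaves lbase == last == 0x4.
 * A misaligned pair such as base = 0x120, size = 0x40 stops early with
 * lbase != last.
 */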
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}
/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};