[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/2] x86/MTRR: consolidation



- use a single set of manifest constants (the ones from msr-index.h)
- drop unnecessary MSR index constants
- get hvm_msr_{read,write}_intercept() in line with the rest of the
  MTRR emulation code regarding the number of emulated MTRRs
- remove use of hardcoded numbers where expressions can be used instead
  (the expressions at the same time serving as documentation)
- centrally check mtrr_state.have_fixed in get_fixed_ranges(), making
  the cpu_has_mtrr check in mtrr_save_fixed_ranges() unnecessary

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -11,15 +11,13 @@
 #include <asm/cpufeature.h>
 #include "mtrr.h"
 
-struct fixed_range_block {
-       int base_msr; /* start address of an MTRR block */
-       int ranges;   /* number of MTRRs in this block  */
-};
-
-static struct fixed_range_block fixed_range_blocks[] = {
-       { MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
-       { MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
-       { MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
+static const struct fixed_range_block {
+       uint32_t base_msr;   /* start address of an MTRR block */
+       unsigned int ranges; /* number of MTRRs in this block  */
+} fixed_range_blocks[] = {
+       { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) },
+       { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) },
+       { MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3) },
        {}
 };
 
@@ -30,28 +28,30 @@ struct mtrr_state mtrr_state = {};
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 {
-       rdmsrl(MTRRphysBase_MSR(index), vr->base);
-       rdmsrl(MTRRphysMask_MSR(index), vr->mask);
+       rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
+       rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
 }
 
 static void
 get_fixed_ranges(mtrr_type * frs)
 {
        uint64_t *p = (uint64_t *) frs;
-       int i;
+       const struct fixed_range_block *block;
 
-       rdmsrl(MTRRfix64K_00000_MSR, p[0]);
+       if (!mtrr_state.have_fixed)
+               return;
 
-       for (i = 0; i < 2; i++)
-               rdmsrl(MTRRfix16K_80000_MSR + i, p[1 + i]);
-       for (i = 0; i < 8; i++)
-               rdmsrl(MTRRfix4K_C0000_MSR + i, p[3 + i]);
+       for (block = fixed_range_blocks; block->ranges; ++block) {
+               unsigned int i;
+
+               for (i = 0; i < block->ranges; ++i, ++p)
+                       rdmsrl(block->base_msr + i, *p);
+       }
 }
 
 void mtrr_save_fixed_ranges(void *info)
 {
-       if (cpu_has_mtrr)
-               get_fixed_ranges(mtrr_state.fixed_ranges);
+       get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
 /*  Grab all of the MTRR state for this CPU into *state  */
@@ -69,20 +69,19 @@ void __init get_mtrr_state(void)
        } 
        vrs = mtrr_state.var_ranges;
 
-       rdmsrl(MTRRcap_MSR, msr_content);
+       rdmsrl(MSR_MTRRcap, msr_content);
        mtrr_state.have_fixed = (msr_content >> 8) & 1;
 
        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
-       if (mtrr_state.have_fixed)
-               get_fixed_ranges(mtrr_state.fixed_ranges);
+       get_fixed_ranges(mtrr_state.fixed_ranges);
 
-       rdmsrl(MTRRdefType_MSR, msr_content);
+       rdmsrl(MSR_MTRRdefType, msr_content);
        mtrr_state.def_type = (msr_content & 0xff);
        mtrr_state.enabled = (msr_content & 0xc00) >> 10;
 
        /* Store mtrr_cap for HVM MTRR virtualisation. */
-       rdmsrl(MTRRcap_MSR, mtrr_state.mtrr_cap);
+       rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap);
 }
 
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
@@ -163,8 +162,8 @@ static void generic_get_mtrr(unsigned in
 {
        uint64_t _mask, _base;
 
-       rdmsrl(MTRRphysMask_MSR(reg), _mask);
-       if ((_mask & 0x800) == 0) {
+       rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask);
+       if (!(_mask & MTRR_PHYSMASK_VALID)) {
                /*  Invalid (i.e. free) range  */
                *base = 0;
                *size = 0;
@@ -172,7 +171,7 @@ static void generic_get_mtrr(unsigned in
                return;
        }
 
-       rdmsrl(MTRRphysBase_MSR(reg), _base);
+       rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base);
 
        /* Work out the shifted address mask. */
        _mask = size_or_mask | (_mask >> PAGE_SHIFT);
@@ -210,7 +209,7 @@ static int set_mtrr_var_ranges(unsigned 
        uint64_t msr_content;
        int changed = FALSE;
 
-       rdmsrl(MTRRphysBase_MSR(index), msr_content);
+       rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content);
        lo = (uint32_t)msr_content;
        hi = (uint32_t)(msr_content >> 32);
        base_lo = (uint32_t)vr->base;
@@ -222,11 +221,11 @@ static int set_mtrr_var_ranges(unsigned 
        base_hi &= size_and_mask >> (32 - PAGE_SHIFT);
 
        if ((base_lo != lo) || (base_hi != hi)) {
-               mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base);
+               mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
                changed = TRUE;
        }
 
-       rdmsrl(MTRRphysMask_MSR(index), msr_content);
+       rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content);
        lo = (uint32_t)msr_content;
        hi = (uint32_t)(msr_content >> 32);
        mask_lo = (uint32_t)vr->mask;
@@ -238,7 +237,7 @@ static int set_mtrr_var_ranges(unsigned 
        mask_hi &= size_and_mask >> (32 - PAGE_SHIFT);
 
        if ((mask_lo != lo) || (mask_hi != hi)) {
-               mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask);
+               mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
                changed = TRUE;
        }
        return changed;
@@ -311,10 +310,10 @@ static void prepare_set(void)
        flush_tlb_local();
 
        /*  Save MTRR state */
-       rdmsrl(MTRRdefType_MSR, deftype);
+       rdmsrl(MSR_MTRRdefType, deftype);
 
        /*  Disable MTRRs, and set the default type to uncached  */
-       mtrr_wrmsr(MTRRdefType_MSR, deftype & ~0xcff);
+       mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
 }
 
 static void post_set(void)
@@ -323,7 +322,7 @@ static void post_set(void)
        flush_tlb_local();
 
        /* Intel (P6) standard MTRRs */
-       mtrr_wrmsr(MTRRdefType_MSR, deftype);
+       mtrr_wrmsr(MSR_MTRRdefType, deftype);
                
        /*  Enable caches  */
        write_cr0(read_cr0() & 0xbfffffff);
@@ -380,20 +379,20 @@ static void generic_set_mtrr(unsigned in
        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
-               mtrr_wrmsr(MTRRphysMask_MSR(reg), 0);
+               mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0);
                memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                uint32_t base_lo, base_hi, mask_lo, mask_hi;
 
                base_lo = base << PAGE_SHIFT | type;
                base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
-               mask_lo = -size << PAGE_SHIFT | 0x800;
+               mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID;
                mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
                vr->base = ((uint64_t)base_hi << 32) | base_lo;
                vr->mask = ((uint64_t)mask_hi << 32) | mask_lo;
 
-               mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base);
-               mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask);
+               mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base);
+               mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
        }
 
        post_set();
@@ -438,7 +437,7 @@ int generic_validate_add_page(unsigned l
 static int generic_have_wrcomb(void)
 {
        unsigned long config;
-       rdmsrl(MTRRcap_MSR, config);
+       rdmsrl(MSR_MTRRcap, config);
        return (config & (1ULL << 10));
 }
 
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -91,7 +91,7 @@ static void __init set_num_var_ranges(vo
        unsigned long config = 0;
 
        if (use_intel()) {
-               rdmsrl(MTRRcap_MSR, config);
+               rdmsrl(MSR_MTRRcap, config);
        } else if (is_cpu(AMD))
                config = 2;
        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
--- a/xen/arch/x86/cpu/mtrr/mtrr.h
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h
@@ -7,24 +7,6 @@
 #define FALSE 0
 #endif
 
-#define MTRRcap_MSR     0x0fe
-#define MTRRdefType_MSR 0x2ff
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-#define MTRRfix64K_00000_MSR 0x250
-#define MTRRfix16K_80000_MSR 0x258
-#define MTRRfix16K_A0000_MSR 0x259
-#define MTRRfix4K_C0000_MSR 0x268
-#define MTRRfix4K_C8000_MSR 0x269
-#define MTRRfix4K_D0000_MSR 0x26a
-#define MTRRfix4K_D8000_MSR 0x26b
-#define MTRRfix4K_E0000_MSR 0x26c
-#define MTRRfix4K_E8000_MSR 0x26d
-#define MTRRfix4K_F0000_MSR 0x26e
-#define MTRRfix4K_F8000_MSR 0x26f
-
 #define MTRR_CHANGE_MASK_FIXED     0x01
 #define MTRR_CHANGE_MASK_VARIABLE  0x02
 #define MTRR_CHANGE_MASK_DEFTYPE   0x04
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3178,10 +3178,10 @@ int hvm_msr_read_intercept(unsigned int 
         index = msr - MSR_MTRRfix4K_C0000;
         *msr_content = fixed_range_base[index + 3];
         break;
-    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
+    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
         if ( !mtrr )
             goto gp_fault;
-        index = msr - MSR_IA32_MTRR_PHYSBASE0;
+        index = msr - MSR_IA32_MTRR_PHYSBASE(0);
         *msr_content = var_range_base[index];
         break;
 
@@ -3305,7 +3305,7 @@ int hvm_msr_write_intercept(unsigned int
                                      index, msr_content) )
             goto gp_fault;
         break;
-    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
+    case MSR_IA32_MTRR_PHYSBASE(0)...MSR_IA32_MTRR_PHYSMASK(MTRR_VCNT-1):
         if ( !mtrr )
             goto gp_fault;
         if ( !mtrr_var_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -88,7 +88,7 @@ static void get_mtrr_range(uint64_t base
     uint32_t base_hi = (uint32_t)(base_msr >> 32);
     uint32_t size;
 
-    if ( (mask_lo & 0x800) == 0 )
+    if ( !(mask_lo & MTRR_PHYSMASK_VALID) )
     {
         /* Invalid (i.e. free) range */
         *base = 0;
@@ -143,16 +143,6 @@ bool_t is_var_mtrr_overlapped(struct mtr
     return 0;
 }
 
-#define MTRR_PHYSMASK_VALID_BIT  11
-#define MTRR_PHYSMASK_SHIFT      12
-
-#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
-#define MTRR_PHYSBASE_SHIFT      12
-#define MTRR_VCNT                8
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
 static int __init hvm_mtrr_pat_init(void)
 {
     unsigned int i, j, phys_addr;
@@ -280,7 +270,7 @@ static uint8_t get_mtrr_type(struct mtrr
    {
        phys_base = ((uint64_t*)m->var_ranges)[seg*2];
        phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
-       if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
+       if ( phys_mask & MTRR_PHYSMASK_VALID )
        {
            if ( ((uint64_t) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
@@ -477,7 +467,7 @@ bool_t mtrr_var_range_msr_set(
     uint64_t msr_mask;
     uint64_t *var_range_base = (uint64_t*)m->var_ranges;
 
-    index = msr - MSR_IA32_MTRR_PHYSBASE0;
+    index = msr - MSR_IA32_MTRR_PHYSBASE(0);
     if ( var_range_base[index] == msr_content )
         return 1;
 
@@ -683,9 +673,11 @@ static int hvm_load_mtrr_msr(struct doma
     for ( i = 0; i < MTRR_VCNT; i++ )
     {
         mtrr_var_range_msr_set(d, mtrr_state,
-                MTRRphysBase_MSR(i), hw_mtrr.msr_mtrr_var[i*2]);
+                               MSR_IA32_MTRR_PHYSBASE(i),
+                               hw_mtrr.msr_mtrr_var[i * 2]);
         mtrr_var_range_msr_set(d, mtrr_state,
-                MTRRphysMask_MSR(i), hw_mtrr.msr_mtrr_var[i*2+1]);
+                               MSR_IA32_MTRR_PHYSMASK(i),
+                               hw_mtrr.msr_mtrr_var[i * 2 + 1]);
     }
 
     mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -87,22 +87,8 @@
 
 #define MSR_IA32_POWER_CTL             0x000001fc
 
-#define MSR_IA32_MTRR_PHYSBASE0     0x00000200
-#define MSR_IA32_MTRR_PHYSMASK0     0x00000201
-#define MSR_IA32_MTRR_PHYSBASE1     0x00000202
-#define MSR_IA32_MTRR_PHYSMASK1     0x00000203
-#define MSR_IA32_MTRR_PHYSBASE2     0x00000204
-#define MSR_IA32_MTRR_PHYSMASK2     0x00000205
-#define MSR_IA32_MTRR_PHYSBASE3     0x00000206
-#define MSR_IA32_MTRR_PHYSMASK3     0x00000207
-#define MSR_IA32_MTRR_PHYSBASE4     0x00000208
-#define MSR_IA32_MTRR_PHYSMASK4     0x00000209
-#define MSR_IA32_MTRR_PHYSBASE5     0x0000020a
-#define MSR_IA32_MTRR_PHYSMASK5     0x0000020b
-#define MSR_IA32_MTRR_PHYSBASE6     0x0000020c
-#define MSR_IA32_MTRR_PHYSMASK6     0x0000020d
-#define MSR_IA32_MTRR_PHYSBASE7     0x0000020e
-#define MSR_IA32_MTRR_PHYSMASK7     0x0000020f
+#define MSR_IA32_MTRR_PHYSBASE(n)   (0x00000200 + 2 * (n))
+#define MSR_IA32_MTRR_PHYSMASK(n)   (0x00000201 + 2 * (n))
 
 #define MSR_IA32_CR_PAT             0x00000277
 #define MSR_IA32_CR_PAT_RESET       0x0007040600070406ULL
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -33,6 +33,14 @@ enum {
    an 8 bit field: */
 typedef u8 mtrr_type;
 
+#define MTRR_PHYSMASK_VALID_BIT  11
+#define MTRR_PHYSMASK_VALID      (1 << MTRR_PHYSMASK_VALID_BIT)
+#define MTRR_PHYSMASK_SHIFT      12
+#define MTRR_PHYSBASE_TYPE_MASK  0xff
+#define MTRR_PHYSBASE_SHIFT      12
+/* Number of variable range MSR pairs we emulate for HVM guests: */
+#define MTRR_VCNT                8
+
 struct mtrr_var_range {
        uint64_t base;
        uint64_t mask;


Attachment: x86-MTRR-consistent-MSR-names.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.