[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 20/30] x86/cpu: Context switch cpuid masks and faulting state in context_switch()



A single ctxt_switch_levelling() function pointer is provided
(defaulting to an empty nop), which is overridden in the appropriate
$VENDOR_init_levelling().

set_cpuid_faulting() is made private and included within
intel_ctxt_switch_levelling().

One functional change is that the faulting configuration is no longer special
cased for dom0.  There was never any need to, and it will cause dom0 to
observe the same information through native and enlightened cpuid.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>

v2:
 * Style fixes
 * ASSERT() that faulting is available in set_cpuid_faulting()
---
 xen/arch/x86/cpu/amd.c          |  3 +++
 xen/arch/x86/cpu/common.c       |  7 +++++++
 xen/arch/x86/cpu/intel.c        | 20 +++++++++++++++-----
 xen/arch/x86/domain.c           |  4 +---
 xen/include/asm-x86/processor.h |  2 +-
 5 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 1708dd9..9d162bc 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -317,6 +317,9 @@ static void __init noinline amd_init_levelling(void)
                       (uint32_t)cpuidmask_defaults._7ab0,
                       (uint32_t)cpuidmask_defaults._6c);
        }
+
+       if (levelling_caps)
+               ctxt_switch_levelling = amd_ctxt_switch_levelling;
 }
 
 /*
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3fdae96..dc2442b 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -90,6 +90,13 @@ static const struct cpu_dev default_cpu = {
 };
 static const struct cpu_dev *this_cpu = &default_cpu;
 
+static void default_ctxt_switch_levelling(const struct domain *nextd)
+{
+       /* Nop */
+}
+void (* __read_mostly ctxt_switch_levelling)(const struct domain *nextd) =
+       default_ctxt_switch_levelling;
+
 bool_t opt_cpu_info;
 boolean_param("cpuinfo", opt_cpu_info);
 
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 143f497..95d44dd 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -32,13 +32,15 @@ static bool_t __init probe_intel_cpuid_faulting(void)
        return 1;
 }
 
-static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
-void set_cpuid_faulting(bool_t enable)
+static void set_cpuid_faulting(bool_t enable)
 {
+       static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+       bool_t *this_enabled = &this_cpu(cpuid_faulting_enabled);
        uint32_t hi, lo;
 
-       if (!cpu_has_cpuid_faulting ||
-           this_cpu(cpuid_faulting_enabled) == enable )
+       ASSERT(cpu_has_cpuid_faulting);
+
+       if (*this_enabled == enable)
                return;
 
        rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
@@ -47,7 +49,7 @@ void set_cpuid_faulting(bool_t enable)
                lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
        wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
 
-       this_cpu(cpuid_faulting_enabled) = enable;
+       *this_enabled = enable;
 }
 
 /*
@@ -151,6 +153,11 @@ static void intel_ctxt_switch_levelling(const struct domain *nextd)
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
        const struct cpuidmasks *masks = &cpuidmask_defaults;
 
+       if (cpu_has_cpuid_faulting) {
+               set_cpuid_faulting(nextd && is_pv_domain(nextd));
+               return;
+       }
+
 #define LAZY(msr, field)                                               \
        ({                                                              \
                if (msr && (these_masks->field != masks->field))        \
@@ -221,6 +228,9 @@ static void __init noinline intel_init_levelling(void)
                       (uint32_t)cpuidmask_defaults.e1cd,
                       (uint32_t)cpuidmask_defaults.Da1);
        }
+
+       if (levelling_caps)
+               ctxt_switch_levelling = intel_ctxt_switch_levelling;
 }
 
 static void early_init_intel(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8f2c0b6..dbce90f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2079,9 +2079,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
             load_segments(next);
         }
 
-        set_cpuid_faulting(is_pv_domain(nextd) &&
-                           !is_control_domain(nextd) &&
-                           !is_hardware_domain(nextd));
+        ctxt_switch_levelling(nextd);
     }
 
     context_saved(prev);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 09e82d8..12b6e25 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -210,7 +210,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
 
-extern void set_cpuid_faulting(bool_t enable);
+extern void (*ctxt_switch_levelling)(const struct domain *nextd);
 
 extern u64 host_pat;
 extern bool_t opt_cpu_info;
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.