
[Xen-devel] [PATCH 3/6] x86/vpmu: Remove core2_no_vpmu_ops



core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
values in Xen.

With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
the nop path in vpmu_do_msr() now suffices.

vpmu_do_msr() also falls into the nop path for unconfigured or unprivileged
domains, which enables the removal of the duplicate logic in
priv_op_read_msr().
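
For illustration, the effect of the nop path on a read is to hand back a zeroed
value (the same behaviour core2_no_vpmu_do_rdmsr() provided), while writes are
silently discarded.  A minimal standalone sketch of that behaviour, using a
hypothetical vpmu_msr_nop() rather than the real vpmu_do_msr() internals (which
may special-case individual MSRs):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch of the nop path's effect; not the Xen source. */
    static int vpmu_msr_nop(bool is_write, uint64_t *msr_content)
    {
        /* Reads of PMU MSRs see 0; writes are silently dropped. */
        if ( !is_write )
            *msr_content = 0;

        return 0;
    }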

Finally, make all arch_vpmu_ops structures const as they are never modified,
and make them static as they are not referred to externally.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/cpu/vpmu.c       |  3 ++-
 xen/arch/x86/cpu/vpmu_amd.c   |  2 +-
 xen/arch/x86/cpu/vpmu_intel.c | 22 +---------------------
 xen/arch/x86/traps.c          |  6 +-----
 xen/include/asm-x86/vpmu.h    |  2 +-
 5 files changed, 6 insertions(+), 29 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index a542f4d..1f822ca 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -136,9 +136,10 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
     const struct arch_vpmu_ops *ops;
     int ret = 0;
 
+    /* Don't leak PMU MSRs to unprivileged domains. */
     if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
          ((vpmu_mode & XENPMU_MODE_ALL) &&
-          !is_hardware_domain(current->domain)) )
+          !is_hardware_domain(curr->domain)) )
          goto nop;
 
     vpmu = vcpu_vpmu(curr);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 55d03b3..bae81a9 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -488,7 +488,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops amd_vpmu_ops = {
     .do_wrmsr = amd_vpmu_do_wrmsr,
     .do_rdmsr = amd_vpmu_do_rdmsr,
     .do_interrupt = amd_vpmu_do_interrupt,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e3f25c8..babe4cb 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -865,7 +865,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops core2_vpmu_ops = {
     .do_wrmsr = core2_vpmu_do_wrmsr,
     .do_rdmsr = core2_vpmu_do_rdmsr,
     .do_interrupt = core2_vpmu_do_interrupt,
@@ -875,32 +875,12 @@ struct arch_vpmu_ops core2_vpmu_ops = {
     .arch_vpmu_dump = core2_vpmu_dump
 };
 
-/*
- * If its a vpmu msr set it to 0.
- */
-static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
-    int type = -1, index = -1;
-    if ( !is_core2_vpmu_msr(msr, &type, &index) )
-        return -EINVAL;
-    *msr_content = 0;
-    return 0;
-}
-
-/*
- * These functions are used in case vpmu is not enabled.
- */
-struct arch_vpmu_ops core2_no_vpmu_ops = {
-    .do_rdmsr = core2_no_vpmu_do_rdmsr,
-};
-
 int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
     if ( vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 59e6b16..cdce72d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2532,11 +2532,7 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
     case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
             if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
             {
-                /* Don't leak PMU MSRs to unprivileged domains. */
-                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
-                     !is_hardware_domain(currd) )
-                    *val = 0;
-                else if ( vpmu_do_rdmsr(reg, val) )
+                if ( vpmu_do_rdmsr(reg, val) )
                     break;
                 return X86EMUL_OKAY;
             }
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index d1dda4b..e50618f 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -60,7 +60,7 @@ struct vpmu_struct {
     u32 hw_lapic_lvtpc;
     void *context;      /* May be shared with PV guest */
     void *priv_context; /* hypervisor-only */
-    struct arch_vpmu_ops *arch_vpmu_ops;
+    const struct arch_vpmu_ops *arch_vpmu_ops;
     struct xen_pmu_data *xenpmu_data;
     spinlock_t vpmu_lock;
 };
-- 
2.1.4

