
[Xen-devel] [PATCH v3 11/16] x86/VPMU: Add support for PMU register handling on PV guests

Intercept accesses to PMU MSRs and process them in the VPMU module.

Dump VPMU state for all domains (HVM and PV) when requested.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/domain.c             |  3 +-
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 60 ++++++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/vpmu.c           |  8 ++++++
 xen/arch/x86/traps.c              | 30 ++++++++++++++++++--
 xen/include/public/xenpmu.h       |  1 +
 5 files changed, 88 insertions(+), 14 deletions(-)
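
For context, a PV guest kernel is expected to mirror its local APIC LVTPC writes
into Xen through the new XENPMU_lvtpc_set sub-op handled in do_xenpmu_op() below.
A minimal guest-side sketch: the HYPERVISOR_xenpmu_op() wrapper and the guest-side
header path are assumptions carried over from earlier patches in this series; only
the XENPMU_lvtpc_set constant and the xenpmu_params_t 'val' field come from this
patch.

#include <xen/interface/xenpmu.h>  /* assumed guest-side install of public/xenpmu.h */

/* Sketch only: mirror an APIC_LVTPC write into Xen so the VPMU layer can
 * track the guest's PMU interrupt vector and mask bit.  HYPERVISOR_xenpmu_op()
 * is an assumed two-argument hypercall wrapper (sub-op number, pointer to
 * xenpmu_params_t); it is not defined in this patch. */
static int xen_sync_lvtpc(uint32_t lvtpc)
{
    xenpmu_params_t xp = { 0 };    /* zero the fields this sub-op does not use */

    xp.val = lvtpc;                /* value the guest wrote to APIC_LVTPC */
    return HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, &xp);
}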

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index da8e522..25572d5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1972,8 +1972,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
 {
     paging_dump_vcpu_info(v);
 
-    if ( is_hvm_vcpu(v) )
-        vpmu_dump(v);
+    vpmu_dump(v);
 }
 
 void domain_cpuid(
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 25b2a96..b9b2ea9 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -27,6 +27,7 @@
 #include <asm/regs.h>
 #include <asm/types.h>
 #include <asm/apic.h>
+#include <asm/traps.h>
 #include <asm/msr.h>
 #include <asm/msr-index.h>
 #include <asm/hvm/support.h>
@@ -297,6 +298,9 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
     for ( i = 0; i < arch_pmc_cnt; i++ )
         rdmsrl(MSR_IA32_PERFCTR0 + i, arch_cntr_pair[i].counter);
+
+    if ( !is_hvm_domain(v->domain) )
+        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
 static int core2_vpmu_save(struct vcpu *v)
@@ -306,10 +310,14 @@ static int core2_vpmu_save(struct vcpu *v)
     if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
         return 0;
 
+    if ( !is_hvm_domain(v->domain) )
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap
+        && is_hvm_domain(v->domain) )
         core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
 
     return 1;
@@ -339,6 +347,13 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
     wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
     wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
+
+    if ( !is_hvm_domain(v->domain) )
+    {
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
+        core2_vpmu_cxt->global_ovf_ctrl = 0;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
+    }
 }
 
 static void core2_vpmu_load(struct vcpu *v)
@@ -424,6 +439,14 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
     return 1;
 }
 
+static void inject_trap(struct vcpu *v, unsigned int trapno)
+{
+    if ( is_hvm_domain(v->domain) )
+        hvm_inject_hw_exception(trapno, 0);
+    else
+        send_guest_trap(v->domain, v->vcpu_id, trapno);
+}
+
 static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     u64 global_ctrl, non_global_ctrl;
@@ -450,7 +473,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
                 if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
                     return 1;
                 gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n");
-                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+                inject_trap(v, TRAP_gp_fault);
                 return 0;
             }
         }
@@ -462,11 +485,12 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         core2_vpmu_cxt->global_status &= ~msr_content;
+        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
         return 1;
     case MSR_CORE_PERF_GLOBAL_STATUS:
         gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
                  "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
-        hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        inject_trap(v, TRAP_gp_fault);
         return 1;
     case MSR_IA32_PEBS_ENABLE:
         if ( msr_content & 1 )
@@ -482,7 +506,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
                 gdprintk(XENLOG_WARNING,
                          "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
                          msr_content);
-                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+                inject_trap(v, TRAP_gp_fault);
                 return 1;
             }
             core2_vpmu_cxt->ds_area = msr_content;
@@ -507,10 +531,14 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             non_global_ctrl >>= FIXED_CTR_CTRL_BITS;
             global_ctrl >>= 1;
         }
+        core2_vpmu_cxt->global_ctrl = msr_content;
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         non_global_ctrl = msr_content;
-        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        if ( is_hvm_domain(v->domain) )
+            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        else
+            rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
         global_ctrl >>= 32;
         for ( i = 0; i < fixed_pmc_cnt; i++ )
         {
@@ -527,7 +555,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             struct arch_cntr_pair *arch_cntr_pair =
                 vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
 
-            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+            if ( is_hvm_domain(v->domain) )
+                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+            else
+                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
             arch_cntr_pair[tmp].control = msr_content;
             for ( i = 0; i < arch_pmc_cnt && !pmu_enable; i++ )
                 pmu_enable += (global_ctrl >> i) &
@@ -566,13 +597,19 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
                 inject_gp = 1;
             break;
         }
-        if (inject_gp)
-            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+        if ( inject_gp )
+            inject_trap(v, TRAP_gp_fault);
         else
             wrmsrl(msr, msr_content);
     }
     else
-        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    {
+        if ( is_hvm_domain(v->domain) )
+            vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        else
+            wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+    }
 
     return 1;
 }
@@ -596,7 +633,10 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+            if ( is_hvm_domain(v->domain) )
+                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+            else
+                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
             break;
         default:
             rdmsrl(msr, *msr_content);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 1a97b0e..2cc37cc 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -396,6 +396,14 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xenpmu_params_t) arg)
             return -EFAULT;
         pvpmu_finish(current->domain, &pmu_params);
         break;
+
+    case XENPMU_lvtpc_set:
+        if ( copy_from_guest(&pmu_params, arg, 1) )
+            return -EFAULT;
+
+        vpmu_lvtpc_update((uint32_t)pmu_params.val);
+        ret = 0;
+        break;
     }
 
     return ret;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 3f7a3c7..8a3353f 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -72,6 +72,7 @@
 #include <asm/apic.h>
 #include <asm/mc146818rtc.h>
 #include <asm/hpet.h>
+#include <asm/hvm/vpmu.h>
 #include <public/arch-x86/cpuid.h>
 #include <xsm/xsm.h>
 
@@ -866,7 +867,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
         break;
 
     case 0x00000005: /* MONITOR/MWAIT */
-    case 0x0000000a: /* Architectural Performance Monitor Features */
     case 0x0000000b: /* Extended Topology Enumeration */
     case 0x8000000a: /* SVM revision and features */
     case 0x8000001b: /* Instruction Based Sampling */
@@ -875,7 +875,9 @@ void pv_cpuid(struct cpu_user_regs *regs)
     unsupported:
         a = b = c = d = 0;
         break;
-
+    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
+        vpmu_do_cpuid(0xa, &a, &b, &c, &d);
+        break;
     default:
         (void)cpuid_hypervisor_leaves(regs->eax, 0, &a, &b, &c, &d);
         break;
@@ -2499,6 +2501,14 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
                 goto fail;
             break;
+        case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
+        case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+            if ( !vpmu_do_wrmsr(regs->ecx, msr_content) )
+                goto invalid;
+            break;
         default:
             if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
                 break;
@@ -2587,6 +2597,22 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             regs->eax = (uint32_t)msr_content;
             regs->edx = (uint32_t)(msr_content >> 32);
             break;
+        case MSR_IA32_PERF_CAPABILITIES:
+            /* No extra capabilities are supported */
+            regs->eax = regs->edx = 0;
+            break;
+        case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
+        case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
+        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+            if ( vpmu_do_rdmsr(regs->ecx, &msr_content) ) 
+            {
+                regs->eax = (uint32_t)msr_content;
+                regs->edx = (uint32_t)(msr_content >> 32);
+                break;
+            }
+            goto rdmsr_normal;
         default:
             if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
             {
diff --git a/xen/include/public/xenpmu.h b/xen/include/public/xenpmu.h
index 37592f8..bad5211 100644
--- a/xen/include/public/xenpmu.h
+++ b/xen/include/public/xenpmu.h
@@ -27,6 +27,7 @@
 #define XENPMU_feature_set     3
 #define XENPMU_init            4
 #define XENPMU_finish          5
+#define XENPMU_lvtpc_set       6
 /* ` } */
 
 /* ANSI-C does not support anonymous unions */
-- 
1.8.1.4

