[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v1 10/13] x86/PMU: Add support for PMU registers handling on PV guests
Intercept accesses to PMU MSRs and LVTPC APIC vector (only APIC_LVT_MASKED bit is processed) and process them in VPMU module. Dump VPMU state for all domains (HVM and PV) when requested. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> --- xen/arch/x86/domain.c | 3 +- xen/arch/x86/hvm/vmx/vpmu_core2.c | 94 ++++++++++++++++++++++++++++++--------- xen/arch/x86/hvm/vpmu.c | 16 +++++++ xen/arch/x86/traps.c | 38 +++++++++++++++- xen/include/public/xenpmu.h | 2 + 5 files changed, 128 insertions(+), 25 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index e119d7b..36f4192 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -1940,8 +1940,7 @@ void arch_dump_vcpu_info(struct vcpu *v) { paging_dump_vcpu_info(v); - if ( is_hvm_vcpu(v) ) - vpmu_dump(v); + vpmu_dump(v); } void domain_cpuid( diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c index ecaa799..489dc49 100644 --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c @@ -27,6 +27,7 @@ #include <asm/regs.h> #include <asm/types.h> #include <asm/apic.h> +#include <asm/traps.h> #include <asm/msr.h> #include <asm/msr-index.h> #include <asm/hvm/support.h> @@ -270,6 +271,9 @@ static inline void __core2_vpmu_save(struct vcpu *v) rdmsrl(core2_fix_counters_msr[i], core2_vpmu_cxt->fix_counters[i]); for ( i = 0; i < arch_pmc_cnt; i++ ) rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); + + if ( !is_hvm_domain(v->domain) ) + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status); } static int core2_vpmu_save(struct vcpu *v) @@ -279,10 +283,14 @@ static int core2_vpmu_save(struct vcpu *v) if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) ) return 0; + if ( !is_hvm_domain(v->domain) ) + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + __core2_vpmu_save(v); /* Unset PMU MSR bitmap to trap lazy load. 
*/ - if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap ) + if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap + && is_hvm_domain(v->domain) ) core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap); return 1; @@ -302,6 +310,12 @@ static inline void __core2_vpmu_load(struct vcpu *v) wrmsrl(core2_ctrls_msr[i], core2_vpmu_cxt->ctrls[i]); for ( i = 0; i < arch_pmc_cnt; i++ ) wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control); + + if ( !is_hvm_domain(v->domain) ) + { + wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl); + } } static void core2_vpmu_load(struct vcpu *v) @@ -431,7 +445,12 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) ) return 1; gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n"); - hvm_inject_hw_exception(TRAP_gp_fault, 0); + + if ( is_hvm_domain(v->domain) ) + hvm_inject_hw_exception(TRAP_gp_fault, 0); + else + send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault); + return 0; } } @@ -443,11 +462,15 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { case MSR_CORE_PERF_GLOBAL_OVF_CTRL: core2_vpmu_cxt->global_ovf_status &= ~msr_content; + core2_vpmu_cxt->global_ovf_ctrl = msr_content; return 1; case MSR_CORE_PERF_GLOBAL_STATUS: gdprintk(XENLOG_INFO, "Can not write readonly MSR: " "MSR_PERF_GLOBAL_STATUS(0x38E)!\n"); - hvm_inject_hw_exception(TRAP_gp_fault, 0); + if ( is_hvm_domain(v->domain) ) + hvm_inject_hw_exception(TRAP_gp_fault, 0); + else + send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault); return 1; case MSR_IA32_PEBS_ENABLE: if ( msr_content & 1 ) @@ -462,7 +485,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) gdprintk(XENLOG_WARNING, "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n", msr_content); - hvm_inject_hw_exception(TRAP_gp_fault, 0); + if ( is_hvm_domain(v->domain) ) 
+ hvm_inject_hw_exception(TRAP_gp_fault, 0); + else + send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault); return 1; } core2_vpmu_cxt->pmu_enable->ds_area_enable = msr_content ? 1 : 0; @@ -492,7 +518,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) break; case MSR_CORE_PERF_FIXED_CTR_CTRL: non_global_ctrl = msr_content; - vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); + if ( is_hvm_domain(v->domain) ) + vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); + else + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl); global_ctrl >>= 32; for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ ) { @@ -504,7 +533,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) break; default: tmp = msr - MSR_P6_EVNTSEL0; - vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); + if ( is_hvm_domain(v->domain) ) + vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); + else + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl); if ( tmp >= 0 && tmp < arch_pmc_cnt ) core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] = (global_ctrl >> tmp) & (msr_content >> 22) & 1; @@ -520,17 +552,20 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) else vpmu_reset(vpmu, VPMU_RUNNING); - /* Setup LVTPC in local apic */ - if ( vpmu_is_set(vpmu, VPMU_RUNNING) && - is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) ) - { - apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR); - vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR; - } - else + if ( is_hvm_domain(v->domain) ) { - apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED); - vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED; + /* Setup LVTPC in local apic */ + if ( vpmu_is_set(vpmu, VPMU_RUNNING) && + is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) ) + { + apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR); + vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR; + } + else + { + apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED); + vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | 
APIC_LVT_MASKED; + } } core2_vpmu_save_msr_context(v, type, index, msr_content); @@ -559,13 +594,27 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) inject_gp = 1; break; } - if (inject_gp) - hvm_inject_hw_exception(TRAP_gp_fault, 0); + + if (inject_gp) + { + if ( is_hvm_domain(v->domain) ) + hvm_inject_hw_exception(TRAP_gp_fault, 0); + else + send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault); + } else wrmsrl(msr, msr_content); } - else - vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); + else + { + if ( is_hvm_domain(v->domain) ) + vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); + else + { + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); + core2_vpmu_cxt->global_ctrl = msr_content; + } + } return 1; } @@ -589,7 +638,10 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) *msr_content = core2_vpmu_cxt->global_ovf_status; break; case MSR_CORE_PERF_GLOBAL_CTRL: - vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); + if ( is_hvm_domain(v->domain) ) + vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); + else + rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content); break; default: rdmsrl(msr, *msr_content); diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c index 04cc114..0adacce 100644 --- a/xen/arch/x86/hvm/vpmu.c +++ b/xen/arch/x86/hvm/vpmu.c @@ -70,6 +70,14 @@ static void __init parse_vpmu_param(char *s) } } +static void vpmu_lvtpc_update(uint32_t val) +{ + struct vpmu_struct *vpmu = vcpu_vpmu(current); + + vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED); + apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc); +} + int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { struct vpmu_struct *vpmu = vcpu_vpmu(current); @@ -425,6 +433,14 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) return -EFAULT; pvpmu_finish(current->domain, &pmu_params); break; + + case XENPMU_lvtpc_set: + if ( copy_from_guest(&pmu_params, arg, 1) ) + return -EFAULT; + + 
vpmu_lvtpc_update((uint32_t)pmu_params.lvtpc); + ret = 0; + break; } return ret; diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 57dbd0c..64c9c25 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -71,6 +71,7 @@ #include <asm/apic.h> #include <asm/mc146818rtc.h> #include <asm/hpet.h> +#include <asm/hvm/vpmu.h> #include <public/arch-x86/cpuid.h> #include <xsm/xsm.h> @@ -871,7 +872,6 @@ static void pv_cpuid(struct cpu_user_regs *regs) break; case 0x00000005: /* MONITOR/MWAIT */ - case 0x0000000a: /* Architectural Performance Monitor Features */ case 0x0000000b: /* Extended Topology Enumeration */ case 0x8000000a: /* SVM revision and features */ case 0x8000001b: /* Instruction Based Sampling */ @@ -880,7 +880,8 @@ static void pv_cpuid(struct cpu_user_regs *regs) unsupported: a = b = c = d = 0; break; - + case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */ + break; default: (void)cpuid_hypervisor_leaves(regs->eax, 0, &a, &b, &c, &d); break; @@ -2486,6 +2487,17 @@ static int emulate_privileged_op(struct cpu_user_regs *regs) if ( wrmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; break; + case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1: + case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1: + case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: + case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: + case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5: + if ( !vpmu_do_wrmsr(regs->ecx, msr_content) ) + { + if ( (vpmu_mode & VPMU_PRIV) && (v->domain == dom0) ) + goto invalid; + } + break; default: if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 ) break; @@ -2574,6 +2586,24 @@ static int emulate_privileged_op(struct cpu_user_regs *regs) regs->eax = (uint32_t)msr_content; regs->edx = (uint32_t)(msr_content >> 32); break; + case MSR_IA32_PERF_CAPABILITIES: + if ( rdmsr_safe(regs->ecx, msr_content) ) + goto fail; + /* Full-Width Writes not supported */ + regs->eax = (uint32_t)msr_content & ~(1 << 13); + regs->edx = 
(uint32_t)(msr_content >> 32); + break; + case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1: + case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1: + case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: + case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL: + case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5: + if ( vpmu_do_rdmsr(regs->ecx, &msr_content) ) { + regs->eax = (uint32_t)msr_content; + regs->edx = (uint32_t)(msr_content >> 32); + break; + } + goto rdmsr_normal; default: if ( rdmsr_hypervisor_regs(regs->ecx, &val) ) { @@ -2606,6 +2636,10 @@ static int emulate_privileged_op(struct cpu_user_regs *regs) pv_cpuid(regs); break; + case 0x33: /* RDPMC */ + rdpmc(regs->ecx, regs->eax, regs->edx); + break; + default: goto fail; } diff --git a/xen/include/public/xenpmu.h b/xen/include/public/xenpmu.h index ffaf3fe..dc8bad2 100644 --- a/xen/include/public/xenpmu.h +++ b/xen/include/public/xenpmu.h @@ -15,6 +15,7 @@ #define XENPMU_flags_set 3 #define XENPMU_init 4 #define XENPMU_finish 5 +#define XENPMU_lvtpc_set 6 /* Parameters structure for HYPERVISOR_xenpmu_op call */ typedef struct xenpmu_params { @@ -28,6 +29,7 @@ typedef struct xenpmu_params { uint64_t control; uint64_t mfn; uint64_t vcpu; + uint64_t lvtpc; } xenpmu_params_t; -- 1.8.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |