[Xen-devel] [PATCH v5 05/17] x86/VPMU: Handle APIC_LVTPC accesses
Update APIC_LVTPC vector when HVM guest writes to it. The LVTPC update is
moved out of the vendor-specific wrmsr/interrupt handlers into a common
vpmu_lvtpc_update() helper, invoked from vlapic_reg_write().

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/vpmu.c       |  4 ----
 xen/arch/x86/hvm/vlapic.c         |  5 ++++-
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 17 -----------------
 xen/arch/x86/hvm/vpmu.c           | 14 +++++++++++---
 xen/include/asm-x86/hvm/vpmu.h    |  1 +
 5 files changed, 16 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 3666915..2fbe2c1 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -298,8 +298,6 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
             return 1;
         vpmu_set(vpmu, VPMU_RUNNING);
-        apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
-        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;

         if ( is_hvm_domain(v->domain) &&
              !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
@@ -310,8 +308,6 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
-        apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
-        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
         vpmu_reset(vpmu, VPMU_RUNNING);
         if ( is_hvm_domain(v->domain) &&
              ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index bc06010..d954f4f 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -38,6 +38,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/vmx/vmx.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/vpmu.h>
 #include <public/hvm/ioreq.h>
 #include <public/hvm/params.h>

@@ -732,8 +733,10 @@ static int vlapic_reg_write(struct vcpu *v,
             vlapic_adjust_i8259_target(v->domain);
             pt_may_unmask_irq(v->domain, NULL);
         }
-        if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
+        else if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
             pt_may_unmask_irq(NULL, &vlapic->pt);
+        else if ( offset == APIC_LVTPC )
+            vpmu_lvtpc_update(val);
         break;

     case APIC_TMICT:
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 513eca4..c16ae10 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -534,19 +534,6 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     else
         vpmu_reset(vpmu, VPMU_RUNNING);

-    /* Setup LVTPC in local apic */
-    if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
-         is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
-    {
-        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
-        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
-    }
-    else
-    {
-        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
-        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
-    }
-
     if ( type != MSR_TYPE_GLOBAL )
     {
         u64 mask;
@@ -712,10 +699,6 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
             return 0;
     }

-    /* HW sets the MASK bit when performance counter interrupt occurs*/
-    vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED;
-    apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
-
     return 1;
 }

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index a48dae2..979bd33 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -64,6 +64,14 @@ static void __init parse_vpmu_param(char *s)
     }
 }

+void vpmu_lvtpc_update(uint32_t val)
+{
+    struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
+    apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
+}
+
 int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
@@ -230,18 +238,18 @@ void vpmu_initialise(struct vcpu *v)
     case X86_VENDOR_AMD:
         if ( svm_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
             opt_vpmu_enabled = 0;
-        break;
+        return;

     case X86_VENDOR_INTEL:
         if ( vmx_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
             opt_vpmu_enabled = 0;
-        break;
+        return;

     default:
         printk("VPMU: Initialization failed. "
                "Unknown CPU vendor %d\n", vendor);
         opt_vpmu_enabled = 0;
-        break;
+        return;
     }
 }

diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 2a713be..7ee0f01 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -87,6 +87,7 @@ struct vpmu_struct {
 #define vpmu_is_set_all(_vpmu, _x) (((_vpmu)->flags & (_x)) == (_x))
 #define vpmu_clear(_vpmu) ((_vpmu)->flags = 0)

+void vpmu_lvtpc_update(uint32_t val);
 int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
 int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
 int vpmu_do_interrupt(struct cpu_user_regs *regs);
--
1.8.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
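
For readers skimming the patch, here is a minimal standalone sketch of the
rule vpmu_lvtpc_update() implements: the host keeps its own PMU vector and
only the guest's APIC_LVT_MASKED bit is carried over from the written value.
This is not Xen code; APIC_LVT_MASKED below is the architectural LVT mask
bit, and the PMU_APIC_VECTOR value is an assumed placeholder rather than the
real Xen definition.

/* Standalone sketch (not Xen code) of the LVTPC update rule:
 * keep the host PMU vector, carry over only the guest's mask bit. */
#include <stdint.h>
#include <stdio.h>

#define APIC_LVT_MASKED  (1u << 16)  /* architectural LVT mask bit */
#define PMU_APIC_VECTOR  0xf8u       /* placeholder host PMU vector */

/* Combine the host's PMU vector with the guest's mask bit only. */
static uint32_t lvtpc_for_host(uint32_t guest_val)
{
    return PMU_APIC_VECTOR | (guest_val & APIC_LVT_MASKED);
}

int main(void)
{
    /* Guest unmasks LVTPC with its own vector 0xee: host stays on 0xf8. */
    printf("%#x\n", lvtpc_for_host(0xee));
    /* Guest masks LVTPC: host vector is kept, mask bit carried over. */
    printf("%#x\n", lvtpc_for_host(0xee | APIC_LVT_MASKED));
    return 0;
}

Compiled with a plain C compiler, the two calls print 0xf8 and 0x100f8
respectively, matching the intent of writing the hypervisor's vector with
only the guest-controlled mask state.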