Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c
===================================================================
--- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -101,7 +101,7 @@ static void handle_pmc_quirk(u64 msr_con
     }
 }
 
-static const u32 core2_counters_msr[] = {
+static const u32 core2_fix_counters_msr[] = {
     MSR_CORE_PERF_FIXED_CTR0,
     MSR_CORE_PERF_FIXED_CTR1,
     MSR_CORE_PERF_FIXED_CTR2
@@ -119,13 +119,13 @@ struct pmumsr {
     const u32 *msr;
 };
 
-static const struct pmumsr core2_counters = {
-    3,
-    core2_counters_msr
+static const struct pmumsr core2_fix_counters = {
+    VPMU_CORE2_NUM_FIXED,
+    core2_fix_counters_msr
 };
 
 static const struct pmumsr core2_ctrls = {
-    3,
+    VPMU_CORE2_NUM_CTRLS,
     core2_ctrls_msr
 };
 static int arch_pmc_cnt;
@@ -162,16 +162,16 @@ static int is_core2_vpmu_msr(u32 msr_ind
 {
     int i;
 
-    for ( i = 0; i < core2_counters.num; i++ )
+    for ( i = 0; i < core2_fix_counters.num; i++ )
     {
-        if ( core2_counters.msr[i] == msr_index )
+        if ( core2_fix_counters.msr[i] == msr_index )
         {
             *type = MSR_TYPE_COUNTER;
             *index = i;
             return 1;
         }
     }
-    
+
     for ( i = 0; i < core2_ctrls.num; i++ )
     {
         if ( core2_ctrls.msr[i] == msr_index )
@@ -214,10 +214,10 @@ static void core2_vpmu_set_msr_bitmap(un
     int i;
 
     /* Allow Read/Write PMU Counters MSR Directly. */
-    for ( i = 0; i < core2_counters.num; i++ )
+    for ( i = 0; i < core2_fix_counters.num; i++ )
     {
-        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
+        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
                   msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
@@ -238,10 +238,10 @@ static void core2_vpmu_unset_msr_bitmap(
 {
     int i;
 
-    for ( i = 0; i < core2_counters.num; i++ )
+    for ( i = 0; i < core2_fix_counters.num; i++ )
     {
-        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        set_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
+        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
                 msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
@@ -261,8 +261,8 @@ static inline void __core2_vpmu_save(str
     int i;
     struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
 
-    for ( i = 0; i < core2_counters.num; i++ )
-        rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+    for ( i = 0; i < core2_fix_counters.num; i++ )
+        rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
     core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
@@ -292,8 +292,8 @@ static inline void __core2_vpmu_load(str
     int i;
     struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
 
-    for ( i = 0; i < core2_counters.num; i++ )
-        wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
+    for ( i = 0; i < core2_fix_counters.num; i++ )
+        wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
 
@@ -474,7 +474,7 @@ static int core2_vpmu_do_wrmsr(unsigned
 
         rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
         global_ctrl = msr_content >> 32;
-        for ( i = 0; i < 3; i++ )
+        for ( i = 0; i < core2_fix_counters.num; i++ )
         {
             core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
                 (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -486,7 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned
         non_global_ctrl = msr_content;
         vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         global_ctrl >>= 32;
-        for ( i = 0; i < 3; i++ )
+        for ( i = 0; i < core2_fix_counters.num; i++ )
         {
             core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
                 (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -502,7 +502,7 @@ static int core2_vpmu_do_wrmsr(unsigned
                 (global_ctrl >> tmp) & (msr_content >> 22) & 1;
     }
 
-    for ( i = 0; i < 3; i++ )
+    for ( i = 0; i < core2_fix_counters.num; i++ )
         pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
     for ( i = 0; i < core2_get_pmc_count(); i++ )
         pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
Index: xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
===================================================================
--- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
+++ xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
@@ -23,6 +23,11 @@
 #ifndef __ASM_X86_HVM_VPMU_CORE_H_
 #define __ASM_X86_HVM_VPMU_CORE_H_
 
+/* Currently only 3 fixed counters are supported. */
+#define VPMU_CORE2_NUM_FIXED 3
+/* Currently only 3 non-architectural performance control MSRs are supported. */
+#define VPMU_CORE2_NUM_CTRLS 3
+
 struct arch_msr_pair {
     u64 counter;
     u64 control;
@@ -30,14 +35,14 @@ struct arch_msr_pair {
 
 struct core2_pmu_enable {
     char ds_area_enable;
-    char fixed_ctr_enable[3];
+    char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED];
     char arch_pmc_enable[1];
 };
 
 struct core2_vpmu_context {
     struct core2_pmu_enable *pmu_enable;
-    u64 counters[3];
-    u64 ctrls[3];
+    u64 fix_counters[VPMU_CORE2_NUM_FIXED];
+    u64 ctrls[VPMU_CORE2_NUM_CTRLS];
     u64 global_ovf_status;
     u32 hw_lapic_lvtpc;
     struct arch_msr_pair arch_msr_pair[1];
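
To see why the shared macro matters, here is a minimal user-space sketch (not part of the patch) of the table-driven pattern the code relies on: the MSR address table and the per-vCPU save area are sized by the same VPMU_CORE2_NUM_FIXED constant, so the save loop can never index past either array. The fake_rdmsrl() stub and main() harness are illustrative stand-ins for hypervisor-only code; the MSR addresses are the architectural fixed-function counter ones.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* The constant introduced by the patch. */
#define VPMU_CORE2_NUM_FIXED 3

/* Architectural addresses of the fixed-function counter MSRs. */
#define MSR_CORE_PERF_FIXED_CTR0 0x309
#define MSR_CORE_PERF_FIXED_CTR1 0x30a
#define MSR_CORE_PERF_FIXED_CTR2 0x30b

/* Same shape as struct pmumsr in vpmu_core2.c: a count plus a
 * pointer to a table of MSR addresses. */
struct pmumsr {
    int num;
    const u32 *msr;
};

static const u32 core2_fix_counters_msr[] = {
    MSR_CORE_PERF_FIXED_CTR0,
    MSR_CORE_PERF_FIXED_CTR1,
    MSR_CORE_PERF_FIXED_CTR2
};

static const struct pmumsr core2_fix_counters = {
    VPMU_CORE2_NUM_FIXED,
    core2_fix_counters_msr
};

/* Stand-in for rdmsrl(); a real MSR read requires ring 0. */
static u64 fake_rdmsrl(u32 msr)
{
    return (u64)msr << 8;
}

int main(void)
{
    /* Sized by the same macro as the MSR table above, mirroring
     * fix_counters[] in struct core2_vpmu_context. */
    u64 fix_counters[VPMU_CORE2_NUM_FIXED];
    int i;

    /* The save loop, as in __core2_vpmu_save(): bounded by the
     * table's own element count, not a hard-coded 3. */
    for ( i = 0; i < core2_fix_counters.num; i++ )
    {
        fix_counters[i] = fake_rdmsrl(core2_fix_counters.msr[i]);
        printf("MSR %#x -> %#llx\n",
               (unsigned int)core2_fix_counters.msr[i],
               (unsigned long long)fix_counters[i]);
    }
    return 0;
}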