[Xen-devel] [PATCH v1 05/13] intel/VPMU: Clean up Intel VPMU code
Remove struct pmumsr and convert core2_fix_counters and core2_ctrls into
arrays of u32 (MSR offsets). Call core2_get_pmc_count() once, during
initialization. Properly clean up when core2_vpmu_alloc_resource() fails
and add routines to remove MSRs from VMCS.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c              |  59 +++++++++
 xen/arch/x86/hvm/vmx/vpmu_core2.c        | 218 ++++++++++++++++---------------
 xen/include/asm-x86/hvm/vmx/vmcs.h       |   2 +
 xen/include/asm-x86/hvm/vmx/vpmu_core2.h |  19 ---
 4 files changed, 171 insertions(+), 127 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index de9f592..756bc13 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1136,6 +1136,36 @@ int vmx_add_guest_msr(u32 msr)
     return 0;
 }
 
+void vmx_rm_guest_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+
+    if ( msr_area == NULL )
+        return;
+
+    for ( idx = 0; idx < msr_count; idx++ )
+        if ( msr_area[idx].index == msr )
+            break;
+
+    if ( idx == msr_count )
+        return;
+
+    for ( i = idx; i < msr_count - 1; i++ )
+    {
+        msr_area[i].index = msr_area[i + 1].index;
+        rdmsrl(msr_area[i].index, msr_area[i].data);
+    }
+    msr_area[msr_count - 1].index = 0;
+
+    curr->arch.hvm_vmx.msr_count = --msr_count;
+    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
+    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
+
+    return;
+}
+
 int vmx_add_host_load_msr(u32 msr)
 {
     struct vcpu *curr = current;
@@ -1166,6 +1196,35 @@ int vmx_add_host_load_msr(u32 msr)
     return 0;
 }
 
+void vmx_rm_host_load_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.host_msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
+
+    if ( msr_area == NULL )
+        return;
+
+    for ( idx = 0; idx < msr_count; idx++ )
+        if ( msr_area[idx].index == msr )
+            break;
+
+    if ( idx == msr_count )
+        return;
+
+    for ( i = idx; i < msr_count - 1; i++ )
+    {
+        msr_area[i].index = msr_area[i + 1].index;
+        rdmsrl(msr_area[i].index, msr_area[i].data);
+    }
+    msr_area[msr_count - 1].index = 0;
+
+    curr->arch.hvm_vmx.host_msr_count = --msr_count;
+    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
+
+    return;
+}
+
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
 {
     int index, offset, changed;
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 101888d..30a948e 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -64,6 +64,47 @@
 #define PMU_FIXED_WIDTH_BITS 8  /* 8 bits 5..12 */
 #define PMU_FIXED_WIDTH_MASK (((1 << PMU_FIXED_WIDTH_BITS) -1) << PMU_FIXED_WIDTH_SHIFT)
 
+static const u32 core2_fix_counters_msr[] = {
+    MSR_CORE_PERF_FIXED_CTR0,
+    MSR_CORE_PERF_FIXED_CTR1,
+    MSR_CORE_PERF_FIXED_CTR2
+};
+#define VPMU_CORE2_NUM_FIXED (sizeof(core2_fix_counters_msr) / sizeof(u32))
+
+/*
+ * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
+ * counters. 4 bits for every counter.
+ */
+#define FIXED_CTR_CTRL_BITS 4
+#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
+
+/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */
+#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0
+
+/* Core 2 Non-architectual Performance Control MSRs. */
+static const u32 core2_ctrls_msr[] = {
+    MSR_CORE_PERF_FIXED_CTR_CTRL,
+    MSR_IA32_PEBS_ENABLE,
+    MSR_IA32_DS_AREA
+};
+#define VPMU_CORE2_NUM_CTRLS (sizeof(core2_ctrls_msr) / sizeof(u32))
+
+struct core2_pmu_enable {
+    char ds_area_enable;
+    char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED];
+    char arch_pmc_enable[1];
+};
+
+struct core2_vpmu_context {
+    struct core2_pmu_enable *pmu_enable;
+    u64 fix_counters[VPMU_CORE2_NUM_FIXED];
+    u64 ctrls[VPMU_CORE2_NUM_CTRLS];
+    u64 global_ovf_status;
+    struct arch_msr_pair arch_msr_pair[1];
+};
+
+static int arch_pmc_cnt; /* Number of general-purpose performance counters */
+
 /*
  * QUIRK to workaround an issue on various family 6 cpus.
  * The issue leads to endless PMC interrupt loops on the processor.
@@ -84,11 +125,8 @@ static void check_pmc_quirk(void)
         is_pmc_quirk = 0;
 }
 
-static int core2_get_pmc_count(void);
 static void handle_pmc_quirk(u64 msr_content)
 {
-    int num_gen_pmc = core2_get_pmc_count();
-    int num_fix_pmc = 3;
     int i;
     u64 val;
 
@@ -96,7 +134,7 @@ static void handle_pmc_quirk(u64 msr_content)
         return;
 
     val = msr_content;
-    for ( i = 0; i < num_gen_pmc; i++ )
+    for ( i = 0; i < arch_pmc_cnt; i++ )
     {
         if ( val & 0x1 )
         {
@@ -108,7 +146,7 @@ static void handle_pmc_quirk(u64 msr_content)
         val >>= 1;
     }
     val = msr_content >> 32;
-    for ( i = 0; i < num_fix_pmc; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
     {
         if ( val & 0x1 )
         {
@@ -121,45 +159,6 @@ static void handle_pmc_quirk(u64 msr_content)
     }
 }
 
-static const u32 core2_fix_counters_msr[] = {
-    MSR_CORE_PERF_FIXED_CTR0,
-    MSR_CORE_PERF_FIXED_CTR1,
-    MSR_CORE_PERF_FIXED_CTR2
-};
-
-/*
- * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
- * counters. 4 bits for every counter.
- */
-#define FIXED_CTR_CTRL_BITS 4
-#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
-
-/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */
-#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0
-
-/* Core 2 Non-architectual Performance Control MSRs. */
-static const u32 core2_ctrls_msr[] = {
-    MSR_CORE_PERF_FIXED_CTR_CTRL,
-    MSR_IA32_PEBS_ENABLE,
-    MSR_IA32_DS_AREA
-};
-
-struct pmumsr {
-    unsigned int num;
-    const u32 *msr;
-};
-
-static const struct pmumsr core2_fix_counters = {
-    VPMU_CORE2_NUM_FIXED,
-    core2_fix_counters_msr
-};
-
-static const struct pmumsr core2_ctrls = {
-    VPMU_CORE2_NUM_CTRLS,
-    core2_ctrls_msr
-};
-static int arch_pmc_cnt;
-
 /*
  * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
  */
@@ -167,19 +166,14 @@ static int core2_get_pmc_count(void)
 {
     u32 eax, ebx, ecx, edx;
 
-    if ( arch_pmc_cnt == 0 )
-    {
-        cpuid(0xa, &eax, &ebx, &ecx, &edx);
-        arch_pmc_cnt = (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT;
-    }
-
-    return arch_pmc_cnt;
+    cpuid(0xa, &eax, &ebx, &ecx, &edx);
+    return ( (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT );
 }
 
 static u64 core2_calc_intial_glb_ctrl_msr(void)
 {
-    int arch_pmc_bits = (1 << core2_get_pmc_count()) - 1;
-    u64 fix_pmc_bits = (1 << 3) - 1;
+    int arch_pmc_bits = (1 << arch_pmc_cnt) - 1;
+    u64 fix_pmc_bits = (1 << VPMU_CORE2_NUM_FIXED) - 1;
     return ((fix_pmc_bits << 32) | arch_pmc_bits);
 }
 
@@ -196,9 +190,9 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
 {
     int i;
 
-    for ( i = 0; i < core2_fix_counters.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
     {
-        if ( core2_fix_counters.msr[i] == msr_index )
+        if ( core2_fix_counters_msr[i] == msr_index )
         {
             *type = MSR_TYPE_COUNTER;
             *index = i;
@@ -206,9 +200,9 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
         }
     }
 
-    for ( i = 0; i < core2_ctrls.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_CTRLS; i++ )
    {
-        if ( core2_ctrls.msr[i] == msr_index )
+        if ( core2_ctrls_msr[i] == msr_index )
        {
             *type = MSR_TYPE_CTRL;
             *index = i;
@@ -225,7 +219,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
     }
 
     if ( (msr_index >= MSR_IA32_PERFCTR0) &&
-         (msr_index < (MSR_IA32_PERFCTR0 + core2_get_pmc_count())) )
+         (msr_index < (MSR_IA32_PERFCTR0 + arch_pmc_cnt)) )
     {
         *type = MSR_TYPE_ARCH_COUNTER;
         *index = msr_index - MSR_IA32_PERFCTR0;
@@ -233,7 +227,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
     }
 
     if ( (msr_index >= MSR_P6_EVNTSEL0) &&
-         (msr_index < (MSR_P6_EVNTSEL0 + core2_get_pmc_count())) )
+         (msr_index < (MSR_P6_EVNTSEL0 + arch_pmc_cnt)) )
     {
         *type = MSR_TYPE_ARCH_CTRL;
         *index = msr_index - MSR_P6_EVNTSEL0;
@@ -248,13 +242,13 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
     int i;
 
     /* Allow Read/Write PMU Counters MSR Directly. */
-    for ( i = 0; i < core2_fix_counters.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
     {
-        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
+        clear_bit(msraddr_to_bitpos(core2_fix_counters_msr[i]), msr_bitmap);
+        clear_bit(msraddr_to_bitpos(core2_fix_counters_msr[i]),
                   msr_bitmap + 0x800/BYTES_PER_LONG);
     }
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < arch_pmc_cnt; i++ )
     {
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
@@ -262,9 +256,9 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
-    for ( i = 0; i < core2_ctrls.num; i++ )
-        clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_CTRLS; i++ )
+        clear_bit(msraddr_to_bitpos(core2_ctrls_msr[i]), msr_bitmap);
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
 }
 
@@ -272,21 +266,21 @@ static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
 {
     int i;
 
-    for ( i = 0; i < core2_fix_counters.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
     {
-        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
-        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
+        set_bit(msraddr_to_bitpos(core2_fix_counters_msr[i]), msr_bitmap);
+        set_bit(msraddr_to_bitpos(core2_fix_counters_msr[i]),
                 msr_bitmap + 0x800/BYTES_PER_LONG);
     }
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < arch_pmc_cnt; i++ )
     {
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
                 msr_bitmap + 0x800/BYTES_PER_LONG);
     }
-    for ( i = 0; i < core2_ctrls.num; i++ )
-        set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_CTRLS; i++ )
+        set_bit(msraddr_to_bitpos(core2_ctrls_msr[i]), msr_bitmap);
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
 }
 
@@ -295,9 +289,9 @@ static inline void __core2_vpmu_save(struct vcpu *v)
     int i;
     struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
 
-    for ( i = 0; i < core2_fix_counters.num; i++ )
-        rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
+        rdmsrl(core2_fix_counters_msr[i], core2_vpmu_cxt->fix_counters[i]);
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
 }
 
@@ -322,14 +316,14 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     int i;
     struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
 
-    for ( i = 0; i < core2_fix_counters.num; i++ )
-        wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
+        wrmsrl(core2_fix_counters_msr[i], core2_vpmu_cxt->fix_counters[i]);
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
 
-    for ( i = 0; i < core2_ctrls.num; i++ )
-        wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_CTRLS; i++ )
+        wrmsrl(core2_ctrls_msr[i], core2_vpmu_cxt->ctrls[i]);
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
 }
 
@@ -347,39 +341,46 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt;
-    struct core2_pmu_enable *pmu_enable;
+    struct core2_pmu_enable *pmu_enable = NULL;
 
     if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
         return 0;
 
     wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
     if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
-        return 0;
+        goto out_err;
 
     if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
-        return 0;
+        goto out_err;
     vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
                  core2_calc_intial_glb_ctrl_msr());
 
     pmu_enable = xzalloc_bytes(sizeof(struct core2_pmu_enable) +
-                               core2_get_pmc_count() - 1);
+                               arch_pmc_cnt - 1);
     if ( !pmu_enable )
-        goto out1;
+        goto out_err;
 
     core2_vpmu_cxt = xzalloc_bytes(sizeof(struct core2_vpmu_context) +
-                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
+                    (arch_pmc_cnt-1)*sizeof(struct arch_msr_pair));
     if ( !core2_vpmu_cxt )
-        goto out2;
+        goto out_err;
+
     core2_vpmu_cxt->pmu_enable = pmu_enable;
     vpmu->context = (void *)core2_vpmu_cxt;
+    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
+
     return 1;
- out2:
+
+out_err:
     xfree(pmu_enable);
- out1:
-    gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, PMU feature is "
-             "unavailable on domain %d vcpu %d.\n",
-             v->vcpu_id, v->domain->domain_id);
+    vmx_rm_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL);
+    vmx_rm_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL);
+    release_pmu_ownship(PMU_OWNER_HVM);
+
+    printk("Failed to allocate VPMU resources for domain %u vcpu %u\n",
+           v->vcpu_id, v->domain->domain_id);
+
     return 0;
 }
 
@@ -407,10 +408,8 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
         return 0;
 
     if ( unlikely(!vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) &&
-         (vpmu->context != NULL ||
-          !core2_vpmu_alloc_resource(current)) )
+         !core2_vpmu_alloc_resource(current) )
         return 0;
-    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
 
     /* Do the lazy load staff. */
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
@@ -490,7 +489,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         return 1;
     case MSR_CORE_PERF_GLOBAL_CTRL:
         global_ctrl = msr_content;
-        for ( i = 0; i < core2_get_pmc_count(); i++ )
+        for ( i = 0; i < arch_pmc_cnt; i++ )
         {
             rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
@@ -500,7 +499,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 
         rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
         global_ctrl = msr_content >> 32;
-        for ( i = 0; i < core2_fix_counters.num; i++ )
+        for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
         {
             core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
                 (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -512,7 +511,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         non_global_ctrl = msr_content;
         vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         global_ctrl >>= 32;
-        for ( i = 0; i < core2_fix_counters.num; i++ )
+        for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
         {
             core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
                 (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
@@ -523,14 +522,14 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     default:
         tmp = msr - MSR_P6_EVNTSEL0;
         vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
-        if ( tmp >= 0 && tmp < core2_get_pmc_count() )
+        if ( tmp >= 0 && tmp < arch_pmc_cnt )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
                 (global_ctrl >> tmp) & (msr_content >> 22) & 1;
     }
 
-    for ( i = 0; i < core2_fix_counters.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
         pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
-    for ( i = 0; i < core2_get_pmc_count(); i++ )
+    for ( i = 0; i < arch_pmc_cnt; i++ )
         pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
     pmu_enable |= core2_vpmu_cxt->pmu_enable->ds_area_enable;
     if ( pmu_enable )
@@ -652,7 +651,7 @@ static void core2_vpmu_do_cpuid(unsigned int input,
 static void core2_vpmu_dump(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
-    int i, num;
+    int i;
     struct core2_vpmu_context *core2_vpmu_cxt = NULL;
     u64 val;
 
@@ -670,9 +669,9 @@ static void core2_vpmu_dump(struct vcpu *v)
 
     printk(" vPMU running\n");
     core2_vpmu_cxt = vpmu->context;
-    num = core2_get_pmc_count();
+
     /* Print the contents of the counter and its configuration msr. */
-    for ( i = 0; i < num; i++ )
+    for ( i = 0; i < arch_pmc_cnt; i++ )
     {
         struct arch_msr_pair* msr_pair = core2_vpmu_cxt->arch_msr_pair;
         if ( core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] )
@@ -684,7 +683,7 @@ static void core2_vpmu_dump(struct vcpu *v)
      * MSR_CORE_PERF_FIXED_CTR_CTRL.
      */
     val = core2_vpmu_cxt->ctrls[MSR_CORE_PERF_FIXED_CTR_CTRL_IDX];
-    for ( i = 0; i < core2_fix_counters.num; i++ )
+    for ( i = 0; i < VPMU_CORE2_NUM_FIXED; i++ )
     {
         if ( core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] )
             printk(" fixed_%d: 0x%016lx ctrl: 0x%lx\n",
@@ -707,7 +706,7 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
         if ( is_pmc_quirk )
             handle_pmc_quirk(msr_content);
         core2_vpmu_cxt->global_ovf_status |= msr_content;
-        msr_content = 0xC000000700000000 | ((1 << core2_get_pmc_count()) - 1);
+        msr_content = 0xC000000700000000 | ((1 << arch_pmc_cnt) - 1);
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
     }
     else
@@ -770,7 +769,10 @@ static int core2_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)
         }
     }
 func_out:
+
+    arch_pmc_cnt = core2_get_pmc_count();
     check_pmc_quirk();
+
     return 0;
 }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f30e5ac..5971613 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -470,7 +470,9 @@ void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
 int vmx_add_guest_msr(u32 msr);
+void vmx_rm_guest_msr(u32 msr);
 int vmx_add_host_load_msr(u32 msr);
+void vmx_rm_host_load_msr(u32 msr);
 void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
diff --git a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
index 60b05fd..410372d 100644
--- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
+++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
@@ -23,29 +23,10 @@
 #ifndef __ASM_X86_HVM_VPMU_CORE_H_
 #define __ASM_X86_HVM_VPMU_CORE_H_
 
-/* Currently only 3 fixed counters are supported. */
-#define VPMU_CORE2_NUM_FIXED 3
-/* Currently only 3 Non-architectual Performance Control MSRs */
-#define VPMU_CORE2_NUM_CTRLS 3
-
 struct arch_msr_pair {
     u64 counter;
     u64 control;
 };
 
-struct core2_pmu_enable {
-    char ds_area_enable;
-    char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED];
-    char arch_pmc_enable[1];
-};
-
-struct core2_vpmu_context {
-    struct core2_pmu_enable *pmu_enable;
-    u64 fix_counters[VPMU_CORE2_NUM_FIXED];
-    u64 ctrls[VPMU_CORE2_NUM_CTRLS];
-    u64 global_ovf_status;
-    struct arch_msr_pair arch_msr_pair[1];
-};
-
 #endif /* __ASM_X86_HVM_VPMU_CORE_H_ */
-- 
1.8.1.4