Re: [Xen-devel] [PATCH v2 05/13] intel/VPMU: Clean up Intel VPMU code
On Friday 20 September 2013 at 05:42:04, Boris Ostrovsky wrote:
> Remove struct pmumsr and core2_pmu_enable. Replace static MSR structures with
> fields in core2_vpmu_context.
> 
> Call core2_get_pmc_count() once, during initialization.
> 
> Properly clean up when core2_vpmu_alloc_resource() fails and add routines
> to remove MSRs from VMCS.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c              |  59 +++++++
>  xen/arch/x86/hvm/vmx/vpmu_core2.c        | 289 ++++++++++++++-----------------
>  xen/include/asm-x86/hvm/vmx/vmcs.h       |   2 +
>  xen/include/asm-x86/hvm/vmx/vpmu_core2.h |  19 --
>  4 files changed, 191 insertions(+), 178 deletions(-)
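For reference, the one-time counter enumeration this series switches to reads CPUID leaf 0xa: EAX[15:8] holds the number of general-purpose counters and EDX[4:0] the number of fixed-function ones. A minimal standalone sketch (assuming GCC's <cpuid.h>; not part of the patch) that dumps the same values on a host:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0xa: architectural performance monitoring. */
        __get_cpuid(0xa, &eax, &ebx, &ecx, &edx);
        printf("general-purpose PMCs: %u\n", (eax >> 8) & 0xff);
        printf("fixed-function PMCs:  %u\n", edx & 0x1f);
        return 0;
    }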
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index de9f592..756bc13 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1136,6 +1136,36 @@ int vmx_add_guest_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_guest_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +

unneeded spaces

> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +

unneeded spaces

> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;
> +
> +    curr->arch.hvm_vmx.msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> +    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  int vmx_add_host_load_msr(u32 msr)
>  {
>      struct vcpu *curr = current;
> @@ -1166,6 +1196,35 @@ int vmx_add_host_load_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_host_load_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.host_msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +
> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +

unneeded spaces

> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;

trailing spaces

> +
> +    curr->arch.hvm_vmx.host_msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
>  {
>      int index, offset, changed;
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 101888d..50f784f 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -65,6 +65,26 @@
>  #define PMU_FIXED_WIDTH_MASK (((1 << PMU_FIXED_WIDTH_BITS) -1) << PMU_FIXED_WIDTH_SHIFT)
>  
>  /*
> + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> + * counters. 4 bits for every counter.
> + */
> +#define FIXED_CTR_CTRL_BITS 4
> +#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> +
> +#define VPMU_CORE2_MAX_FIXED_PMCS 4
> +struct core2_vpmu_context {
> +    u64 fixed_ctrl;
> +    u64 ds_area;
> +    u64 pebs_enable;
> +    u64 global_ovf_status;
> +    u64 fix_counters[VPMU_CORE2_MAX_FIXED_PMCS];
> +    struct arch_msr_pair arch_msr_pair[1];
> +};
> +
> +static int arch_pmc_cnt;  /* Number of general-purpose performance counters */
> +static int fixed_pmc_cnt; /* Number of fixed performance counters */
> +
> +/*
>   * QUIRK to workaround an issue on various family 6 cpus.
>   * The issue leads to endless PMC interrupt loops on the processor.
>   * If the interrupt handler is running and a pmc reaches the value 0, this
> @@ -84,11 +104,8 @@ static void check_pmc_quirk(void)
>          is_pmc_quirk = 0;
>  }
>  
> -static int core2_get_pmc_count(void);
>  static void handle_pmc_quirk(u64 msr_content)
>  {
> -    int num_gen_pmc = core2_get_pmc_count();
> -    int num_fix_pmc = 3;
>      int i;
>      u64 val;
>  
> @@ -96,7 +113,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          return;
>  
>      val = msr_content;
> -    for ( i = 0; i < num_gen_pmc; i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -108,7 +125,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          val >>= 1;
>      }
>      val = msr_content >> 32;
> -    for ( i = 0; i < num_fix_pmc; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -121,65 +138,32 @@ static void handle_pmc_quirk(u64 msr_content)
>      }
>  }
>  
> -static const u32 core2_fix_counters_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR0,
> -    MSR_CORE_PERF_FIXED_CTR1,
> -    MSR_CORE_PERF_FIXED_CTR2
> -};
> -
>  /*
> - * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> - * counters. 4 bits for every counter.
> + * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
>   */
> -#define FIXED_CTR_CTRL_BITS 4
> -#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> -
> -/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */
> -#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0
> -
> -/* Core 2 Non-architectual Performance Control MSRs. */
> -static const u32 core2_ctrls_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR_CTRL,
> -    MSR_IA32_PEBS_ENABLE,
> -    MSR_IA32_DS_AREA
> -};
> -
> -struct pmumsr {
> -    unsigned int num;
> -    const u32 *msr;
> -};
> -
> -static const struct pmumsr core2_fix_counters = {
> -    VPMU_CORE2_NUM_FIXED,
> -    core2_fix_counters_msr
> -};
> +static int core2_get_arch_pmc_count(void)
> +{
> +    u32 eax, ebx, ecx, edx;
>  
> -static const struct pmumsr core2_ctrls = {
> -    VPMU_CORE2_NUM_CTRLS,
> -    core2_ctrls_msr
> -};
> -static int arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);
> +    return ( (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT );
> +}
>  
>  /*
> - * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
> + * Read the number of fixed counters via CPUID.EDX[0xa].EDX[0..4]
>   */
> -static int core2_get_pmc_count(void)
> +static int core2_get_fixed_pmc_count(void)
>  {
>      u32 eax, ebx, ecx, edx;
>  
> -    if ( arch_pmc_cnt == 0 )
> -    {
> -        cpuid(0xa, &eax, &ebx, &ecx, &edx);
> -        arch_pmc_cnt = (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT;
> -    }
> -
> -    return arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);
> +    return ( (eax & PMU_FIXED_NR_MASK) >> PMU_FIXED_NR_SHIFT );
>  }
>  
>  static u64 core2_calc_intial_glb_ctrl_msr(void)
>  {
> -    int arch_pmc_bits = (1 << core2_get_pmc_count()) - 1;
> -    u64 fix_pmc_bits = (1 << 3) - 1;
> +    int arch_pmc_bits = (1 << arch_pmc_cnt) - 1;
> +    u64 fix_pmc_bits = (1 << fixed_pmc_cnt) - 1;
>      return ((fix_pmc_bits << 32) | arch_pmc_bits);
>  }
>  
> @@ -196,9 +180,9 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
>  {
>      int i;
>  
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
> -        if ( core2_fix_counters.msr[i] == msr_index )
> +        if ( msr_index == MSR_CORE_PERF_FIXED_CTR0 + i )
>          {
>              *type = MSR_TYPE_COUNTER;
>              *index = i;
> @@ -206,14 +190,12 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
>          }
>      }
>  
> -    for ( i = 0; i < core2_ctrls.num; i++ )
> +    if ( (msr_index == MSR_CORE_PERF_FIXED_CTR_CTRL ) ||
> +         (msr_index == MSR_IA32_DS_AREA) ||
> +         (msr_index == MSR_IA32_PEBS_ENABLE) )
>      {
> -        if ( core2_ctrls.msr[i] == msr_index )
> -        {
> -            *type = MSR_TYPE_CTRL;
> -            *index = i;
> -            return 1;
> -        }
> +        *type = MSR_TYPE_CTRL;
> +        return 1;
>      }
>  
>      if ( (msr_index == MSR_CORE_PERF_GLOBAL_CTRL) ||
> @@ -225,7 +207,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
>      }
>  
>      if ( (msr_index >= MSR_IA32_PERFCTR0) &&
> -         (msr_index < (MSR_IA32_PERFCTR0 + core2_get_pmc_count())) )
> +         (msr_index < (MSR_IA32_PERFCTR0 + arch_pmc_cnt)) )
>      {
>          *type = MSR_TYPE_ARCH_COUNTER;
>          *index = msr_index - MSR_IA32_PERFCTR0;
> @@ -233,7 +215,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
>      }
>  
>      if ( (msr_index >= MSR_P6_EVNTSEL0) &&
> -         (msr_index < (MSR_P6_EVNTSEL0 + core2_get_pmc_count())) )
> +         (msr_index < (MSR_P6_EVNTSEL0 + arch_pmc_cnt)) )
>      {
>          *type = MSR_TYPE_ARCH_CTRL;
>          *index = msr_index - MSR_P6_EVNTSEL0;
> @@ -248,13 +230,13 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
>      int i;
>  
>      /* Allow Read/Write PMU Counters MSR Directly. */
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
> -        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
> -        clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
> +        clear_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR0 + i), msr_bitmap);
> +        clear_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR0 + i),
>                    msr_bitmap + 0x800/BYTES_PER_LONG);
>      }
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
>          clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
> @@ -262,32 +244,37 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
>      }
>  
>      /* Allow Read PMU Non-global Controls Directly. */
> -    for ( i = 0; i < core2_ctrls.num; i++ )
> -        clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>          clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
> +
> +    clear_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR_CTRL), msr_bitmap);
> +    clear_bit(msraddr_to_bitpos(MSR_IA32_PEBS_ENABLE), msr_bitmap);
> +    clear_bit(msraddr_to_bitpos(MSR_IA32_DS_AREA), msr_bitmap);
>  }
>  
>  static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
>  {
>      int i;
>  
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
> -        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
> -        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
> +        set_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR0 + i), msr_bitmap);
> +        set_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR0 + i),
>                  msr_bitmap + 0x800/BYTES_PER_LONG);
>      }
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
>          set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
>                  msr_bitmap + 0x800/BYTES_PER_LONG);
>      }
> -    for ( i = 0; i < core2_ctrls.num; i++ )
> -        set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> +
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>          set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
> +
> +    set_bit(msraddr_to_bitpos(MSR_CORE_PERF_FIXED_CTR_CTRL), msr_bitmap);
> +    set_bit(msraddr_to_bitpos(MSR_IA32_PEBS_ENABLE), msr_bitmap);
> +    set_bit(msraddr_to_bitpos(MSR_IA32_DS_AREA), msr_bitmap);
>  }
>  
>  static inline void __core2_vpmu_save(struct vcpu *v)
> @@ -295,10 +282,10 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>      int i;
>      struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
>  
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> -        rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
> +        rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, core2_vpmu_cxt->fix_counters[i]);
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
> +        rdmsrl(MSR_IA32_PERFCTR0 + i, core2_vpmu_cxt->arch_msr_pair[i].counter);
>  }
>  
>  static int core2_vpmu_save(struct vcpu *v)
> @@ -322,14 +309,16 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>      int i;
>      struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
>  
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> -        wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]);
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
> +        wrmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, core2_vpmu_cxt->fix_counters[i]);
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
> +        wrmsrl(MSR_IA32_PERFCTR0 + i, core2_vpmu_cxt->arch_msr_pair[i].counter);
>  
> -    for ( i = 0; i < core2_ctrls.num; i++ )
> -        wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> +    wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
> +    wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
> +    wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
> +
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>          wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
>  }
>  
> @@ -347,56 +336,39 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>      struct core2_vpmu_context *core2_vpmu_cxt;
> -    struct core2_pmu_enable *pmu_enable;
>  
>      if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
>          return 0;
>  
>      wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
>      if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> -        return 0;
> +        goto out_err;
>  
>      if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> -        return 0;
> +        goto out_err;
>      vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
>                          core2_calc_intial_glb_ctrl_msr());
>  
> -    pmu_enable = xzalloc_bytes(sizeof(struct core2_pmu_enable) +
> -                               core2_get_pmc_count() - 1);
> -    if ( !pmu_enable )
> -        goto out1;
> -
>      core2_vpmu_cxt = xzalloc_bytes(sizeof(struct core2_vpmu_context) +
> -                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
> +                    (arch_pmc_cnt-1)*sizeof(struct arch_msr_pair));
>      if ( !core2_vpmu_cxt )
> -        goto out2;
> -    core2_vpmu_cxt->pmu_enable = pmu_enable;
> +        goto out_err;
> +
>      vpmu->context = (void *)core2_vpmu_cxt;
>  
> +    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
> +
>      return 1;
> - out2:
> -    xfree(pmu_enable);
> - out1:
> -    gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, PMU feature is "
> -             "unavailable on domain %d vcpu %d.\n",
> -             v->vcpu_id, v->domain->domain_id);
> -    return 0;
> -}
>  
> -static void core2_vpmu_save_msr_context(struct vcpu *v, int type,
> -                                        int index, u64 msr_data)
> -{
> -    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
> +out_err:
> +    vmx_rm_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL);
> +    vmx_rm_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL);
> +    release_pmu_ownship(PMU_OWNER_HVM);
>  
> -    switch ( type )
> -    {
> -    case MSR_TYPE_CTRL:
> -        core2_vpmu_cxt->ctrls[index] = msr_data;
> -        break;
> -    case MSR_TYPE_ARCH_CTRL:
> -        core2_vpmu_cxt->arch_msr_pair[index].control = msr_data;
> -        break;
> -    }
> +    printk("Failed to allocate VPMU resources for domain %u vcpu %u\n",
> +           v->vcpu_id, v->domain->domain_id);
> +
> +    return 0;
>  }
>  
>  static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
> @@ -407,10 +379,8 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>          return 0;
>  
>      if ( unlikely(!vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) &&
> -         (vpmu->context != NULL ||
> -          !core2_vpmu_alloc_resource(current)) )
> +         !core2_vpmu_alloc_resource(current) )
>          return 0;
> -    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
>  
>      /* Do the lazy load staff. */
>      if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
> @@ -426,7 +396,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>  static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>  {
>      u64 global_ctrl, non_global_ctrl;
> -    char pmu_enable = 0;
> +    unsigned pmu_enable = 0;
>      int i, tmp;
>      int type = -1, index = -1;
>      struct vcpu *v = current;
> @@ -471,6 +441,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          if ( msr_content & 1 )
>              gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
>                       "which is not supported.\n");
> +        core2_vpmu_cxt->pebs_enable = msr_content;
>          return 1;
>      case MSR_IA32_DS_AREA:
>          if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
> @@ -483,27 +454,25 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>                  hvm_inject_hw_exception(TRAP_gp_fault, 0);
>                  return 1;
>              }
> -            core2_vpmu_cxt->pmu_enable->ds_area_enable = msr_content ? 1 : 0;
> +            core2_vpmu_cxt->ds_area = msr_content;
>              break;
>          }
>          gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
>          return 1;
>      case MSR_CORE_PERF_GLOBAL_CTRL:
>          global_ctrl = msr_content;
> -        for ( i = 0; i < core2_get_pmc_count(); i++ )
> +        for ( i = 0; i < arch_pmc_cnt; i++ )
>          {
>              rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
> -            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
> -                    global_ctrl & (non_global_ctrl >> 22) & 1;
> +            pmu_enable += global_ctrl & (non_global_ctrl >> 22) & 1;
>              global_ctrl >>= 1;
>          }
>  
>          rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
>          global_ctrl = msr_content >> 32;
> -        for ( i = 0; i < core2_fix_counters.num; i++ )
> +        for ( i = 0; i < fixed_pmc_cnt; i++ )
>          {
> -            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
> -                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
> +            pmu_enable += (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
>              non_global_ctrl >>= FIXED_CTR_CTRL_BITS;
>              global_ctrl >>= 1;
>          }
> @@ -512,27 +481,27 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          non_global_ctrl = msr_content;
>          vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
>          global_ctrl >>= 32;
> -        for ( i = 0; i < core2_fix_counters.num; i++ )
> +        for ( i = 0; i < fixed_pmc_cnt; i++ )
>          {
> -            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
> -                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
> +            pmu_enable += (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
>              non_global_ctrl >>= 4;
>              global_ctrl >>= 1;
>          }
> +        core2_vpmu_cxt->fixed_ctrl = msr_content;
>          break;
>      default:
>          tmp = msr - MSR_P6_EVNTSEL0;
> -        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> -        if ( tmp >= 0 && tmp < core2_get_pmc_count() )
> -            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
> -                (global_ctrl >> tmp) & (msr_content >> 22) & 1;
> +        if ( tmp >= 0 && tmp < arch_pmc_cnt )
> +        {
> +            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> +            core2_vpmu_cxt->arch_msr_pair[tmp].control = msr_content;
> +            for ( i = 0; i < arch_pmc_cnt && !pmu_enable; i++ )
> +                pmu_enable += (global_ctrl >> i) &
> +                    (core2_vpmu_cxt->arch_msr_pair[i].control >> 22) & 1;
> +        }
>      }
>  
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> -        pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
> -    for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
> -    pmu_enable |= core2_vpmu_cxt->pmu_enable->ds_area_enable;
> +    pmu_enable += (core2_vpmu_cxt->ds_area != 0);
>      if ( pmu_enable )
>          vpmu_set(vpmu, VPMU_RUNNING);
>      else
> @@ -551,7 +520,6 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
>      }
>  
> -    core2_vpmu_save_msr_context(v, type, index, msr_content);
>      if ( type != MSR_TYPE_GLOBAL )
>      {
>          u64 mask;
> @@ -567,7 +535,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              if ( msr == MSR_IA32_DS_AREA )
>                  break;
>              /* 4 bits per counter, currently 3 fixed counters implemented. */
> -            mask = ~((1ull << (VPMU_CORE2_NUM_FIXED * FIXED_CTR_CTRL_BITS)) - 1);
> +            mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
>              if (msr_content & mask)
>                  inject_gp = 1;
>              break;
> @@ -652,7 +620,7 @@ static void core2_vpmu_do_cpuid(unsigned int input,
>  static void core2_vpmu_dump(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> -    int i, num;
> +    int i;
>      struct core2_vpmu_context *core2_vpmu_cxt = NULL;
>      u64 val;
>  
> @@ -670,26 +638,24 @@ static void core2_vpmu_dump(struct vcpu *v)
>  
>      printk("    vPMU running\n");
>      core2_vpmu_cxt = vpmu->context;
> -    num = core2_get_pmc_count();
> +
>      /* Print the contents of the counter and its configuration msr. */
> -    for ( i = 0; i < num; i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          struct arch_msr_pair* msr_pair = core2_vpmu_cxt->arch_msr_pair;
> -        if ( core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] )
> -            printk("      general_%d: 0x%016lx ctrl: 0x%016lx\n",
> -                   i, msr_pair[i].counter, msr_pair[i].control);
> +        printk("      general_%d: 0x%016lx ctrl: 0x%016lx\n",
> +               i, msr_pair[i].counter, msr_pair[i].control);
>      }
>      /*
>       * The configuration of the fixed counter is 4 bits each in the
>       * MSR_CORE_PERF_FIXED_CTR_CTRL.
>       */
> -    val = core2_vpmu_cxt->ctrls[MSR_CORE_PERF_FIXED_CTR_CTRL_IDX];
> -    for ( i = 0; i < core2_fix_counters.num; i++ )
> +    val = core2_vpmu_cxt->fixed_ctrl;
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
> -        if ( core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] )
> -            printk("      fixed_%d: 0x%016lx ctrl: 0x%lx\n",
> -                   i, core2_vpmu_cxt->fix_counters[i],
> -                   val & FIXED_CTR_CTRL_MASK);
> +        printk("      fixed_%d: 0x%016lx ctrl: 0x%lx\n",
> +               i, core2_vpmu_cxt->fix_counters[i],
> +               val & FIXED_CTR_CTRL_MASK);
>          val >>= FIXED_CTR_CTRL_BITS;
>      }
>  }
> @@ -707,7 +673,7 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
>          if ( is_pmc_quirk )
>              handle_pmc_quirk(msr_content);
>          core2_vpmu_cxt->global_ovf_status |= msr_content;
> -        msr_content = 0xC000000700000000 | ((1 << core2_get_pmc_count()) - 1);
> +        msr_content = 0xC000000700000000 | ((1 << arch_pmc_cnt) - 1);
>          wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
>      }
>      else
> @@ -770,18 +736,23 @@ static int core2_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)
>          }
>      }
>  func_out:
> +
> +    arch_pmc_cnt = core2_get_arch_pmc_count();
> +    fixed_pmc_cnt = core2_get_fixed_pmc_count();
> +    if ( fixed_pmc_cnt > VPMU_CORE2_MAX_FIXED_PMCS )
> +        fixed_pmc_cnt = VPMU_CORE2_MAX_FIXED_PMCS;

Maybe printing a message when limiting the number of counters?
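Something along these lines would do (untested sketch -- message text and log level are only an example):

    if ( fixed_pmc_cnt > VPMU_CORE2_MAX_FIXED_PMCS )
    {
        /* Example message only; pick whatever wording/level fits. */
        printk(XENLOG_WARNING "VPMU: limiting fixed PMCs from %d to %d\n",
               fixed_pmc_cnt, VPMU_CORE2_MAX_FIXED_PMCS);
        fixed_pmc_cnt = VPMU_CORE2_MAX_FIXED_PMCS;
    }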
Dietmar.

>      check_pmc_quirk();
> +
>      return 0;
>  }
>  
>  static void core2_vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> -    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
>  
>      if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
>          return;
> -    xfree(core2_vpmu_cxt->pmu_enable);
> +
>      xfree(vpmu->context);
>      if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(v->domain) )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index f30e5ac..5971613 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -470,7 +470,9 @@ void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
>  int vmx_read_guest_msr(u32 msr, u64 *val);
>  int vmx_write_guest_msr(u32 msr, u64 val);
>  int vmx_add_guest_msr(u32 msr);
> +void vmx_rm_guest_msr(u32 msr);
>  int vmx_add_host_load_msr(u32 msr);
> +void vmx_rm_host_load_msr(u32 msr);
>  void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
>  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
>  void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
> diff --git a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
> index 60b05fd..410372d 100644
> --- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
> +++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h
> @@ -23,29 +23,10 @@
>  #ifndef __ASM_X86_HVM_VPMU_CORE_H_
>  #define __ASM_X86_HVM_VPMU_CORE_H_
>  
> -/* Currently only 3 fixed counters are supported. */
> -#define VPMU_CORE2_NUM_FIXED 3
> -/* Currently only 3 Non-architectual Performance Control MSRs */
> -#define VPMU_CORE2_NUM_CTRLS 3
> -
>  struct arch_msr_pair {
>      u64 counter;
>      u64 control;
>  };
>  
> -struct core2_pmu_enable {
> -    char ds_area_enable;
> -    char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED];
> -    char arch_pmc_enable[1];
> -};
> -
> -struct core2_vpmu_context {
> -    struct core2_pmu_enable *pmu_enable;
> -    u64 fix_counters[VPMU_CORE2_NUM_FIXED];
> -    u64 ctrls[VPMU_CORE2_NUM_CTRLS];
> -    u64 global_ovf_status;
> -    struct arch_msr_pair arch_msr_pair[1];
> -};
> -
>  #endif /* __ASM_X86_HVM_VPMU_CORE_H_ */
> 

-- 
Company details: http://ts.fujitsu.com/imprint.html