Re: [Xen-devel] [PATCH] x86: remove redundancy of MSR_P6_{PERFCTR, EVNTSEL} definitions
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: Wednesday, September 03, 2014 5:27 AM
>
> Not only did the EVNTSEL ones get defined twice, we can also easily
> abstract out the numbers previously attached to them.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Kevin Tian <kevint.tian@xxxxxxxxx>

>
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -105,9 +105,9 @@ static void handle_pmc_quirk(u64 msr_con
>          if ( val & 0x1 )
>          {
>              u64 cnt;
> -            rdmsrl(MSR_P6_PERFCTR0 + i, cnt);
> +            rdmsrl(MSR_P6_PERFCTR(i), cnt);
>              if ( cnt == 0 )
> -                wrmsrl(MSR_P6_PERFCTR0 + i, 1);
> +                wrmsrl(MSR_P6_PERFCTR(i), 1);
>          }
>          val >>= 1;
>      }
> @@ -238,11 +238,11 @@ static int is_core2_vpmu_msr(u32 msr_ind
>          return 1;
>      }
>
> -    if ( (msr_index >= MSR_P6_EVNTSEL0) &&
> -         (msr_index < (MSR_P6_EVNTSEL0 + core2_get_pmc_count())) )
> +    if ( (msr_index >= MSR_P6_EVNTSEL(0)) &&
> +         (msr_index < (MSR_P6_EVNTSEL(core2_get_pmc_count()))) )
>      {
>          *type = MSR_TYPE_ARCH_CTRL;
> -        *index = msr_index - MSR_P6_EVNTSEL0;
> +        *index = msr_index - MSR_P6_EVNTSEL(0);
>          return 1;
>      }
>
> @@ -278,7 +278,7 @@ static void core2_vpmu_set_msr_bitmap(un
>      for ( i = 0; i < core2_ctrls.num; i++ )
>          clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
>      for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
> +        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL(i)), msr_bitmap);
>  }
>
>  static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
> @@ -308,7 +308,7 @@ static void core2_vpmu_unset_msr_bitmap(
>      for ( i = 0; i < core2_ctrls.num; i++ )
>          set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
>      for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
> +        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL(i)), msr_bitmap);
>  }
>
>  static inline void __core2_vpmu_save(struct vcpu *v)
> @@ -359,7 +359,7 @@ static inline void __core2_vpmu_load(str
>      for ( i = 0; i < core2_ctrls.num; i++ )
>          wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
>      for ( i = 0; i < core2_get_pmc_count(); i++ )
> -        wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
> +        wrmsrl(MSR_P6_EVNTSEL(i), core2_vpmu_cxt->arch_msr_pair[i].control);
>  }
>
>  static void core2_vpmu_load(struct vcpu *v)
> @@ -526,7 +526,7 @@ static int core2_vpmu_do_wrmsr(unsigned
>          global_ctrl = msr_content;
>          for ( i = 0; i < core2_get_pmc_count(); i++ )
>          {
> -            rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
> +            rdmsrl(MSR_P6_EVNTSEL(i), non_global_ctrl);
>              core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
>                  global_ctrl & (non_global_ctrl >> 22) & 1;
>              global_ctrl >>= 1;
> @@ -555,7 +555,7 @@ static int core2_vpmu_do_wrmsr(unsigned
>          }
>          break;
>      default:
> -        tmp = msr - MSR_P6_EVNTSEL0;
> +        tmp = msr - MSR_P6_EVNTSEL(0);
>          vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
>          if ( tmp >= 0 && tmp < core2_get_pmc_count() )
>              core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
> --- a/xen/arch/x86/nmi.c
> +++ b/xen/arch/x86/nmi.c
> @@ -199,7 +199,7 @@ void disable_lapic_nmi_watchdog(void)
>      case X86_VENDOR_INTEL:
>          switch (boot_cpu_data.x86) {
>          case 6:
> -            wrmsr(MSR_P6_EVNTSEL0, 0, 0);
> +            wrmsr(MSR_P6_EVNTSEL(0), 0, 0);
>              break;
>          case 15:
>              wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
> @@ -298,21 +298,21 @@ static void __pminit setup_p6_watchdog(u
>  {
>      unsigned int evntsel;
>
> -    nmi_perfctr_msr = MSR_P6_PERFCTR0;
> +    nmi_perfctr_msr = MSR_P6_PERFCTR(0);
>
> -    clear_msr_range(MSR_P6_EVNTSEL0, 2);
> -    clear_msr_range(MSR_P6_PERFCTR0, 2);
> +    clear_msr_range(MSR_P6_EVNTSEL(0), 2);
> +    clear_msr_range(MSR_P6_PERFCTR(0), 2);
>
>      evntsel = P6_EVNTSEL_INT
>          | P6_EVNTSEL_OS
>          | P6_EVNTSEL_USR
>          | counter;
>
> -    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
> +    wrmsr(MSR_P6_EVNTSEL(0), evntsel, 0);
>      write_watchdog_counter("P6_PERFCTR0");
>      apic_write(APIC_LVTPC, APIC_DM_NMI);
>      evntsel |= P6_EVNTSEL0_ENABLE;
> -    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
> +    wrmsr(MSR_P6_EVNTSEL(0), evntsel, 0);
>  }
>
>  static int __pminit setup_p4_watchdog(void)
> @@ -502,9 +502,9 @@ bool_t nmi_watchdog_tick(const struct cp
>          wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
>          apic_write(APIC_LVTPC, APIC_DM_NMI);
>      }
> -    else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
> +    else if ( nmi_perfctr_msr == MSR_P6_PERFCTR(0) )
>      {
> -        rdmsrl(MSR_P6_PERFCTR0, msr_content);
> +        rdmsrl(MSR_P6_PERFCTR(0), msr_content);
>          if ( msr_content & (1ULL << P6_EVENT_WIDTH) )
>              watchdog_tick = 0;
>
> --- a/xen/arch/x86/oprofile/op_model_ppro.c
> +++ b/xen/arch/x86/oprofile/op_model_ppro.c
> @@ -64,9 +64,9 @@ static void ppro_fill_in_addresses(struc
>      int i;
>
>      for (i = 0; i < num_counters; i++)
> -        msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
> +        msrs->counters[i].addr = MSR_P6_PERFCTR(i);
>      for (i = 0; i < num_counters; i++)
> -        msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
> +        msrs->controls[i].addr = MSR_P6_EVNTSEL(i);
>  }
>
>
> @@ -211,11 +211,11 @@ static int ppro_is_arch_pmu_msr(u64 msr_
>          *index = msr_index - MSR_IA32_PERFCTR0;
>          return 1;
>      }
> -    if ( (msr_index >= MSR_P6_EVNTSEL0) &&
> -         (msr_index < (MSR_P6_EVNTSEL0 + num_counters)) )
> +    if ( (msr_index >= MSR_P6_EVNTSEL(0)) &&
> +         (msr_index < (MSR_P6_EVNTSEL(num_counters))) )
>      {
>          *type = MSR_TYPE_ARCH_CTRL;
> -        *index = msr_index - MSR_P6_EVNTSEL0;
> +        *index = msr_index - MSR_P6_EVNTSEL(0);
>          return 1;
>      }
>
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -112,11 +112,6 @@
>
>  #define MSR_AMD64_MCx_MASK(x)           (MSR_AMD64_MC0_MASK + (x))
>
> -#define MSR_P6_PERFCTR0                 0x000000c1
> -#define MSR_P6_PERFCTR1                 0x000000c2
> -#define MSR_P6_EVNTSEL0                 0x00000186
> -#define MSR_P6_EVNTSEL1                 0x00000187
> -
>  /* MSRs & bits used for VMX enabling */
>  #define MSR_IA32_VMX_BASIC              0x480
>  #define MSR_IA32_VMX_PINBASED_CTLS      0x481
> @@ -329,8 +324,8 @@
>  #define MSR_IA32_ENERGY_PERF_BIAS       0x000001b0
>
>  /* Intel Model 6 */
> -#define MSR_P6_EVNTSEL0                 0x00000186
> -#define MSR_P6_EVNTSEL1                 0x00000187
> +#define MSR_P6_PERFCTR(n)               (0x000000c1 + (n))
> +#define MSR_P6_EVNTSEL(n)               (0x00000186 + (n))
>
>  /* P4/Xeon+ specific */
>  #define MSR_IA32_MCG_EAX                0x00000180
> @@ -488,7 +483,4 @@
>  #define _MSR_MISC_FEATURES_CPUID_FAULTING       0
>  #define MSR_MISC_FEATURES_CPUID_FAULTING        (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
>
> -/* Geode defined MSRs */
> -#define MSR_GEODE_BUSCONT_CONF0         0x00001900
> -
>  #endif /* __ASM_MSR_INDEX_H */
>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
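For readers skimming the conversion, a minimal standalone sketch (an illustration only, not taken from the patch or the Xen tree) of how the new MSR_P6_PERFCTR(n)/MSR_P6_EVNTSEL(n) macros expand to the same addresses as the fixed MSR_P6_PERFCTR0/1 and MSR_P6_EVNTSEL0/1 definitions they replace:

    /* Standalone sketch: the macros reproduce the legacy fixed values. */
    #include <assert.h>
    #include <stdio.h>

    #define MSR_P6_PERFCTR(n) (0x000000c1 + (n))
    #define MSR_P6_EVNTSEL(n) (0x00000186 + (n))

    int main(void)
    {
        int i;

        /* Former MSR_P6_PERFCTR0/1 and MSR_P6_EVNTSEL0/1 values. */
        assert(MSR_P6_PERFCTR(0) == 0x000000c1);
        assert(MSR_P6_PERFCTR(1) == 0x000000c2);
        assert(MSR_P6_EVNTSEL(0) == 0x00000186);
        assert(MSR_P6_EVNTSEL(1) == 0x00000187);

        /* Per-counter addressing, as used in the converted loops. */
        for (i = 0; i < 2; i++)
            printf("counter %d: PERFCTR=%#x EVNTSEL=%#x\n",
                   i, (unsigned)MSR_P6_PERFCTR(i), (unsigned)MSR_P6_EVNTSEL(i));

        return 0;
    }

The "MSR_P6_EVNTSEL0 + i" arithmetic that was previously open-coded at each call site is what the macro now captures in one place, which is also why the upper-bound range checks can be written directly as MSR_P6_EVNTSEL(core2_get_pmc_count()) or MSR_P6_EVNTSEL(num_counters).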