[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH-v2] vpmu intel: Add cpuid handling when vpmu disabled
Even though vpmu is disabled in the hypervisor in the HVM guest the call of cpuid(0xa) returns information about usable performance counters. This may confuse guest software when trying to use the counters and nothing happens. This patch clears most bits in registers eax and edx of cpuid(0xa) instruction for the guest when vpmu is disabled: - version ID of architectural performance counting - number of general pmu registers - width of general pmu registers - number of fixed pmu registers - width of fixed pmu registers Thanks. Dietmar. Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx> --- Changes from v1: As Konrad suggested I added a little bit more of documentation to the defines. diff -r 2246964a25a8 xen/arch/x86/hvm/svm/vpmu.c --- a/xen/arch/x86/hvm/svm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/svm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 @@ -370,6 +370,10 @@ int svm_vpmu_initialise(struct vcpu *v, uint8_t family = current_cpu_data.x86; int ret = 0; + /* vpmu enabled? */ + if ( !vpmu_flags ) + return 0; + switch ( family ) { case 0x10: diff -r 2246964a25a8 xen/arch/x86/hvm/vmx/vpmu_core2.c --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Mar 26 12:01:11 2013 +0100 @@ -731,6 +731,62 @@ struct arch_vpmu_ops core2_vpmu_ops = { .arch_vpmu_load = core2_vpmu_load }; +/* + * See Intel SDM Vol 2a Instruction Set Reference for CPUID instruction. 
+ * cpuid 0xa - Architectural Performance Monitoring Leaf + * Register eax + */ +#define X86_FEATURE_PMU_VER_OFF 0 /* Version ID */ +#define FEATURE_PMU_VER_BITS 8 /* 8 bits 0..7 */ +#define X86_FEATURE_NUM_GEN_OFF 8 /* Number of general pmu registers */ +#define FEATURE_NUM_GEN_BITS 8 /* 8 bits 8..15 */ +#define X86_FEATURE_GEN_WIDTH_OFF 16 /* Width of general pmu registers */ +#define FEATURE_GEN_WIDTH_BITS 8 /* 8 bits 16..23 */ +/* Register edx */ +#define X86_FEATURE_NUM_FIX_OFF 0 /* Number of fixed pmu registers */ +#define FEATURE_NUM_FIX_BITS 5 /* 5 bits 0..4 */ +#define X86_FEATURE_FIX_WIDTH_OFF 5 /* Width of fixed pmu registers */ +#define FEATURE_FIX_WIDTH_BITS 8 /* 8 bits 5..12 */ + +static void core2_no_vpmu_do_cpuid(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + /* + * As in this case the vpmu is not enabled reset some bits in the + * architectural performance monitoring related part. + */ + if ( input == 0xa ) + { + *eax &= ~(((1 << FEATURE_PMU_VER_BITS) -1) << X86_FEATURE_PMU_VER_OFF); + *eax &= ~(((1 << FEATURE_NUM_GEN_BITS) -1) << X86_FEATURE_NUM_GEN_OFF); + *eax &= ~(((1 << FEATURE_GEN_WIDTH_BITS) -1) << X86_FEATURE_GEN_WIDTH_OFF); + + *edx &= ~(((1 << FEATURE_NUM_FIX_BITS) -1) << X86_FEATURE_NUM_FIX_OFF); + *edx &= ~(((1 << FEATURE_FIX_WIDTH_BITS) -1) << X86_FEATURE_FIX_WIDTH_OFF); + } +} + +/* + * If it's a vpmu msr set it to 0. + */ +static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) +{ + int type = -1, index = -1; + if ( !is_core2_vpmu_msr(msr, &type, &index) ) + return 0; + *msr_content = 0; + return 1; +} + +/* + * These functions are used in case vpmu is not enabled. 
+ */ +struct arch_vpmu_ops core2_no_vpmu_ops = { + .do_rdmsr = core2_no_vpmu_do_rdmsr, + .do_cpuid = core2_no_vpmu_do_cpuid, +}; + int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) { struct vpmu_struct *vpmu = vcpu_vpmu(v); @@ -738,6 +794,10 @@ int vmx_vpmu_initialise(struct vcpu *v, uint8_t cpu_model = current_cpu_data.x86_model; int ret = 0; + vpmu->arch_vpmu_ops = &core2_no_vpmu_ops; + if ( !vpmu_flags ) + return 0; + if ( family == 6 ) { switch ( cpu_model ) diff -r 2246964a25a8 xen/arch/x86/hvm/vpmu.c --- a/xen/arch/x86/hvm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 @@ -67,7 +67,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); return 0; } @@ -76,7 +76,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); return 0; } @@ -85,7 +85,7 @@ int vpmu_do_interrupt(struct cpu_user_re { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) return vpmu->arch_vpmu_ops->do_interrupt(regs); return 0; } @@ -104,7 +104,7 @@ void vpmu_save(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) vpmu->arch_vpmu_ops->arch_vpmu_save(v); } @@ -112,7 +112,7 @@ void vpmu_load(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) vpmu->arch_vpmu_ops->arch_vpmu_load(v); } @@ -121,9 +121,6 @@ void vpmu_initialise(struct vcpu *v) struct vpmu_struct *vpmu = 
vcpu_vpmu(v); uint8_t vendor = current_cpu_data.x86_vendor; - if ( !opt_vpmu_enabled ) - return; - if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) vpmu_destroy(v); vpmu_clear(vpmu); @@ -153,7 +150,7 @@ void vpmu_destroy(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } Attachment:
vpmu_no_vpmu.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |