[PATCH 2/3] x86/vPMU: invoke <vendor>_vpmu_initialise() through a hook as well
I see little point in having an open-coded switch() statement to
achieve the same; like other vendor-specific operations, the function
can be supplied in the respective ops structure instances.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -455,21 +455,11 @@ static int vpmu_arch_initialise(struct v
 
     ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
 
-    if ( !vpmu_available(v) )
+    if ( !vpmu_available(v) || vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
-    switch ( vendor )
+    if ( !vpmu_ops.initialise )
     {
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
-        ret = svm_vpmu_initialise(v);
-        break;
-
-    case X86_VENDOR_INTEL:
-        ret = vmx_vpmu_initialise(v);
-        break;
-
-    default:
         if ( vpmu_mode != XENPMU_MODE_OFF )
         {
             printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
@@ -480,12 +470,17 @@ static int vpmu_arch_initialise(struct v
         return -EINVAL;
     }
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
-
+    ret = alternative_call(vpmu_ops.initialise, v);
     if ( ret )
+    {
         printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
+        return ret;
+    }
+
+    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
+    vpmu_set(vpmu, VPMU_INITIALIZED);
 
-    return ret;
+    return 0;
 }
 
 static void get_vpmu(struct vcpu *v)
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -483,24 +483,11 @@ static void amd_vpmu_dump(const struct v
     }
 }
 
-static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
-    .do_wrmsr = amd_vpmu_do_wrmsr,
-    .do_rdmsr = amd_vpmu_do_rdmsr,
-    .do_interrupt = amd_vpmu_do_interrupt,
-    .arch_vpmu_destroy = amd_vpmu_destroy,
-    .arch_vpmu_save = amd_vpmu_save,
-    .arch_vpmu_load = amd_vpmu_load,
-    .arch_vpmu_dump = amd_vpmu_dump
-};
-
-int svm_vpmu_initialise(struct vcpu *v)
+static int svm_vpmu_initialise(struct vcpu *v)
 {
     struct xen_pmu_amd_ctxt *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( !counters )
         return -EINVAL;
 
@@ -529,11 +516,22 @@ int svm_vpmu_initialise(struct vcpu *v)
                offsetof(struct xen_pmu_amd_ctxt, regs));
     }
 
-    vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);
+    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
 
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
+    .initialise = svm_vpmu_initialise,
+    .do_wrmsr = amd_vpmu_do_wrmsr,
+    .do_rdmsr = amd_vpmu_do_rdmsr,
+    .do_interrupt = amd_vpmu_do_interrupt,
+    .arch_vpmu_destroy = amd_vpmu_destroy,
+    .arch_vpmu_save = amd_vpmu_save,
+    .arch_vpmu_load = amd_vpmu_load,
+    .arch_vpmu_dump = amd_vpmu_dump
+};
+
 static const struct arch_vpmu_ops *__init common_init(void)
 {
     unsigned int i;
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -819,25 +819,12 @@ static void core2_vpmu_destroy(struct vc
     vpmu_clear(vpmu);
 }
 
-static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
-    .do_wrmsr = core2_vpmu_do_wrmsr,
-    .do_rdmsr = core2_vpmu_do_rdmsr,
-    .do_interrupt = core2_vpmu_do_interrupt,
-    .arch_vpmu_destroy = core2_vpmu_destroy,
-    .arch_vpmu_save = core2_vpmu_save,
-    .arch_vpmu_load = core2_vpmu_load,
-    .arch_vpmu_dump = core2_vpmu_dump
-};
-
-int vmx_vpmu_initialise(struct vcpu *v)
+static int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( v->domain->arch.cpuid->basic.pmu_version <= 1 ||
          v->domain->arch.cpuid->basic.pmu_version >= 6 )
         return -EINVAL;
@@ -893,11 +880,20 @@ int vmx_vpmu_initialise(struct vcpu *v)
     if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
         return -EIO;
 
-    vpmu_set(vpmu, VPMU_INITIALIZED);
-
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
+    .initialise = vmx_vpmu_initialise,
+    .do_wrmsr = core2_vpmu_do_wrmsr,
+    .do_rdmsr = core2_vpmu_do_rdmsr,
+    .do_interrupt = core2_vpmu_do_interrupt,
+    .arch_vpmu_destroy = core2_vpmu_destroy,
+    .arch_vpmu_save = core2_vpmu_save,
+    .arch_vpmu_load = core2_vpmu_load,
+    .arch_vpmu_dump = core2_vpmu_dump
+};
+
 const struct arch_vpmu_ops *__init core2_vpmu_init(void)
 {
     unsigned int version = 0;
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -39,6 +39,7 @@
 
 /* Arch specific operations shared by all vpmus */
 struct arch_vpmu_ops {
+    int (*initialise)(struct vcpu *v);
     int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                     uint64_t supported);
     int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
@@ -50,10 +51,8 @@ struct arch_vpmu_ops {
 };
 
 const struct arch_vpmu_ops *core2_vpmu_init(void);
-int vmx_vpmu_initialise(struct vcpu *);
 const struct arch_vpmu_ops *amd_vpmu_init(void);
 const struct arch_vpmu_ops *hygon_vpmu_init(void);
-int svm_vpmu_initialise(struct vcpu *);
 
 struct vpmu_struct {
     u32 flags;
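
For readers less familiar with the pattern being adopted here, the
following is a minimal, self-contained sketch (plain C, not Xen code;
all toy_* names are made up for illustration) of the dispatch shape
the patch moves to: each vendor supplies its initialise function in an
ops structure instance, and the common code makes a single indirect
call instead of an open-coded vendor switch().  Xen invokes the hook
via alternative_call() so the indirect call can be patched into a
direct one at boot; the sketch uses an ordinary function pointer.

#include <stdio.h>

struct toy_vcpu { int id; };

/* Vendor-specific operations, dispatched through function pointers. */
struct toy_ops {
    int (*initialise)(struct toy_vcpu *v);
};

static int toy_amd_initialise(struct toy_vcpu *v)
{
    printf("AMD-style init for vcpu %d\n", v->id);
    return 0;
}

/*
 * Each vendor fills in its own instance, as the patch does for
 * amd_vpmu_ops / core2_vpmu_ops.
 */
static const struct toy_ops toy_amd_ops = {
    .initialise = toy_amd_initialise,
};

/* Chosen once at boot, based on the detected vendor. */
static const struct toy_ops *toy_ops = &toy_amd_ops;

static int toy_arch_initialise(struct toy_vcpu *v)
{
    /* An absent hook stands in for "unknown vendor". */
    if ( !toy_ops->initialise )
        return -1;

    /* One indirect call replaces the per-vendor switch(). */
    return toy_ops->initialise(v);
}

int main(void)
{
    struct toy_vcpu v = { .id = 0 };

    return toy_arch_initialise(&v);
}

The upshot is that the common code needs no knowledge of which vendors
exist: supporting a new vendor means supplying another ops instance,
not touching the shared initialisation path.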