|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v8 15/19] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
The two routines share most of their logic.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
xen/arch/x86/hvm/svm/svm.c | 9 ++++++---
xen/arch/x86/hvm/vmx/vmx.c | 11 +++++++----
xen/arch/x86/hvm/vpmu.c | 43 +++++++++++++-----------------------------
xen/arch/x86/traps.c | 4 ++--
xen/include/asm-x86/hvm/vpmu.h | 6 ++++--
5 files changed, 32 insertions(+), 41 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index da5af5c..5029211 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1642,7 +1642,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto gpf;
break;
@@ -1794,10 +1794,13 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ {
+ uint64_t msr_val = msr_content;
+
+ if ( vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) )
goto gpf;
break;
-
+ }
case MSR_IA32_MCx_MISC(4): /* Threshold register */
case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
/*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 742e04c..27f8b15 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2078,7 +2078,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
/* Perhaps vpmu will change some bits. */
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto gp_fault;
break;
case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
@@ -2087,7 +2087,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_IA32_PEBS_ENABLE:
case MSR_IA32_DS_AREA:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_msr(msr, msr_content, VPMU_MSR_READ) )
goto gp_fault;
break;
default:
@@ -2240,6 +2240,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v)
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
struct vcpu *v = current;
+ uint64_t msr_val;
HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%#x, msr_value=%#"PRIx64, msr, msr_content);
@@ -2263,7 +2264,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
if ( msr_content & ~supported )
{
/* Perhaps some other bits are supported in vpmu. */
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ msr_val = msr_content;
+ if ( vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) )
goto gp_fault;
}
if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2299,7 +2301,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_IA32_PEBS_ENABLE:
case MSR_IA32_DS_AREA:
- if ( vpmu_do_wrmsr(msr, msr_content) )
+ msr_val = msr_content;
+ if ( vpmu_do_msr(msr, &msr_val, VPMU_MSR_WRITE) )
goto gp_fault;
break;
default:
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 364ec4e..dacfd81 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -91,7 +91,7 @@ void vpmu_lvtpc_update(uint32_t val)
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw)
{
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -99,13 +99,21 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
if ( !(vpmu_mode & XENPMU_MODE_ON) )
return 0;
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
+ ASSERT((rw == VPMU_MSR_READ) || (rw == VPMU_MSR_WRITE));
+
+ if ( vpmu->arch_vpmu_ops )
{
- int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
+ int ret;
+ if ( (rw == VPMU_MSR_READ) && vpmu->arch_vpmu_ops->do_rdmsr )
+ ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
+ else if ( vpmu->arch_vpmu_ops->do_wrmsr )
+ ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, *msr_content);
+ else
+ return 0;
/*
- * We may have received a PMU interrupt during WRMSR handling
- * and since do_wrmsr may load VPMU context we should save
+ * We may have received a PMU interrupt while handling MSR access
+ * and since do_wr/rdmsr may load VPMU context we should save
* (and unload) it again.
*/
if ( !is_hvm_domain(v->domain) &&
@@ -121,31 +129,6 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
return 0;
}
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
- struct vcpu *v = current;
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
- if ( !(vpmu_mode & XENPMU_MODE_ON) )
- return 0;
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
- {
- int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
-
- if ( !is_hvm_domain(v->domain) &&
- (vpmu->xenpmu_data &&
- (vpmu->xenpmu_data->pmu_flags & PMU_CACHED)) )
- {
- vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
- vpmu->arch_vpmu_ops->arch_vpmu_save(v);
- vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- }
- return ret;
- }
- return 0;
-}
-
static struct vcpu *choose_hwdom_vcpu(void)
{
struct vcpu *v;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index abde940..5cd7102 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2573,7 +2573,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
{
- if ( vpmu_do_wrmsr(regs->ecx, msr_content) )
+ if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
goto fail;
break;
}
@@ -2693,7 +2693,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
if ( vpmu_msr || (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) )
{
- if ( vpmu_do_rdmsr(regs->ecx, &msr_content) )
+ if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_READ) )
goto fail;
regs->eax = (uint32_t)msr_content;
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 25954c6..0fc5317 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -94,9 +94,11 @@ static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
return !!((vpmu->flags & mask) == mask);
}
+#define VPMU_MSR_READ 0
+#define VPMU_MSR_WRITE 1
+
void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
--
1.8.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |