[Xen-devel] [PATCH v16 12/23] x86/VPMU: Replace vcpu with vpmu as argument to some routines
A subsequent patch will add an inline routine to vpmu.h that will call
vpmu_load().
This inline will need to access vcpu->vpmu, which is not possible since struct
vcpu may not be fully defined at that point. So we will have that inline pass
a vpmu pointer to vpmu_load() instead.
This change also slightly simplifies some of the vpmu code.
For symmetry, also modify vpmu_save() (and vpmu_save_force()) to take a vpmu
instead of a vcpu.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
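To illustrate the header-visibility issue described above, here is a minimal
standalone sketch (not part of the patch; the inline's name, the simplified
struct layout and vpmu_load() body below are made up for illustration) of why
an inline in a header that only forward-declares struct vcpu cannot do the
vcpu-to-vpmu translation itself, and why the translation has to happen at call
sites where struct vcpu is complete:

/* sketch.c -- illustrative only; simplified, made-up types and names. */

struct vpmu_struct { int flags; };

/* New-style prototype: vpmu_load() takes the vpmu directly. */
static void vpmu_load(struct vpmu_struct *vpmu) { vpmu->flags |= 1; }

struct vcpu;   /* a vpmu.h-like header sees only this forward declaration */

/*
 * Hypothetical inline, standing in for the one the follow-up patch adds.
 * It cannot be written as
 *
 *     static inline void vpmu_switch_to_sketch(struct vcpu *v)
 *     { vpmu_load(&v->arch.vpmu); }
 *
 * because dereferencing the incomplete struct vcpu does not compile here.
 * Taking the vpmu pointer sidesteps the problem:
 */
static inline void vpmu_switch_to_sketch(struct vpmu_struct *vpmu)
{
    vpmu_load(vpmu);
}

/* A .c file sees the full definition, so it can do the vcpu->vpmu lookup. */
struct vcpu { struct { struct vpmu_struct vpmu; } arch; };

int main(void)
{
    struct vcpu next = { .arch.vpmu.flags = 0 };

    vpmu_switch_to_sketch(&next.arch.vpmu);   /* roughly vcpu_vpmu(&next) */
    return next.arch.vpmu.flags == 1 ? 0 : 1;
}

Writing the inline with the v->arch.vpmu dereference in the header fails with
an "incomplete type" error, which is exactly what passing the vpmu pointer
avoids.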
xen/arch/x86/domain.c | 4 ++--
xen/arch/x86/hvm/svm/vpmu.c | 23 +++++++++++------------
xen/arch/x86/hvm/vmx/vpmu_core2.c | 24 ++++++++++++------------
xen/arch/x86/hvm/vpmu.c | 31 +++++++++++++------------------
xen/include/asm-x86/hvm/vpmu.h | 26 +++++++++++++-------------
5 files changed, 51 insertions(+), 57 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 11c7d9f..4e45fa8 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1544,7 +1544,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
if ( is_hvm_vcpu(prev) )
{
if (prev != next)
- vpmu_save(prev);
+ vpmu_save(vcpu_vpmu(prev));
if ( !list_empty(&prev->arch.hvm_vcpu.tm_list) )
pt_save_timer(prev);
@@ -1589,7 +1589,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
if (is_hvm_vcpu(next) && (prev != next) )
/* Must be done with interrupts enabled */
- vpmu_load(next);
+ vpmu_load(vcpu_vpmu(next));
context_saved(prev);
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 0d30b37..bbe2733 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -173,10 +173,9 @@ static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
return 1;
}
-static inline void context_load(struct vcpu *v)
+static inline void context_load(struct vpmu_struct *vpmu)
{
unsigned int i;
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
@@ -188,9 +187,8 @@ static inline void context_load(struct vcpu *v)
}
}
-static void amd_vpmu_load(struct vcpu *v)
+static void amd_vpmu_load(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
@@ -208,13 +206,12 @@ static void amd_vpmu_load(struct vcpu *v)
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- context_load(v);
+ context_load(vpmu);
}
-static inline void context_save(struct vcpu *v)
+static inline void context_save(struct vpmu_struct *vpmu)
{
unsigned int i;
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
@@ -223,9 +220,9 @@ static inline void context_save(struct vcpu *v)
rdmsrl(counters[i], counter_regs[i]);
}
-static int amd_vpmu_save(struct vcpu *v)
+static int amd_vpmu_save(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct vcpu *v;
unsigned int i;
/*
@@ -245,7 +242,9 @@ static int amd_vpmu_save(struct vcpu *v)
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
return 0;
- context_save(v);
+ context_save(vpmu);
+
+ v = vpmu_vcpu(vpmu);
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
@@ -325,7 +324,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
|| vpmu_is_set(vpmu, VPMU_FROZEN) )
{
- context_load(v);
+ context_load(vpmu);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
vpmu_reset(vpmu, VPMU_FROZEN);
}
@@ -346,7 +345,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
|| vpmu_is_set(vpmu, VPMU_FROZEN) )
{
- context_load(v);
+ context_load(vpmu);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
vpmu_reset(vpmu, VPMU_FROZEN);
}
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index a6cca38..1b2d048 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -287,10 +287,10 @@ static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
set_bit(msraddr_to_bitpos(MSR_IA32_DS_AREA), msr_bitmap);
}
-static inline void __core2_vpmu_save(struct vcpu *v)
+static inline void __core2_vpmu_save(struct vpmu_struct *vpmu)
{
int i;
- struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
@@ -301,14 +301,16 @@ static inline void __core2_vpmu_save(struct vcpu *v)
rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
}
-static int core2_vpmu_save(struct vcpu *v)
+static int core2_vpmu_save(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct vcpu *v;
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
return 0;
- __core2_vpmu_save(v);
+ __core2_vpmu_save(vpmu);
+
+ v = vpmu_vcpu(vpmu);
/* Unset PMU MSR bitmap to trap lazy load. */
if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
@@ -318,10 +320,10 @@ static int core2_vpmu_save(struct vcpu *v)
return 1;
}
-static inline void __core2_vpmu_load(struct vcpu *v)
+static inline void __core2_vpmu_load(struct vpmu_struct *vpmu)
{
unsigned int i, pmc_start;
- struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vcpu_vpmu(v)->context;
+ struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
@@ -344,16 +346,14 @@ static inline void __core2_vpmu_load(struct vcpu *v)
wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
}
-static void core2_vpmu_load(struct vcpu *v)
+static void core2_vpmu_load(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
return;
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- __core2_vpmu_load(v);
+ __core2_vpmu_load(vpmu);
}
static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -418,7 +418,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
/* Do the lazy load staff. */
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
{
- __core2_vpmu_load(current);
+ __core2_vpmu_load(vpmu);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
if ( has_hvm_container_vcpu(current) &&
cpu_has_vmx_msr_bitmap )
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 9f37bba..0a2e1a7 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -137,8 +137,7 @@ void vpmu_do_cpuid(unsigned int input,
static void vpmu_save_force(void *arg)
{
- struct vcpu *v = (struct vcpu *)arg;
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct vpmu_struct *vpmu = (struct vpmu_struct *)arg;
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
return;
@@ -146,36 +145,34 @@ static void vpmu_save_force(void *arg)
vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
if ( vpmu->arch_vpmu_ops )
- (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v);
+ (void)vpmu->arch_vpmu_ops->arch_vpmu_save(vpmu);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);
per_cpu(last_vcpu, smp_processor_id()) = NULL;
}
-void vpmu_save(struct vcpu *v)
+void vpmu_save(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
int pcpu = smp_processor_id();
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
return;
vpmu->last_pcpu = pcpu;
- per_cpu(last_vcpu, pcpu) = v;
+ per_cpu(last_vcpu, pcpu) = vpmu_vcpu(vpmu);
if ( vpmu->arch_vpmu_ops )
- if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v) )
+ if ( vpmu->arch_vpmu_ops->arch_vpmu_save(vpmu) )
vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}
-void vpmu_load(struct vcpu *v)
+void vpmu_load(struct vpmu_struct *vpmu)
{
- struct vpmu_struct *vpmu = vcpu_vpmu(v);
int pcpu = smp_processor_id();
- struct vcpu *prev = NULL;
+ struct vcpu *prev;
if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
@@ -193,7 +190,7 @@ void vpmu_load(struct vcpu *v)
if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
{
on_selected_cpus(cpumask_of(vpmu->last_pcpu),
- vpmu_save_force, (void *)v, 1);
+ vpmu_save_force, (void *)vpmu, 1);
vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
}
}
@@ -203,15 +200,13 @@ void vpmu_load(struct vcpu *v)
prev = per_cpu(last_vcpu, pcpu);
- if ( prev != v && prev )
+ if ( (prev != vpmu_vcpu(vpmu)) && prev )
{
- vpmu = vcpu_vpmu(prev);
+ struct vpmu_struct *vpmu_prev = vcpu_vpmu(prev);
/* Someone ran here before us */
- vpmu_save_force(prev);
- vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
-
- vpmu = vcpu_vpmu(v);
+ vpmu_save_force(vpmu_prev);
+ vpmu_reset(vpmu_prev, VPMU_CONTEXT_LOADED);
}
local_irq_enable();
@@ -224,7 +219,7 @@ void vpmu_load(struct vcpu *v)
{
apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
/* Arch code needs to set VPMU_CONTEXT_LOADED */
- vpmu->arch_vpmu_ops->arch_vpmu_load(v);
+ vpmu->arch_vpmu_ops->arch_vpmu_load(vpmu);
}
}
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 82bfa0e..897d5de 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -44,6 +44,15 @@
#define vpmu_reg_pointer(ctxt, offset) ((void *)((uintptr_t)ctxt + \
(uintptr_t)ctxt->offset))
+struct vpmu_struct {
+ u32 flags;
+ u32 last_pcpu;
+ u32 hw_lapic_lvtpc;
+ void *context; /* May be shared with PV guest */
+ void *priv_context; /* hypervisor-only */
+ struct arch_vpmu_ops *arch_vpmu_ops;
+};
+
/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
@@ -54,23 +63,14 @@ struct arch_vpmu_ops {
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void (*arch_vpmu_destroy)(struct vcpu *v);
- int (*arch_vpmu_save)(struct vcpu *v);
- void (*arch_vpmu_load)(struct vcpu *v);
+ int (*arch_vpmu_save)(struct vpmu_struct *vpmu);
+ void (*arch_vpmu_load)(struct vpmu_struct *vpmu);
void (*arch_vpmu_dump)(const struct vcpu *);
};
int vmx_vpmu_initialise(struct vcpu *, unsigned int flags);
int svm_vpmu_initialise(struct vcpu *, unsigned int flags);
-struct vpmu_struct {
- u32 flags;
- u32 last_pcpu;
- u32 hw_lapic_lvtpc;
- void *context; /* May be shared with PV guest */
- void *priv_context; /* hypervisor-only */
- struct arch_vpmu_ops *arch_vpmu_ops;
-};
-
/* VPMU states */
#define VPMU_CONTEXT_ALLOCATED 0x1
#define VPMU_CONTEXT_LOADED 0x2
@@ -109,8 +109,8 @@ void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
-void vpmu_save(struct vcpu *v);
-void vpmu_load(struct vcpu *v);
+void vpmu_save(struct vpmu_struct *vpmu);
+void vpmu_load(struct vpmu_struct *vpmu);
void vpmu_dump(struct vcpu *v);
extern int acquire_pmu_ownership(int pmu_ownership);
--
1.8.1.4
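As a side note on the inverse mapping the save-side hunks rely on
(`v = vpmu_vcpu(vpmu)` in amd_vpmu_save() and core2_vpmu_save()): below is a
generic, standalone illustration of the container_of pattern that a
vpmu_vcpu()-style helper can be built on. The types and names are made up, and
this is not claimed to be the exact macro definition in the Xen tree.

#include <stddef.h>

/* Simplified, made-up stand-ins for struct vcpu / struct vpmu_struct. */
struct vpmu_sketch { int flags; };
struct vcpu_sketch {
    int id;
    struct { struct vpmu_sketch vpmu; } arch;
};

/* Generic container_of: recover the enclosing object from a member pointer. */
#define container_of_sketch(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* What a vpmu_vcpu()-style helper boils down to. */
static struct vcpu_sketch *vpmu_vcpu_sketch(struct vpmu_sketch *vpmu)
{
    return container_of_sketch(vpmu, struct vcpu_sketch, arch.vpmu);
}

int main(void)
{
    struct vcpu_sketch v = { .id = 7 };
    struct vpmu_sketch *vpmu = &v.arch.vpmu;  /* what vcpu_vpmu(v) would hand out */

    /* Map the vpmu back to its enclosing vcpu, as the modified save paths need. */
    return vpmu_vcpu_sketch(vpmu)->id == 7 ? 0 : 1;
}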