[Xen-changelog] [xen-unstable] vpmu: Use macros to access struct vpmu_struct.flags
# HG changeset patch
# User Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
# Date 1328796482 28800
# Node ID 33344b15a5dc686c9273a7de0fa46ffaac555f37
# Parent 6f6a6d1d2fb651d76d4f162eb984c01689fb4fc0
vpmu: Use macros to access struct vpmu_struct.flags
This patch introduces macros that encapsulate access to the 'flags'
member of struct vpmu_struct (see xen/include/asm-x86/hvm/vpmu.h).
Only individual bits within 'flags' are set, reset, or checked.
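For reference, a typical conversion replaces open-coded bit operations
with the new accessors (a minimal sketch; 'vpmu' stands for any
struct vpmu_struct pointer, as in the hunks below):

    /* before: open-coded bit operations on vpmu->flags */
    if ( vpmu->flags & VPMU_RUNNING )
        vpmu->flags &= ~VPMU_RUNNING;

    /* after: the new accessor macros */
    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
        vpmu_reset(vpmu, VPMU_RUNNING);

Note that vpmu_is_set() evaluates to the masked bit value rather than
a strict boolean, matching the open-coded tests it replaces.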
Signed-off-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/arch/x86/hvm/svm/vpmu.c
--- a/xen/arch/x86/hvm/svm/vpmu.c Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/arch/x86/hvm/svm/vpmu.c Thu Feb 09 06:08:02 2012 -0800
@@ -188,8 +188,8 @@
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctxt = vpmu->context;
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
context_restore(v);
@@ -214,8 +214,8 @@
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctx = vpmu->context;
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
context_save(v);
@@ -261,20 +261,20 @@
/* check if the first counter is enabled */
if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- is_pmu_enabled(msr_content) && !(vpmu->flags & VPMU_RUNNING) )
+ is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
{
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 1;
- vpmu->flags |= VPMU_RUNNING;
+ vpmu_set(vpmu, VPMU_RUNNING);
apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
}
/* stop saving & restore if guest stops first counter */
- if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- (is_pmu_enabled(msr_content) == 0) && (vpmu->flags & VPMU_RUNNING) )
+ if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
+ (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
release_pmu_ownship(PMU_OWNER_HVM);
}
@@ -298,7 +298,7 @@
struct vpmu_struct *vpmu = vcpu_vpmu(v);
uint8_t family = current_cpu_data.x86;
- if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
if ( counters == NULL )
@@ -333,22 +333,22 @@
}
vpmu->context = (void *)ctxt;
- vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
+ vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
}
static void amd_vpmu_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
xfree(vpmu->context);
- vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
- if ( vpmu->flags & VPMU_RUNNING )
+ if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
{
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
release_pmu_ownship(PMU_OWNER_HVM);
}
}
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Feb 09 06:08:02 2012 -0800
@@ -266,17 +266,17 @@
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_CONTEXT_LOADED)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
return;
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
- if ( !(vpmu->flags & VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
- vpmu->flags &= ~VPMU_CONTEXT_LOADED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
return;
}
@@ -303,11 +303,11 @@
struct vpmu_struct *vpmu = vcpu_vpmu(v);
/* Only when PMU is counting, we load PMU context immediately. */
- if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
- (vpmu->flags & VPMU_RUNNING)) )
+ if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
+ vpmu_is_set(vpmu, VPMU_RUNNING)) )
return;
__core2_vpmu_load(v);
- vpmu->flags |= VPMU_CONTEXT_LOADED;
+ vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
}
static int core2_vpmu_alloc_resource(struct vcpu *v)
@@ -373,17 +373,17 @@
if ( !is_core2_vpmu_msr(msr_index, type, index) )
return 0;
- if ( unlikely(!(vpmu->flags & VPMU_CONTEXT_ALLOCATED)) &&
+ if ( unlikely(!vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) &&
(vpmu->context != NULL ||
!core2_vpmu_alloc_resource(current)) )
return 0;
- vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
+ vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
/* Do the lazy load staff. */
- if ( !(vpmu->flags & VPMU_CONTEXT_LOADED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
{
__core2_vpmu_load(current);
- vpmu->flags |= VPMU_CONTEXT_LOADED;
+ vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
if ( cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
}
@@ -467,12 +467,12 @@
for ( i = 0; i < core2_get_pmc_count(); i++ )
pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
if ( pmu_enable )
- vpmu->flags |= VPMU_RUNNING;
+ vpmu_set(vpmu, VPMU_RUNNING);
else
- vpmu->flags &= ~VPMU_RUNNING;
+ vpmu_reset(vpmu, VPMU_RUNNING);
/* Setup LVTPC in local apic */
- if ( vpmu->flags & VPMU_RUNNING &&
+ if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
else
@@ -588,14 +588,14 @@
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
- if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
return;
xfree(core2_vpmu_cxt->pmu_enable);
xfree(vpmu->context);
if ( cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownship(PMU_OWNER_HVM);
- vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED;
+ vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
}
struct arch_vpmu_ops core2_vpmu_ops = {
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/arch/x86/hvm/vpmu.c
--- a/xen/arch/x86/hvm/vpmu.c Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/arch/x86/hvm/vpmu.c Thu Feb 09 06:08:02 2012 -0800
@@ -86,7 +86,7 @@
if ( !opt_vpmu_enabled )
return;
- if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
+ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
vpmu_destroy(v);
switch ( vendor )
@@ -110,7 +110,7 @@
if ( vpmu->arch_vpmu_ops != NULL )
{
- vpmu->flags = 0;
+ vpmu_clear(vpmu);
vpmu->context = NULL;
vpmu->arch_vpmu_ops->arch_vpmu_initialise(v);
}
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/arch/x86/oprofile/nmi_int.c Thu Feb 09 06:08:02 2012 -0800
@@ -47,7 +47,7 @@
if ( !model->is_arch_pmu_msr(msr, typep, indexp) )
return 0;
- if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
if ( ! model->allocated_msr(current) )
return 0;
return 1;
@@ -78,7 +78,7 @@
void passive_domain_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( vpmu->flags & PASSIVE_DOMAIN_ALLOCATED )
+ if ( vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
model->free_msr(v);
}
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/arch/x86/oprofile/op_model_ppro.c
--- a/xen/arch/x86/oprofile/op_model_ppro.c Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/arch/x86/oprofile/op_model_ppro.c Thu Feb 09 06:08:02 2012 -0800
@@ -143,7 +143,7 @@
xenoprof_log_event(current, regs, eip, mode, i);
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
if ( is_passive(current->domain) && (mode != 2) &&
- (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) )
{
if ( IS_ACTIVE(msrs_content[i].control) )
{
@@ -230,8 +230,8 @@
if ( !msr_content )
goto out;
vpmu->context = (void *)msr_content;
- vpmu->flags = 0;
- vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED;
+ vpmu_clear(vpmu);
+ vpmu_set(vpmu, PASSIVE_DOMAIN_ALLOCATED);
return 1;
out:
gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
@@ -244,10 +244,10 @@
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ if ( !vpmu_is_set(vpmu, PASSIVE_DOMAIN_ALLOCATED) )
return;
xfree(vpmu->context);
- vpmu->flags &= ~PASSIVE_DOMAIN_ALLOCATED;
+ vpmu_reset(vpmu, PASSIVE_DOMAIN_ALLOCATED);
}
static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
diff -r 6f6a6d1d2fb6 -r 33344b15a5dc xen/include/asm-x86/hvm/vpmu.h
--- a/xen/include/asm-x86/hvm/vpmu.h Thu Feb 09 06:06:26 2012 -0800
+++ b/xen/include/asm-x86/hvm/vpmu.h Thu Feb 09 06:08:02 2012 -0800
@@ -69,6 +69,12 @@
#define VPMU_CONTEXT_LOADED 0x2
#define VPMU_RUNNING 0x4
#define PASSIVE_DOMAIN_ALLOCATED 0x8
+
+#define vpmu_set(_vpmu, _x) ((_vpmu)->flags |= (_x))
+#define vpmu_reset(_vpmu, _x) ((_vpmu)->flags &= ~(_x))
+#define vpmu_is_set(_vpmu, _x) ((_vpmu)->flags & (_x))
+#define vpmu_clear(_vpmu) ((_vpmu)->flags = 0)
+
int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog