[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 03/13] x86/PMU: Stop AMD counters when called from vpmu_save_force()



Change amd_vpmu_save() algorithm to accommodate cases when we need
to stop counters from vpmu_save_force() (needed by subsequent PMU
patches).

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/vpmu.c | 14 ++++----------
 xen/arch/x86/hvm/vpmu.c     | 12 ++++++------
 2 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 4d1fbc8..5d9c3f5 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -223,22 +223,16 @@ static int amd_vpmu_save(struct vcpu *v)
     struct amd_vpmu_context *ctx = vpmu->context;
     unsigned int i;
 
-    /*
-     * Stop the counters. If we came here via vpmu_save_force (i.e.
-     * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
-     */
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
+    if ( !vpmu_is_set(vpmu, VPMU_FROZEN) )
     {
-        vpmu_set(vpmu, VPMU_FROZEN);
-
         for ( i = 0; i < num_counters; i++ )
             wrmsrl(ctrls[i], 0);
 
-        return 0;
+        vpmu_set(vpmu, VPMU_FROZEN);
     }
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
-        return 0;
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
+        return 0;
 
     context_save(v);
 
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 21fbaba..a4e3664 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -127,13 +127,19 @@ static void vpmu_save_force(void *arg)
     struct vcpu *v = (struct vcpu *)arg;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+        return;
+
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
         return;
 
+    vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
+
     if ( vpmu->arch_vpmu_ops )
         (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v);
 
     vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 
     per_cpu(last_vcpu, smp_processor_id()) = NULL;
 }
@@ -177,12 +183,8 @@ void vpmu_load(struct vcpu *v)
          * before saving the context.
          */
         if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
-        {
-            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
             on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                              vpmu_save_force, (void *)v, 1);
-            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
-        }
     } 
 
     /* Prevent forced context save from remote CPU */
@@ -195,9 +197,7 @@ void vpmu_load(struct vcpu *v)
         vpmu = vcpu_vpmu(prev);
 
         /* Someone ran here before us */
-        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
         vpmu_save_force(prev);
-        vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 
         vpmu = vcpu_vpmu(v);
     }
-- 
1.8.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.