[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 15/16] x86/VPMU: NMI-based VPMU support



Add support for using NMIs as PMU interrupts

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vpmu.c | 119 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 99 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 0cbede4..ff05ff5 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -35,6 +35,7 @@
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 #include <public/xenpmu.h>
 
 /*
@@ -47,33 +48,57 @@ static void parse_vpmu_param(char *s);
 custom_param("vpmu", parse_vpmu_param);
 
 static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
+static DEFINE_PER_CPU(struct vcpu *, sampled_vcpu);
+
+uint32_t vpmu_apic_vector = PMU_APIC_VECTOR;
 
 static void __init parse_vpmu_param(char *s)
 {
-    switch ( parse_bool(s) )
-    {
-    case 0:
-        break;
-    default:
-        if ( !strcmp(s, "bts") )
-            vpmu_mode |= XENPMU_FEATURE_INTEL_BTS << XENPMU_FEATURE_SHIFT;
-        else if ( *s )
+    char *ss;
+
+    vpmu_mode = XENPMU_MODE_ON;
+    if ( *s == '\0' )
+        return;
+
+    do {
+        ss = strchr(s, ',');
+        if ( ss )
+            *ss = '\0';
+
+        switch ( parse_bool(s) )
         {
-            printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
+        case 0:
+            vpmu_mode = XENPMU_MODE_OFF;
+            return;
+        case -1:
+            if ( !strcmp(s, "nmi") )
+                vpmu_apic_vector = APIC_DM_NMI;
+            else if ( !strcmp(s, "bts") )
+                vpmu_mode |= XENPMU_FEATURE_INTEL_BTS << XENPMU_FEATURE_SHIFT;
+            else if ( !strcmp(s, "priv") )
+            {
+                vpmu_mode &= ~XENPMU_MODE_ON;
+                vpmu_mode |= XENPMU_MODE_PRIV;
+            }
+            else
+            {
+                printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
+                vpmu_mode = XENPMU_MODE_OFF;
+                return;
+            }
+        default:
             break;
         }
-        /* fall through */
-    case 1:
-        vpmu_mode |= XENPMU_MODE_ON;
-        break;
-    }
+
+        s = ss ? ss + 1 : NULL;
+    } while ( ss );
 }
 
 void vpmu_lvtpc_update(uint32_t val)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
+    vpmu->hw_lapic_lvtpc = vpmu_apic_vector | (val & APIC_LVT_MASKED);
 
     /* Postpone APIC updates for PV guests if PMU interrupt is pending */
     if (is_hvm_domain(current->domain) ||
@@ -202,10 +227,14 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
             struct segment_register cs;
 
             gregs = guest_cpu_user_regs();
-            hvm_get_segment_register(current, x86_seg_cs, &cs);
 
             memcpy(p, gregs, sizeof(struct cpu_user_regs));
-            ((struct cpu_user_regs *)p)->cs = cs.attr.fields.dpl;
+            /* This is unsafe in NMI context, we'll do it in softint handler */
+            if ( vpmu_apic_vector != APIC_DM_NMI )
+            {
+                hvm_get_segment_register(current, x86_seg_cs, &cs);
+                ((struct cpu_user_regs *)p)->cs = cs.attr.fields.dpl;
+            }
         }
 
         v->arch.vpmu.xenpmu_data->domain_id = current->domain->domain_id;
@@ -216,7 +245,13 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc | APIC_LVT_MASKED);
         vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;
 
-        send_guest_vcpu_virq(v, VIRQ_XENPMU);
+        if ( vpmu_apic_vector == APIC_DM_NMI )
+        {
+            this_cpu(sampled_vcpu) = current;
+            raise_softirq(PMU_SOFTIRQ);
+        }
+        else
+            send_guest_vcpu_virq(v, VIRQ_XENPMU);
 
         return 1;
     }
@@ -288,7 +323,7 @@ void vpmu_save(struct vcpu *v)
         if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v) )
             vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
 
-    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
+    apic_write(APIC_LVTPC, vpmu_apic_vector | APIC_LVT_MASKED);
 }
 
 void vpmu_load(struct vcpu *v)
@@ -379,7 +414,7 @@ void vpmu_initialise(struct vcpu *v)
         return;
     }
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
+    vpmu->hw_lapic_lvtpc = vpmu_apic_vector | APIC_LVT_MASKED;
 }
 
 void vpmu_destroy(struct vcpu *v)
@@ -405,10 +440,40 @@ void vpmu_dump(struct vcpu *v)
         vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
 }
 
+/* Process the softirq set by PMU NMI handler */
+static void pmu_softnmi(void)
+{
+    struct cpu_user_regs *regs;
+    struct vcpu *v, *sampled = this_cpu(sampled_vcpu);
+
+    if ( vpmu_mode & XENPMU_MODE_PRIV ||
+         sampled->domain->domain_id >= DOMID_FIRST_RESERVED )
+        v = dom0->vcpu[smp_processor_id() % dom0->max_vcpus];
+    else
+        v = sampled;
+
+    regs = &v->arch.vpmu.xenpmu_data->pmu.regs;
+    if ( is_hvm_domain(sampled->domain) )
+    {
+        struct segment_register cs;
+
+        hvm_get_segment_register(sampled, x86_seg_cs, &cs);
+        regs->cs = cs.attr.fields.dpl;
+    }
+
+    send_guest_vcpu_virq(v, VIRQ_XENPMU);
+}
+
+int pmu_nmi_interrupt(struct cpu_user_regs *regs, int cpu)
+{
+    return vpmu_do_interrupt(regs);
+}
+
 static int pvpmu_init(struct domain *d, xenpmu_params_t *params)
 {
     struct vcpu *v;
     uint64_t mfn = params->val;
+    static int pvpmu_initted = 0;
 
     if ( params->vcpu < 0 || params->vcpu >= d->max_vcpus )
         return -EINVAL;
@@ -417,6 +482,20 @@ static int pvpmu_init(struct domain *d, xenpmu_params_t *params)
         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
         return -EINVAL;
 
+    if ( !pvpmu_initted )
+    {
+        if ( reserve_lapic_nmi() != 0 )
+        {
+            printk("Failed to reserve PMU NMI\n");
+            put_page_and_type(mfn_to_page(mfn));
+            return -EBUSY;
+        }
+        set_nmi_callback(pmu_nmi_interrupt);
+        open_softirq(PMU_SOFTIRQ, pmu_softnmi);
+
+        pvpmu_initted = 1;
+    }
+
     v = d->vcpu[params->vcpu];
     v->arch.vpmu.xenpmu_data = map_domain_page_global(mfn);
     memset(v->arch.vpmu.xenpmu_data, 0, PAGE_SIZE);
-- 
1.8.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.