
Re: [Xen-devel] [PATCH] xen: implement vector callback for evtchn delivery



The patch below contains:

--- a/xen/common/kernel.c    Tue May 25 09:08:34 2010 +0100
+++ b/xen/common/kernel.c    Tue May 25 10:44:07 2010 +0100
@@ -260,7 +260,8 @@
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
             else
-                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
+                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
+                             (1U << XENFEAT_hvm_callback_vector);
#endif

However, the file xen/common/kernel.c in xen-4.0.0 does not contain the line

      fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
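
so the kernel.c hunk above won't apply cleanly there. A backport would
presumably have to introduce the else branch itself -- an untested
sketch, assuming the XENVER_get_features switch in 4.0.0 is otherwise
the same:

                             (1U << XENFEAT_highmem_assist) |
                             (1U << XENFEAT_gnttab_map_avail_bits);
+            else
+                fi.submap |= (1U << XENFEAT_hvm_callback_vector);
 #endif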

Boris.

--- On Tue, 5/25/10, Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> wrote:

From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH] xen: implement vector callback for evtchn delivery
To: "Keir Fraser" <Keir.Fraser@xxxxxxxxxxxxx>
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Stefano Stabellini" <Stefano.Stabellini@xxxxxxxxxxxxx>
Date: Tuesday, May 25, 2010, 5:55 AM

On Mon, 24 May 2010, Keir Fraser wrote:
> Please add documentation to include/public/hvm/params.h about how to specify
> the new callback method, and detect when it is available. Move the new
> is_hvm_pv_blah macros out into an appropriate include/asm-x86 header file --
> They are not arch independent.

Done.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---


diff -r 12c79a476007 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Tue May 25 09:08:34 2010 +0100
+++ b/xen/arch/x86/hvm/irq.c    Tue May 25 10:44:07 2010 +0100
@@ -185,16 +185,16 @@

void hvm_assert_evtchn_irq(struct vcpu *v)
{
-    if ( v->vcpu_id != 0 )
-        return;
-
     if ( unlikely(in_irq() || !local_irq_is_enabled()) )
     {
         tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
         return;
     }

-    hvm_set_callback_irq_level(v);
+    if ( is_hvm_pv_evtchn_vcpu(v) )
+        vcpu_kick(v);
+    else if ( v->vcpu_id == 0 )
+        hvm_set_callback_irq_level(v);
}

void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
@@ -251,7 +251,7 @@

     via_type = (uint8_t)(via >> 56) + 1;
     if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
-         (via_type > HVMIRQ_callback_pci_intx) )
+         (via_type > HVMIRQ_callback_vector) )
         via_type = HVMIRQ_callback_none;

     spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -297,6 +297,9 @@
         if ( hvm_irq->callback_via_asserted )
              __hvm_pci_intx_assert(d, pdev, pintx);
         break;
+    case HVMIRQ_callback_vector:
+        hvm_irq->callback_via.vector = (uint8_t)via;
+        break;
     default:
         break;
     }
@@ -312,6 +315,10 @@
     case HVMIRQ_callback_pci_intx:
         printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
         break;
+    case HVMIRQ_callback_vector:
+        printk("Set HVMIRQ_callback_vector to %u\n",
+               hvm_irq->callback_via.vector);
+        break;
     default:
         printk("None\n");
         break;
@@ -323,6 +330,10 @@
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
     int vector;

+    if (plat->irq.callback_via_type == HVMIRQ_callback_vector &&
+            vcpu_info(v, evtchn_upcall_pending))
+        return hvm_intack_vector(plat->irq.callback_via.vector);
+
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;

@@ -363,6 +374,8 @@
     case hvm_intsrc_lapic:
         if ( !vlapic_ack_pending_irq(v, intack.vector) )
             intack = hvm_intack_none;
+        break;
+    case hvm_intsrc_vector:
         break;
     default:
         intack = hvm_intack_none;
diff -r 12c79a476007 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c    Tue May 25 09:08:34 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c    Tue May 25 10:44:07 2010 +0100
@@ -164,7 +164,8 @@
     {
         HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
         vmx_inject_extint(intack.vector);
-        pt_intr_post(v, intack);
+        if (intack.source != hvm_intsrc_vector)
+             pt_intr_post(v, intack);
     }

     /* Is there another IRQ to queue up behind this one? */
diff -r 12c79a476007 xen/common/kernel.c
--- a/xen/common/kernel.c    Tue May 25 09:08:34 2010 +0100
+++ b/xen/common/kernel.c    Tue May 25 10:44:07 2010 +0100
@@ -260,7 +260,8 @@
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
             else
-                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
+                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
+                             (1U << XENFEAT_hvm_callback_vector);
#endif
             break;
         default:
diff -r 12c79a476007 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h    Tue May 25 09:08:34 2010 +0100
+++ b/xen/include/asm-x86/domain.h    Tue May 25 10:44:07 2010 +0100
@@ -18,6 +18,10 @@
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
+
+#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
+        d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
+#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))

#define VCPU_TRAP_NMI          1
#define VCPU_TRAP_MCE          2
diff -r 12c79a476007 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h    Tue May 25 09:08:34 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h    Tue May 25 10:44:07 2010 +0100
@@ -33,7 +33,8 @@
     hvm_intsrc_pic,
     hvm_intsrc_lapic,
     hvm_intsrc_nmi,
-    hvm_intsrc_mce
+    hvm_intsrc_mce,
+    hvm_intsrc_vector
};
struct hvm_intack {
     uint8_t source; /* enum hvm_intsrc */
@@ -44,6 +45,7 @@
#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
#define hvm_intack_mce        ( (struct hvm_intack) { hvm_intsrc_mce,   18 } )
+#define hvm_intack_vector(vec)( (struct hvm_intack) { hvm_intsrc_vector, vec } )
enum hvm_intblk {
     hvm_intblk_none,      /* not blocked (deliverable) */
     hvm_intblk_shadow,    /* MOV-SS or STI shadow */
diff -r 12c79a476007 xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h    Tue May 25 09:08:34 2010 +0100
+++ b/xen/include/asm-x86/hvm/irq.h    Tue May 25 10:44:07 2010 +0100
@@ -54,12 +54,14 @@
         enum {
             HVMIRQ_callback_none,
             HVMIRQ_callback_gsi,
-            HVMIRQ_callback_pci_intx
+            HVMIRQ_callback_pci_intx,
+            HVMIRQ_callback_vector
         } callback_via_type;
     };
     union {
         uint32_t gsi;
         struct { uint8_t dev, intx; } pci;
+        uint32_t vector;
     } callback_via;

     /* Number of INTx wires asserting each PCI-ISA link. */
diff -r 12c79a476007 xen/include/public/features.h
--- a/xen/include/public/features.h    Tue May 25 09:08:34 2010 +0100
+++ b/xen/include/public/features.h    Tue May 25 10:44:07 2010 +0100
@@ -68,6 +68,9 @@
  */
#define XENFEAT_gnttab_map_avail_bits      7

+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector        8
+
/* x86: pvclock algorithm is safe to use on HVM */
#define XENFEAT_hvm_safe_pvclock           9

diff -r 12c79a476007 xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h    Tue May 25 09:08:34 2010 +0100
+++ b/xen/include/public/hvm/params.h    Tue May 25 10:44:07 2010 +0100
@@ -33,6 +33,9 @@
  * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
  *                  Domain = val[47:32], Bus  = val[31:16],
  *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
+ * val[63:56] == 2: val[7:0] is a vector number, check for
+ *                  XENFEAT_hvm_callback_vector to know if this delivery
+ *                  method is available.
  * If val == 0 then CPU0 event-channel notifications are not delivered.
  */
#define HVM_PARAM_CALLBACK_IRQ 0

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
