
[Xen-devel] [RFC v2 08/15] Update IRTE according to guest interrupt config changes



When a guest changes the interrupt configuration (such as the vector) of a
direct-assigned device, we need to update the associated IRTE with the new
guest vector, so that external interrupts from the assigned device can be
injected into the guest without a VM-Exit.

For lowest-priority interrupts, we use the vector-hashing mechanism to find
the destination vCPU. This follows the hardware behavior, since modern
Intel CPUs use vector hashing to handle lowest-priority interrupts.
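
As a purely illustrative sketch (not part of this patch; the helper name is
hypothetical), the selection amounts to indexing an array of candidate vCPUs
with the guest vector modulo the number of candidates:

    /* Hypothetical helper, shown only to illustrate the hashing step. */
    static unsigned int vector_hash_index(uint8_t gvec, unsigned int nr_dest)
    {
        /*
         * E.g. guest vector 0x51 (81) with 4 candidate vCPUs:
         * 81 % 4 = 1, so the second vCPU in the candidate array is used.
         */
        return gvec % nr_dest;
    }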

For multicast/broadcast destinations, we cannot handle them via interrupt
posting, so we keep using interrupt remapping for those cases.

Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
 xen/drivers/passthrough/io.c | 99 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)

diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 9b77334..7b1c094 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -26,6 +26,7 @@
 #include <asm/hvm/iommu.h>
 #include <asm/hvm/support.h>
 #include <xen/hvm/irq.h>
+#include <asm/io_apic.h>
 
 static DEFINE_PER_CPU(struct list_head, dpci_list);
 
@@ -199,6 +200,73 @@ void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
     xfree(dpci);
 }
 
+/*
+ * The purpose of this routine is to find the right destination vCPU for
+ * an interrupt which will be delivered via a VT-d posted interrupt. There
+ * are several cases, as described below:
+ *
+ * - For lowest-priority interrupts, we find the destination vCPU from the
+ *   guest vector using the vector-hashing mechanism and return true. This
+ *   follows the hardware behavior, since modern Intel CPUs use vector
+ *   hashing to handle lowest-priority interrupts.
+ * - Otherwise, for a single-destination interrupt, it is straightforward to
+ *   find the destination vCPU and return true.
+ * - For multicast/broadcast destinations, we cannot handle them via interrupt
+ *   posting, so return false.
+ *
+ *   Here are the details of the vector-hashing mechanism:
+ *   1. For lowest-priority interrupts, store all the possible destination
+ *      vCPUs in an array.
+ *   2. Use "gvec % max number of destination vCPUs" to find the right
+ *      destination vCPU in the array for the lowest-priority interrupt.
+ */
+static bool_t pi_find_dest_vcpu(struct domain *d, uint8_t dest_id,
+                                uint8_t dest_mode, uint8_t delivery_mode,
+                                uint8_t gvec, struct vcpu **dest_vcpu)
+{
+    struct vcpu *v, **dest_vcpu_array;
+    unsigned int dest_vcpu_num = 0;
+    int ret;
+
+    dest_vcpu_array = xzalloc_array(struct vcpu *, d->max_vcpus);
+    if ( !dest_vcpu_array )
+    {
+        dprintk(XENLOG_G_INFO,
+                "dom%d: failed to allocate memory\n", d->domain_id);
+        return 0;
+    }
+
+    for_each_vcpu ( d, v )
+    {
+        if ( !vlapic_match_dest(vcpu_vlapic(v), NULL, 0,
+                                dest_id, dest_mode) )
+            continue;
+
+        dest_vcpu_array[dest_vcpu_num++] = v;
+    }
+
+    if ( delivery_mode == dest_LowestPrio )
+    {
+        if ( dest_vcpu_num != 0 )
+        {
+            *dest_vcpu = dest_vcpu_array[gvec % dest_vcpu_num];
+            ret = 1;
+        }
+        else
+            ret = 0;
+    }
+    else if ( dest_vcpu_num == 1 )
+    {
+        *dest_vcpu = dest_vcpu_array[0];
+        ret = 1;
+    }
+    else
+        ret = 0;
+
+    xfree(dest_vcpu_array);
+    return ret;
+}
+
 int pt_irq_create_bind(
     struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
 {
@@ -257,7 +325,7 @@ int pt_irq_create_bind(
     {
     case PT_IRQ_TYPE_MSI:
     {
-        uint8_t dest, dest_mode;
+        uint8_t dest, dest_mode, delivery_mode;
         int dest_vcpu_id;
 
         if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
@@ -330,11 +398,40 @@ int pt_irq_create_bind(
         /* Calculate dest_vcpu_id for MSI-type pirq migration. */
         dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
         dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+        delivery_mode = (pirq_dpci->gmsi.gflags >> GFLAGS_SHIFT_DELIV_MODE) &
+                        VMSI_DELIV_MASK;
         dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
         pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
         spin_unlock(&d->event_lock);
         if ( dest_vcpu_id >= 0 )
             hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
+
+        /* Use interrupt posting if it is supported. */
+        if ( iommu_intpost )
+        {
+            struct vcpu *vcpu = NULL;
+
+            if ( !pi_find_dest_vcpu(d, dest, dest_mode, delivery_mode,
+                                    pirq_dpci->gmsi.gvec, &vcpu) )
+            {
+                dprintk(XENLOG_G_WARNING,
+                        "dom%d: failed to find the destination vCPU for "
+                        "guest vector %u, falling back to interrupt "
+                        "remapping\n", d->domain_id, pirq_dpci->gmsi.gvec);
+                break;
+            }
+
+            if ( pi_update_irte(vcpu, info, pirq_dpci->gmsi.gvec) != 0 )
+            {
+                dprintk(XENLOG_G_WARNING,
+                        "%pv: failed to update the posted-interrupt IRTE "
+                        "for guest vector %u, falling back to interrupt "
+                        "remapping\n", vcpu, pirq_dpci->gmsi.gvec);
+
+                break;
+            }
+        }
+
         break;
     }
 
-- 
2.1.0

