[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v4 4/9] x86/passthrough: Extract PT_IRQ_TYPE_MSI body into pt_irq_bind_msi()



No functional change: move the PT_IRQ_TYPE_MSI case of pt_irq_create_bind()
into a new static helper pt_irq_bind_msi() taking the same gvec/gflags/gtable
parameters. Restructure pt_irq_create_bind() so the MSI case delegates to the
helper and pt_irq_dpci_setup() is called inside each case rather than shared.

Signed-off-by: Julian Vetter <julian.vetter@xxxxxxxxxx>
---
Changes in v4:
- New patch
- Split out as a preparatory no-functional-change step so that the
  interface change in patch 5 (switching pt_irq_bind_msi() from gvec +
  gflags to raw MSI addr + data) shows as a clean diff against an
  already-extracted helper, rather than being tangled with the 'case
  PT_IRQ_TYPE_MSI' code
---
 xen/drivers/passthrough/x86/hvm.c | 255 ++++++++++++++++--------------
 1 file changed, 140 insertions(+), 115 deletions(-)

diff --git a/xen/drivers/passthrough/x86/hvm.c b/xen/drivers/passthrough/x86/hvm.c
index 19463c3406..eff1e8a79e 100644
--- a/xen/drivers/passthrough/x86/hvm.c
+++ b/xen/drivers/passthrough/x86/hvm.c
@@ -290,161 +290,186 @@ static int pt_irq_dpci_setup(struct domain *d, unsigned int pirq,
     } while ( true );
 }
 
-int pt_irq_create_bind(
-    struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
+static int pt_irq_bind_msi(struct domain *d, uint32_t machine_irq,
+                           uint8_t gvec, uint32_t gflags, uint64_t gtable,
+                           bool unmasked)
 {
     struct hvm_irq_dpci *hvm_irq_dpci;
     struct hvm_pirq_dpci *pirq_dpci;
     struct pirq *info;
-    int rc, pirq = pt_irq_bind->machine_irq;
+    uint8_t dest, delivery_mode;
+    bool dest_mode;
+    int dest_vcpu_id, rc;
+    const struct vcpu *vcpu;
 
-    if ( pirq < 0 || pirq >= d->nr_pirqs )
+    if ( machine_irq >= (unsigned int)d->nr_pirqs )
         return -EINVAL;
 
-    rc = pt_irq_dpci_setup(d, pirq, &hvm_irq_dpci, &pirq_dpci, &info);
+    rc = pt_irq_dpci_setup(d, machine_irq, &hvm_irq_dpci, &pirq_dpci, &info);
     if ( rc )
         return rc;
 
-    switch ( pt_irq_bind->irq_type )
+    if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
     {
-    case PT_IRQ_TYPE_MSI:
-    {
-        uint8_t dest, delivery_mode;
-        bool dest_mode;
-        int dest_vcpu_id;
-        const struct vcpu *vcpu;
-        uint32_t gflags = pt_irq_bind->u.msi.gflags &
-                          ~XEN_DOMCTL_VMSI_X86_UNMASKED;
-
-        if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+        pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
+                           HVM_IRQ_DPCI_GUEST_MSI;
+        pirq_dpci->gmsi.gvec = gvec;
+        pirq_dpci->gmsi.gflags = gflags;
+        /*
+         * 'pt_irq_bind_msi' can be called after 'pt_irq_destroy_bind'.
+         * The 'pirq_cleanup_check' which would free the structure is only
+         * called if the event channel for the PIRQ is active. However
+         * OS-es that use event channels usually bind PIRQs to eventds
+         * and unbind them before calling 'pt_irq_destroy_bind' - with the
+         * result that we re-use the 'dpci' structure. This can be
+         * reproduced with unloading and loading the driver for a device.
+         *
+         * As such on every 'pt_irq_bind_msi' call we MUST set it.
+         */
+        pirq_dpci->dom = d;
+        /* bind after hvm_irq_dpci is setup to avoid race with irq handler */
+        rc = pirq_guest_bind(d->vcpu[0], info, 0);
+        if ( rc == 0 && gtable )
         {
-            pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
-                               HVM_IRQ_DPCI_GUEST_MSI;
-            pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-            pirq_dpci->gmsi.gflags = gflags;
-            /*
-             * 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
-             * The 'pirq_cleanup_check' which would free the structure is only
-             * called if the event channel for the PIRQ is active. However
-             * OS-es that use event channels usually bind PIRQs to eventds
-             * and unbind them before calling 'pt_irq_destroy_bind' - with the
-             * result that we re-use the 'dpci' structure. This can be
-             * reproduced with unloading and loading the driver for a device.
-             *
-             * As such on every 'pt_irq_create_bind' call we MUST set it.
-             */
-            pirq_dpci->dom = d;
-            /* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
-            rc = pirq_guest_bind(d->vcpu[0], info, 0);
-            if ( rc == 0 && pt_irq_bind->u.msi.gtable )
-            {
-                rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
-                if ( unlikely(rc) )
-                {
-                    pirq_guest_unbind(d, info);
-                    /*
-                     * Between 'pirq_guest_bind' and before 'pirq_guest_unbind'
-                     * an interrupt can be scheduled. No more of them are going
-                     * to be scheduled but we must deal with the one that may be
-                     * in the queue.
-                     */
-                    pt_pirq_softirq_reset(pirq_dpci);
-                }
-            }
+            rc = msixtbl_pt_register(d, info, gtable);
             if ( unlikely(rc) )
             {
-                pirq_dpci->gmsi.gflags = 0;
-                pirq_dpci->gmsi.gvec = 0;
-                pirq_dpci->dom = NULL;
-                pirq_dpci->flags = 0;
-                if ( !info->evtchn )
-                    pirq_cleanup_check(info, d);
-                write_unlock(&d->event_lock);
-                return rc;
+                pirq_guest_unbind(d, info);
+                /*
+                 * Between 'pirq_guest_bind' and before 'pirq_guest_unbind'
+                 * an interrupt can be scheduled. No more of them are going
+                 * to be scheduled but we must deal with the one that may be
+                 * in the queue.
+                 */
+                pt_pirq_softirq_reset(pirq_dpci);
             }
         }
-        else
+        if ( unlikely(rc) )
         {
-            uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
-
-            if ( (pirq_dpci->flags & mask) != mask )
-            {
-                write_unlock(&d->event_lock);
-                return -EBUSY;
-            }
-
-            /* If pirq is already mapped as vmsi, update guest data/addr. */
-            if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
-                 pirq_dpci->gmsi.gflags != gflags )
-            {
-                /* Directly clear pending EOIs before enabling new MSI info. */
-                pirq_guest_eoi(info);
-
-                pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-                pirq_dpci->gmsi.gflags = gflags;
-            }
+            pirq_dpci->gmsi.gflags = 0;
+            pirq_dpci->gmsi.gvec = 0;
+            pirq_dpci->dom = NULL;
+            pirq_dpci->flags = 0;
+            if ( !info->evtchn )
+                pirq_cleanup_check(info, d);
+            write_unlock(&d->event_lock);
+            return rc;
         }
-        /* Calculate dest_vcpu_id for MSI-type pirq migration. */
-        dest = MASK_EXTR(pirq_dpci->gmsi.gflags,
-                         XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
-        dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
-        delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags,
-                                  XEN_DOMCTL_VMSI_X86_DELIV_MASK);
-
-        dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
-        pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
-        write_unlock(&d->event_lock);
+    }
+    else
+    {
+        uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
 
-        pirq_dpci->gmsi.posted = false;
-        vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
-        if ( iommu_intpost )
+        if ( (pirq_dpci->flags & mask) != mask )
         {
-            if ( delivery_mode == dest_LowestPrio )
-                vcpu = vector_hashing_dest(d, dest, dest_mode,
-                                           pirq_dpci->gmsi.gvec);
-            if ( vcpu )
-                pirq_dpci->gmsi.posted = true;
+            write_unlock(&d->event_lock);
+            return -EBUSY;
         }
-        if ( vcpu && is_iommu_enabled(d) )
-            hvm_migrate_pirq(pirq_dpci, vcpu);
 
-        /* Use interrupt posting if it is supported. */
-        if ( iommu_intpost )
+        /* If pirq is already mapped as vmsi, update guest data/addr. */
+        if ( pirq_dpci->gmsi.gvec != gvec || pirq_dpci->gmsi.gflags != gflags )
         {
-            rc = hvm_pi_update_irte(vcpu, info, pirq_dpci->gmsi.gvec);
+            /* Directly clear pending EOIs before enabling new MSI info. */
+            pirq_guest_eoi(info);
 
-            if ( rc )
-            {
-                pt_irq_destroy_bind(d, pt_irq_bind);
-                return rc;
-            }
+            pirq_dpci->gmsi.gvec = gvec;
+            pirq_dpci->gmsi.gflags = gflags;
         }
+    }
+    /* Calculate dest_vcpu_id for MSI-type pirq migration. */
+    dest = MASK_EXTR(pirq_dpci->gmsi.gflags, XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
+    dest_mode = pirq_dpci->gmsi.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
+    delivery_mode = MASK_EXTR(pirq_dpci->gmsi.gflags,
+                              XEN_DOMCTL_VMSI_X86_DELIV_MASK);
+
+    dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
+    pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
+    write_unlock(&d->event_lock);
 
-        if ( pt_irq_bind->u.msi.gflags & XEN_DOMCTL_VMSI_X86_UNMASKED )
+    pirq_dpci->gmsi.posted = false;
+    vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
+    if ( iommu_intpost )
+    {
+        if ( delivery_mode == dest_LowestPrio )
+            vcpu = vector_hashing_dest(d, dest, dest_mode,
+                                       pirq_dpci->gmsi.gvec);
+        if ( vcpu )
+            pirq_dpci->gmsi.posted = true;
+    }
+    if ( vcpu && is_iommu_enabled(d) )
+        hvm_migrate_pirq(pirq_dpci, vcpu);
+
+    /* Use interrupt posting if it is supported. */
+    if ( iommu_intpost )
+    {
+        struct xen_domctl_bind_pt_irq bind = {
+            .machine_irq = machine_irq,
+            .irq_type = PT_IRQ_TYPE_MSI,
+        };
+
+        rc = hvm_pi_update_irte(vcpu, info, pirq_dpci->gmsi.gvec);
+        if ( rc )
         {
-            unsigned long flags;
-            struct irq_desc *desc = pirq_spin_lock_irq_desc(info, &flags);
+            pt_irq_destroy_bind(d, &bind);
+            return rc;
+        }
+    }
 
-            if ( !desc )
-            {
-                pt_irq_destroy_bind(d, pt_irq_bind);
-                return -EINVAL;
-            }
+    if ( unmasked )
+    {
+        struct xen_domctl_bind_pt_irq bind = {
+            .machine_irq = machine_irq,
+            .irq_type = PT_IRQ_TYPE_MSI,
+        };
+        unsigned long flags;
+        struct irq_desc *desc = pirq_spin_lock_irq_desc(info, &flags);
 
-            guest_mask_msi_irq(desc, false);
-            spin_unlock_irqrestore(&desc->lock, flags);
+        if ( !desc )
+        {
+            pt_irq_destroy_bind(d, &bind);
+            return -EINVAL;
         }
 
-        break;
+        guest_mask_msi_irq(desc, false);
+        spin_unlock_irqrestore(&desc->lock, flags);
     }
 
+    return 0;
+}
+
+int pt_irq_create_bind(
+    struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
+{
+    int rc, pirq = pt_irq_bind->machine_irq;
+
+    if ( pirq < 0 || pirq >= d->nr_pirqs )
+        return -EINVAL;
+
+    switch ( pt_irq_bind->irq_type )
+    {
+    case PT_IRQ_TYPE_MSI:
+        return pt_irq_bind_msi(d, pirq,
+                               pt_irq_bind->u.msi.gvec,
+                               pt_irq_bind->u.msi.gflags &
+                                   ~XEN_DOMCTL_VMSI_X86_UNMASKED,
+                               pt_irq_bind->u.msi.gtable,
+                               !!(pt_irq_bind->u.msi.gflags &
+                                  XEN_DOMCTL_VMSI_X86_UNMASKED));
+
     case PT_IRQ_TYPE_PCI:
     case PT_IRQ_TYPE_MSI_TRANSLATE:
     {
+        struct hvm_irq_dpci *hvm_irq_dpci;
+        struct hvm_pirq_dpci *pirq_dpci;
+        struct pirq *info;
         struct dev_intx_gsi_link *digl = NULL;
         struct hvm_girq_dpci_mapping *girq = NULL;
         unsigned int guest_gsi;
 
+        rc = pt_irq_dpci_setup(d, pirq, &hvm_irq_dpci, &pirq_dpci, &info);
+        if ( rc )
+            return rc;
+
         /*
          * Mapping GSIs for the hardware domain is different than doing it for
          * an unpriviledged guest, the hardware domain is only allowed to
-- 
2.53.0



--
Julian Vetter | Vates Hypervisor & Kernel Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.