
[Xen-devel] [PATCH v2 4/5] VT-d PI: Add a reference count to pi_desc



This patch introduces a 'refcnt' field in vmx_pi_blocking to track
how many IRTEs currently reference the vCPU's 'pi_desc'. The count
is adjusted every time an IRTE is (re-)programmed.
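
To make the intended pairing explicit, here is a condensed sketch of
the lifecycle this patch implements (retarget_irte() is an
illustrative name only; the real logic lives in pi_update_irte()
below):

  /* Illustrative sketch -- not a real function in this patch. */
  static void retarget_irte(struct msi_desc *msi_desc,
                            struct pi_desc *new_pi_desc)
  {
      struct pi_desc *old_pi_desc = msi_desc->pi_desc;

      /* Pin the new target before any IRTE can point at it. */
      pi_get_ref(new_pi_desc);
      msi_desc->pi_desc = new_pi_desc;

      /* ... reprogram the IRTE in hardware here ... */

      /* Drop the old target only after the IRTE stops using it. */
      pi_put_ref(old_pi_desc);
  }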

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c             | 29 ++++++++++++++++++++++++
 xen/drivers/passthrough/io.c           |  2 +-
 xen/drivers/passthrough/vtd/intremap.c | 41 ++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/domain.h       |  6 +++++
 xen/include/asm-x86/hvm/vmx/vmcs.h     |  3 +++
 xen/include/asm-x86/iommu.h            |  2 +-
 xen/include/asm-x86/msi.h              |  2 +-
 7 files changed, 80 insertions(+), 5 deletions(-)
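
Note: the 'in_use' hook is wired up below but has no consumer in this
patch; presumably a later patch in the series uses it. As a rough,
assumed sketch (not the actual follow-up change), a consumer could
skip the blocking-list bookkeeping when no IRTE can post to the vCPU:

  /* Assumed consumer -- illustrative only, not part of this patch. */
  static void example_vcpu_block(struct vcpu *v)
  {
      const struct hvm_pi_ops *ops = &v->domain->arch.hvm_domain.pi_ops;

      /* No IRTE references pi_desc, so no wakeup interrupt can arrive. */
      if ( ops->in_use && !ops->in_use(v) )
          return;

      /* ... existing blocking-list insertion would follow ... */
  }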

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c0d0b58..45a372e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -100,6 +100,23 @@ void vmx_pi_per_cpu_init(unsigned int cpu)
     spin_lock_init(&per_cpu(vmx_pi_blocking, cpu).lock);
 }
 
+void vmx_pi_get_ref(struct vcpu *v)
+{
+    ASSERT(atomic_read(&v->arch.hvm_vmx.pi_blocking.refcnt) >= 0);
+    atomic_inc(&v->arch.hvm_vmx.pi_blocking.refcnt);
+}
+
+void vmx_pi_put_ref(struct vcpu *v)
+{
+    atomic_dec(&v->arch.hvm_vmx.pi_blocking.refcnt);
+    ASSERT(atomic_read(&v->arch.hvm_vmx.pi_blocking.refcnt) >= 0);
+}
+
+bool vmx_pi_in_use(struct vcpu *v)
+{
+    return !!atomic_read(&v->arch.hvm_vmx.pi_blocking.refcnt);
+}
+
 /*
  * Choose an appropriate pcpu to receive wakeup interrupt.
  * By default, the local pcpu is chosen as the destination. But if the
@@ -422,6 +439,9 @@ void vmx_pi_hooks_assign(struct domain *d)
 
     d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
     d->arch.hvm_domain.pi_ops.do_resume = vmx_pi_do_resume;
+    d->arch.hvm_domain.pi_ops.get_ref = vmx_pi_get_ref;
+    d->arch.hvm_domain.pi_ops.put_ref = vmx_pi_put_ref;
+    d->arch.hvm_domain.pi_ops.in_use = vmx_pi_in_use;
 }
 
 /* This function is called when pcidevs_lock is held */
@@ -460,6 +480,15 @@ void vmx_pi_hooks_deassign(struct domain *d)
     d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
     d->arch.hvm_domain.pi_ops.switch_from = NULL;
     d->arch.hvm_domain.pi_ops.do_resume = NULL;
+    d->arch.hvm_domain.pi_ops.get_ref = NULL;
+    d->arch.hvm_domain.pi_ops.put_ref = NULL;
+    d->arch.hvm_domain.pi_ops.in_use = NULL;
+    /*
+     * If the device is still in use by the guest but we forcibly deassign
+     * it, 'refcnt' may be non-zero here. Clear it for re-assignment.
+     */
+    for_each_vcpu ( d, v )
+        atomic_set(&v->arch.hvm_vmx.pi_blocking.refcnt, 0);
 
     for_each_vcpu ( d, v )
         vmx_pi_unblock_vcpu(v);
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index e5a43e5..979be77 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -319,7 +319,7 @@ int pt_irq_create_bind(
     {
         uint8_t dest, dest_mode, delivery_mode;
         int dest_vcpu_id;
-        const struct vcpu *vcpu;
+        struct vcpu *vcpu;
 
         if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
         {
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index 1e0317c..99f1cce 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -596,6 +596,32 @@ static int remap_entry_to_msi_msg(
     return 0;
 }
 
+static void pi_get_ref(struct pi_desc *pi_desc)
+{
+    struct vcpu *v;
+
+    if ( !pi_desc )
+        return;
+
+    v = pi_desc_to_vcpu(pi_desc);
+    ASSERT(is_hvm_domain(v->domain));
+    if ( v->domain->arch.hvm_domain.pi_ops.get_ref )
+        v->domain->arch.hvm_domain.pi_ops.get_ref(v);
+}
+
+static void pi_put_ref(struct pi_desc *pi_desc)
+{
+    struct vcpu *v;
+
+    if ( !pi_desc )
+        return;
+
+    v = pi_desc_to_vcpu(pi_desc);
+    ASSERT(is_hvm_domain(v->domain));
+    if ( v->domain->arch.hvm_domain.pi_ops.put_ref )
+        v->domain->arch.hvm_domain.pi_ops.put_ref(v);
+}
+
 static int msi_msg_to_remap_entry(
     struct iommu *iommu, struct pci_dev *pdev,
     struct msi_desc *msi_desc, struct msi_msg *msg)
@@ -619,6 +645,7 @@ static int msi_msg_to_remap_entry(
         {
             free_remap_entry(iommu, msi_desc->remap_index + i);
             msi_desc[i].irte_initialized = false;
+            pi_put_ref(msi_desc[i].pi_desc);
         }
         spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
         return 0;
@@ -962,11 +989,12 @@ void iommu_disable_x2apic_IR(void)
  * This function is used to update the IRTE for posted-interrupt
  * when guest changes MSI/MSI-X information.
  */
-int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
+int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
     const uint8_t gvec)
 {
     struct irq_desc *desc;
     struct msi_desc *msi_desc;
+    struct pi_desc *old_pi_desc;
     int rc;
 
     desc = pirq_spin_lock_irq_desc(pirq, NULL);
@@ -979,13 +1007,22 @@ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
         rc = -ENODEV;
         goto unlock_out;
     }
+    old_pi_desc = msi_desc->pi_desc;
+
+    pi_get_ref(pi_desc);
     msi_desc->pi_desc = pi_desc;
     msi_desc->gvec = gvec;
 
     spin_unlock_irq(&desc->lock);
 
     ASSERT(pcidevs_locked());
-    return iommu_update_ire_from_msi(msi_desc, &msi_desc->msg);
+    rc = iommu_update_ire_from_msi(msi_desc, &msi_desc->msg);
+    if ( !rc )
+        pi_put_ref(old_pi_desc);
+    else
+        ASSERT_UNREACHABLE();
+
+    return rc;
 
  unlock_out:
     spin_unlock_irq(&desc->lock);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d2899c9..6fc97c4 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -98,6 +98,12 @@ struct hvm_pi_ops {
 
     /* Hook into the vmentry path. */
     void (*do_resume)(struct vcpu *v);
+
+    /* Acquire/release a reference to this vCPU's PI descriptor */
+    void (*get_ref)(struct vcpu *v);
+    void (*put_ref)(struct vcpu *v);
+    /* Is the PI descriptor of this vCPU referenced by any IRTE? */
+    bool (*in_use)(struct vcpu *v);
 };
 
 struct hvm_domain {
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 9507bd2..7cb1a92 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -85,6 +85,7 @@ struct pi_desc {
 struct pi_blocking_vcpu {
     struct list_head     list;
     spinlock_t           *lock;
+    atomic_t             refcnt;  /* How many IRTEs refer to this vCPU? */
 };
 
 struct arch_vmx_struct {
@@ -160,6 +161,8 @@ struct arch_vmx_struct {
     struct pi_blocking_vcpu pi_blocking;
 };
 
+#define pi_desc_to_vcpu(a) container_of(a, struct vcpu, arch.hvm_vmx.pi_desc)
+
 int vmx_create_vmcs(struct vcpu *v);
 void vmx_destroy_vmcs(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
index 0431233..cfa0058 100644
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -92,7 +92,7 @@ bool_t iommu_supports_eim(void);
 int iommu_enable_x2apic_IR(void);
 void iommu_disable_x2apic_IR(void);
 
-int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
+int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
                    const uint8_t gvec);
 
 #endif /* !__ARCH_X86_IOMMU_H__ */
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index a5de6a1..fbf1793 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -105,7 +105,7 @@ struct msi_desc {
 
        bool irte_initialized;
        uint8_t gvec;                   /* guest vector. valid when pi_desc isn't NULL */
-       const struct pi_desc *pi_desc;  /* pointer to posted descriptor */
+       struct pi_desc *pi_desc;        /* pointer to posted descriptor */
 
        struct list_head list;
 
-- 
1.8.3.1

