
[Xen-devel] [PATCH v2 5/5] VT-d PI: Don't add vCPU to PI blocking list for a case



Currently, blocked vCPUs are added to the PI blocking list if their
domain has assigned devices. However, some blocked vCPUs will never be
woken up by the wakeup interrupt generated by the VT-d hardware; they
can only be woken up by IPIs or by interrupts from emulated devices.
Don't add such vCPUs to the PI blocking list.
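
In code terms, the intent is that vmx_vcpu_block() bails out early when
no IRTE references the vCPU's PI descriptor. A rough sketch of that
filter, mirroring the first hunk below (pi_ops.in_use is the hook the
hunks below rely on):

    /* Skip the PI blocking list when no IRTE refers to this vCPU's
     * PI descriptor, i.e. no VT-d wakeup interrupt can ever target it. */
    if ( v->domain->arch.hvm_domain.pi_ops.in_use &&
         !v->domain->arch.hvm_domain.pi_ops.in_use(v) )
        return;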

If a vCPU is blocked before it gets bound to an IRTE, we need to add it
to the blocking list at the point where the binding is established. In
that case, arch_vcpu_block() may be called from another vCPU, which the
current implementation can't handle. This patch expands
arch_vcpu_block(), removing some restrictions expressed by assertions
and handling the target vCPU according to its state and its PI blocking
list lock (v->arch.hvm_vmx.pi_blocking.lock).
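
To illustrate how the two possible callers -- the blocking vCPU itself
and pi_update_irte() running on another pCPU -- are serialised, here is
a condensed sketch of the patched vmx_vcpu_block() flow. It is only a
sketch: the per-cpu counter, tracing, the PI descriptor NV/NDST update
and the offline-pCPU handling are omitted, and v->processor is used
directly instead of the chosen destination pCPU.

    static void vmx_vcpu_block(struct vcpu *v)
    {
        unsigned long flags;
        spinlock_t *pi_blocking_list_lock =
            &per_cpu(vmx_pi_blocking, v->processor).lock;

        /* Only vCPUs whose PI descriptor is referenced by an IRTE can
         * be woken up by VT-d; others never need the blocking list. */
        if ( v->domain->arch.hvm_domain.pi_ops.in_use &&
             !v->domain->arch.hvm_domain.pi_ops.in_use(v) )
            return;

        /* A caller from pi_update_irte() may find the vCPU has been
         * unblocked again in the meantime; nothing to do then. */
        if ( !test_bit(_VPF_blocked, &v->pause_flags) )
            return;

        spin_lock_irqsave(pi_blocking_list_lock, flags);

        /*
         * Whichever caller first swaps NULL -> pi_blocking_list_lock
         * owns the insertion; the other backs off, since the vCPU is
         * already (being) queued on a blocking list.
         */
        if ( cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
                     pi_blocking_list_lock) != NULL )
        {
            spin_unlock_irqrestore(pi_blocking_list_lock, flags);
            return;
        }

        list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
                      &per_cpu(vmx_pi_blocking, v->processor).list);
        spin_unlock_irqrestore(pi_blocking_list_lock, flags);
    }

The binding side (last hunk) only calls arch_vcpu_block() when
pi_get_ref() takes the very first reference on the PI descriptor, so
together with the cmpxchg above a vCPU can never end up on a blocking
list twice.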

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c             | 20 +++++++++++++-------
 xen/drivers/passthrough/vtd/intremap.c | 18 ++++++++++++++++++
 2 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 45a372e..03d5fce 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -161,6 +161,14 @@ static void vmx_vcpu_block(struct vcpu *v)
     struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
     spinlock_t *pi_blocking_list_lock;
 
+    /* If no IRTE refers to 'pi_desc', nothing further needs to be done. */
+    if ( v->domain->arch.hvm_domain.pi_ops.in_use &&
+         !v->domain->arch.hvm_domain.pi_ops.in_use(v) )
+        return;
+
+    if ( !test_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
     /*
      * After pCPU goes down, the per-cpu PI blocking list is cleared.
      * To make sure the parameter vCPU is added to the chosen pCPU's
@@ -183,13 +191,11 @@ static void vmx_vcpu_block(struct vcpu *v)
 
     old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
                        pi_blocking_list_lock);
-
-    /*
-     * 'v->arch.hvm_vmx.pi_blocking.lock' should be NULL before
-     * being assigned to a new value, since the vCPU is currently
-     * running and it cannot be on any blocking list.
-     */
-    ASSERT(old_lock == NULL);
+    if ( old_lock )
+    {
+        spin_unlock_irqrestore(pi_blocking_list_lock, flags);
+        return;
+    }
 
     atomic_inc(&per_cpu(vmx_pi_blocking, dest_cpu).counter);
     HVMTRACE_4D(PI_LIST_ADD, v->domain->domain_id, v->vcpu_id, dest_cpu,
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index 99f1cce..806e397 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -622,6 +622,20 @@ static void pi_put_ref(struct pi_desc *pi_desc)
         v->domain->arch.hvm_domain.pi_ops.put_ref(v);
 }
 
+static bool pi_in_use(struct pi_desc *pi_desc)
+{
+    struct vcpu *v;
+
+    if ( !pi_desc )
+        return 0;
+
+    v = pi_desc_to_vcpu(pi_desc);
+    ASSERT(is_hvm_domain(v->domain));
+    if ( v->domain->arch.hvm_domain.pi_ops.in_use )
+        return v->domain->arch.hvm_domain.pi_ops.in_use(v);
+    return 0;
+}
+
 static int msi_msg_to_remap_entry(
     struct iommu *iommu, struct pci_dev *pdev,
     struct msi_desc *msi_desc, struct msi_msg *msg)
@@ -996,6 +1010,7 @@ int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
     struct msi_desc *msi_desc;
     struct pi_desc *old_pi_desc;
     int rc;
+    bool first_ref;
 
     desc = pirq_spin_lock_irq_desc(pirq, NULL);
     if ( !desc )
@@ -1009,7 +1024,10 @@ int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
     }
     old_pi_desc = msi_desc->pi_desc;
 
+    first_ref = !pi_in_use(pi_desc);
     pi_get_ref(pi_desc);
+    if ( pi_desc && first_ref )
+        arch_vcpu_block(pi_desc_to_vcpu(pi_desc));
     msi_desc->pi_desc = pi_desc;
     msi_desc->gvec = gvec;
 
-- 
1.8.3.1

