|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 for-xen-4.5 2/3] dpci: In hvm_dirq_assist stop using pt_pirq_iterate
When an interrupt for a PCI (or PCIe) passthrough device
is to be sent to a guest, we find the appropriate
'hvm_dirq_dpci' structure for the interrupt (PIRQ), set
a bit, and schedule a tasklet.
As we are now called from dpci_softirq with the outstanding
'struct hvm_pirq_dpci' there is no need to call pt_pirq_iterate
which will iterate over all of the PIRQs and call us with every
one that is mapped. _hvm_dirq_assist then has to figure out
which one to execute.
This is inefficient and not fair as:
- We iterate starting at PIRQ 0 and up every time. That means
the PCIe devices that have lower PIRQs get to be called
first.
- If we have many PCIe devices passed in with many PIRQs and
most of the time only the highest numbered PIRQ gets an
interrupt - we iterate over many PIRQs.
Since we know which 'hvm_pirq_dpci' to check - as the tasklet is
called for a specific 'struct hvm_pirq_dpci' - we do that
in this patch.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
xen/drivers/passthrough/io.c | 28 +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 59b5c09..d2a1214 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -534,9 +534,14 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector)
spin_unlock(&d->event_lock);
}
-static int _hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
- void *arg)
+static void hvm_dirq_assist(unsigned long _d)
{
+ struct hvm_pirq_dpci *pirq_dpci = (struct hvm_pirq_dpci *)_d;
+ struct domain *d = pirq_dpci->dom;
+
+ ASSERT(d->arch.hvm_domain.irq.dpci);
+
+ spin_lock(&d->event_lock);
if ( test_and_clear_bool(pirq_dpci->masked) )
{
struct pirq *pirq = dpci_pirq(pirq_dpci);
@@ -547,13 +552,13 @@ static int _hvm_dirq_assist(struct domain *d, struct
hvm_pirq_dpci *pirq_dpci,
send_guest_pirq(d, pirq);
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
- return 0;
+ goto out;
}
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
{
vmsi_deliver_pirq(d, pirq_dpci);
- return 0;
+ goto out;
}
list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
@@ -566,7 +571,7 @@ static int _hvm_dirq_assist(struct domain *d, struct
hvm_pirq_dpci *pirq_dpci,
{
/* for translated MSI to INTx interrupt, eoi as early as possible
*/
__msi_pirq_eoi(pirq_dpci);
- return 0;
+ goto out;
}
/*
@@ -579,18 +584,7 @@ static int _hvm_dirq_assist(struct domain *d, struct
hvm_pirq_dpci *pirq_dpci,
ASSERT(pt_irq_need_timer(pirq_dpci->flags));
set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT);
}
-
- return 0;
-}
-
-static void hvm_dirq_assist(unsigned long _d)
-{
- struct domain *d = ((struct hvm_pirq_dpci *)_d)->dom;
-
- ASSERT(d->arch.hvm_domain.irq.dpci);
-
- spin_lock(&d->event_lock);
- pt_pirq_iterate(d, _hvm_dirq_assist, NULL);
+ out:
spin_unlock(&d->event_lock);
}
--
1.9.3
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |