
[Xen-devel] [PATCH 2/2] xen: do not implement irq_mask and irq_unmask in xen_percpu_chip



From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

irq_mask and irq_unmask are not needed by handle_percpu_irq, but they
are called by fixup_irqs when changing affinity: fixup_irqs masks the
interrupt, updates its affinity, then calls irq_unmask. As a result,
fixup_irqs can end up unmasking an interrupt that was deliberately
masked beforehand. However, xen_percpu_chip doesn't even support
irq_set_affinity, and handle_percpu_irq never calls irq_mask or
irq_unmask, so we can simply turn both into noops, following the
example of uv_irq_chip (sketched below, after the diff). Since the
generic irq code falls back to calling irq_unmask when a chip provides
no irq_enable, we also have to set irq_enable to enable_dynirq
explicitly now that irq_unmask is a noop.
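
For reference, handle_percpu_irq only ever uses the chip's irq_ack and
irq_eoi callbacks. Roughly, paraphrased from kernel/irq/chip.c of this
kernel generation (check the tree for the authoritative version):

	/*
	 * handle_percpu_irq acks, runs the per-cpu handler, and eois;
	 * it never touches irq_mask or irq_unmask, so noops are safe
	 * there.
	 */
	void handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);

		kstat_incr_irqs_this_cpu(irq, desc);

		if (chip->irq_ack)
			chip->irq_ack(&desc->irq_data);

		handle_irq_event_percpu(desc, desc->action);

		if (chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);
	}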

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 drivers/xen/events.c |    6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index bb59651..8eb5316 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -120,6 +120,7 @@ static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
 static void enable_dynirq(struct irq_data *data);
 static void disable_dynirq(struct irq_data *data);
+static void evtchn_noop(struct irq_data *data) { }
 
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
@@ -1595,9 +1596,10 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
 static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",
 
+       .irq_enable             = enable_dynirq,
        .irq_disable            = disable_dynirq,
-       .irq_mask               = disable_dynirq,
-       .irq_unmask             = enable_dynirq,
+       .irq_mask               = evtchn_noop,
+       .irq_unmask             = evtchn_noop,
 
        .irq_ack                = ack_dynirq,
 };
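
For comparison, the uv_irq_chip precedent mentioned above looks
roughly like this (paraphrased from arch/x86's uv_irq.c of this era;
see the tree for the exact definition):

	/* Precedent: UV also stubs out mask/unmask with an empty
	 * handler and does the real work in its eoi callback. */
	static void uv_noop(struct irq_data *data) { }

	static struct irq_chip uv_irq_chip = {
		.name			= "UV-CORE",
		.irq_mask		= uv_noop,
		.irq_unmask		= uv_noop,
		.irq_eoi		= uv_ack_apic,
		.irq_set_affinity	= uv_set_irq_affinity,
	};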
-- 
1.7.2.3

