|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 2/2] xen/gic: EOI irqs on the right pcpu
On 05/03/2013 03:58 PM, Stefano Stabellini wrote:
> We need to write the irq number to GICC_DIR on the physical cpu that
> previously received the interrupt, but currently we are doing it on the
> pcpu that received the maintenance interrupt. As a consequence if a
> vcpu is migrated to a different pcpu, the irq is going to be EOI'ed on
> the wrong pcpu.
>
> This covers the case where dom0 vcpu0 is running on pcpu1 for example
> (you can test this scenario by using xl vcpu-pin).
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
> xen/arch/arm/gic.c | 15 ++++++++++++++-
> xen/arch/arm/vgic.c | 7 ++++++-
> xen/include/asm-arm/domain.h | 2 ++
> 3 files changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index 61de230..9514d1c 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -723,6 +723,12 @@ int gicv_setup(struct domain *d)
> gic.vbase);
> }
>
> +static void gic_irq_eoi(void *info)
> +{
> + int virq = *(int *)info;
> + GICC[GICC_DIR] = virq;
> +}
> +
> static void maintenance_interrupt(int irq, void *dev_id, struct
> cpu_user_regs *regs)
> {
> int i = 0, virq;
> @@ -753,8 +759,15 @@ static void maintenance_interrupt(int irq, void *dev_id,
> struct cpu_user_regs *r
> spin_lock_irq(&v->arch.vgic.lock);
> p = irq_to_pending(v, virq);
> if ( p->desc != NULL ) {
> + int cpu;
> p->desc->status &= ~IRQ_INPROGRESS;
> - GICC[GICC_DIR] = virq;
> + /* Assume only one pcpu needs an EOI */
> + cpu = cpumask_first(&p->eoimask);
> + cpumask_clear(&p->eoimask);
> + if ( cpu == smp_processor_id() )
> + gic_irq_eoi(&virq);
> + else
> + on_selected_cpus(cpumask_of(cpu), gic_irq_eoi, &virq, 0);
There is a race condition here. You pass a pointer to a local variable
(virq) and you don't wait for the function to complete (the last argument
to on_selected_cpus is 0). So virq could be updated or go out of scope
before the remote cpu performs the EOI.
> }
> list_del_init(&p->inflight);
> spin_unlock_irq(&v->arch.vgic.lock);
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index f9c1a6b..c5370d5 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -676,9 +676,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int
> irq, int virtual)
> n->irq = irq;
> n->priority = priority;
> if (!virtual)
> + {
> n->desc = irq_to_desc(irq);
> - else
> + cpumask_clear(&n->eoimask);
> + /* Assume we received the IRQ on the current pcpu */
> + cpumask_set_cpu(smp_processor_id(), &n->eoimask);
> + } else {
> n->desc = NULL;
> + }
>
> /* the irq is enabled */
> if ( rank->ienable & (1 << (irq % 32)) )
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index cca7416..5561531 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -3,6 +3,7 @@
>
> #include <xen/config.h>
> #include <xen/cache.h>
> +#include <xen/cpumask.h>
> #include <xen/sched.h>
> #include <asm/page.h>
> #include <asm/p2m.h>
> @@ -21,6 +22,7 @@ struct pending_irq
> {
> int irq;
> struct irq_desc *desc; /* only set it the irq corresponds to a physical
> irq */
> + cpumask_t eoimask;
> uint8_t priority;
> /* inflight is used to append instances of pending_irq to
> * vgic.inflight_irqs */
--
Julien
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |