[Xen-devel] [PATCH v3] xen/arm: fix rank/vgic lock inversion bug
Always set the new physical irq affinity at the beginning of
vgic_migrate_irq, in all cases.

As a result, there is no need to set the physical irq affinity in
gic_update_one_lr anymore, which solves the lock inversion problem.
After migrating an interrupt from vcpu/pcpu 0 to vcpu/pcpu 1, it is
possible for a physical interrupt to arrive on pcpu 1, where Xen is
supposed to inject it into vcpu 1, before the LR on pcpu 0 has been
cleared. In this case the irq is still marked as
GIC_IRQ_GUEST_MIGRATING, and the struct pending_irq is still "inflight"
on vcpu 0. As the irq cannot be "inflight" on vcpu 0 and vcpu 1
simultaneously, drop the interrupt.
Coverity-ID: 1381855
Coverity-ID: 1381853
Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
xen/arch/arm/gic.c | 6 +-----
xen/arch/arm/vgic.c | 19 +++++++++++--------
2 files changed, 12 insertions(+), 13 deletions(-)
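
[Note, not part of the patch: as a reading aid, the order of checks that
vgic_migrate_irq performs once this change is applied is sketched below
as a small standalone C model. Every name in it (migrate_action,
migrate_decision, the ACT_* constants) is made up for illustration; the
real code operates on struct pending_irq fields under
old->arch.vgic.lock, as the hunks below show.]

/*
 * Standalone sketch (not Xen code): models the order of checks in
 * vgic_migrate_irq after this patch. Only the decision order mirrors
 * the patched function; all identifiers are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum migrate_action {
    ACT_NONE,           /* virtual irq: no physical affinity to move */
    ACT_AFFINITY_ONLY,  /* affinity updated, nothing else left to do */
    ACT_DEQUEUE_OLD,    /* still lr_pending: removed from the old vcpu's queues */
    ACT_WAIT_FOR_EOI,   /* already in an LR: set GIC_IRQ_GUEST_MIGRATING */
};

static enum migrate_action migrate_decision(bool has_desc, bool migrating,
                                            bool inflight, bool lr_pending)
{
    if ( !has_desc )
        return ACT_NONE;
    /* irq_set_affinity() now runs here, before any early return */
    if ( migrating || !inflight )
        return ACT_AFFINITY_ONLY;
    if ( lr_pending )
        return ACT_DEQUEUE_OLD;
    return ACT_WAIT_FOR_EOI;
}

int main(void)
{
    /* e.g. inflight but still lr_pending -> dequeued from the old vcpu */
    printf("%d\n", migrate_decision(true, false, true, true));
    return 0;
}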
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index a5348f2..767fc9e 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -504,11 +504,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
             gic_raise_guest_irq(v, irq, p->priority);
         else {
             list_del_init(&p->inflight);
-            if ( test_and_clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
-            {
-                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
-                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
-            }
+            clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
         }
     }
 }
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 364d5f0..11ffb9b 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -264,20 +264,17 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     if ( p->desc == NULL )
         return;
+    irq_set_affinity(p->desc, cpumask_of(new->processor));
+
     /* migration already in progress, no need to do anything */
     if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
         return;
+    if ( list_empty(&p->inflight) )
+        return;
     perfc_incr(vgic_irq_migrates);
     spin_lock_irqsave(&old->arch.vgic.lock, flags);
-
-    if ( list_empty(&p->inflight) )
-    {
-        irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return;
-    }
     /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
     if ( !list_empty(&p->lr_queue) )
     {
@@ -286,7 +283,6 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         list_del_init(&p->inflight);
         irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        vgic_vcpu_inject_irq(new, irq);
         return;
     }
     /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
@@ -495,6 +491,13 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
         return;
     }
+    if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &n->status) )
+    {
+        /* Drop the interrupt, because it is still inflight on another vcpu */
+        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+        return;
+    }
+
     set_bit(GIC_IRQ_GUEST_QUEUED, &n->status);
     if ( !list_empty(&n->inflight) )
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel