[Xen-devel] [PATCH v5 08/10] xen/arm: second irq injection while the first irq is still inflight
Set GICH_LR_PENDING in the corresponding GICH_LR to inject a second irq
while the first one is still active.
If the first irq is already pending (not yet active), just clear
GIC_IRQ_GUEST_PENDING because the irq has already been injected and is
already visible to the guest.
If the irq has already been EOI'ed, clear the GICH_LR right away and
move the interrupt to lr_pending so that it is reinjected by
gic_restore_pending_irqs on return to the guest.
If the target cpu is not the current cpu, set GIC_IRQ_GUEST_PENDING and
send an SGI. The target cpu will be interrupted and call gic_clear_lrs,
which takes the same actions.
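Putting the cases above together, the per-LR handling can be sketched
roughly as follows (an illustrative condensation of the gic_clear_one_lr
changes in this patch; locking and the remote-cpu SGI path are omitted):

    if ( lr & GICH_LR_ACTIVE )
    {
        /* First irq still active: make the LR ACTIVE and PENDING so the
         * guest takes the second irq when it EOIs the first. Only
         * software-only (non-HW) interrupts may use this combined state. */
        if ( p->desc == NULL &&
             test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
            GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
    }
    else if ( lr & GICH_LR_PENDING )
    {
        /* Already pending in the LR: the guest will see it anyway. */
        clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
    }
    else
    {
        /* EOI'ed: free the LR, then either re-queue the irq on lr_pending
         * (via gic_raise_guest_irq) or drop it from the inflight list. */
        GICH[GICH_LR + i] = 0;
        clear_bit(i, &this_cpu(lr_mask));
        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
        if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
             test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
            gic_raise_guest_irq(v, irq, p->priority);
        else
            list_del_init(&p->inflight);
    }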
Unify the inflight and non-inflight code paths in vgic_vcpu_inject_irq.
Do not call vgic_vcpu_inject_irq from gic_inject if
evtchn_upcall_pending is set: with that call removed, there is no need
to special-case evtchn_irq in vgic_vcpu_inject_irq anymore.
We also need to force the first injection of evtchn_irq (by calling
vgic_vcpu_inject_irq) from vgic_enable_irqs, because
evtchn_upcall_pending is already set by common code on vcpu creation.
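For reference, the forced first injection amounts to the following check
in vgic_enable_irqs (an excerpt of the hunk below):

    if ( irq == v->domain->arch.evtchn_irq &&
         vcpu_info(current, evtchn_upcall_pending) &&
         list_empty(&p->inflight) )
        vgic_vcpu_inject_irq(v, irq);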
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
Changes in v3:
- do not use the PENDING and ACTIVE state for HW interrupts;
- unify the inflight and non-inflight code paths in
vgic_vcpu_inject_irq.
---
xen/arch/arm/gic.c | 41 ++++++++++++++++++++++++-----------------
xen/arch/arm/vgic.c | 33 +++++++++++++++++----------------
2 files changed, 41 insertions(+), 33 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 0ebfbf0..77bdfe7 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -680,6 +680,14 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
{
int i;
unsigned long flags;
+ struct pending_irq *n = irq_to_pending(v, virtual_irq);
+
+ if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &n->status))
+ {
+ if ( v == current )
+ gic_clear_one_lr(v, n->lr);
+ return;
+ }
spin_lock_irqsave(&gic.lock, flags);
@@ -705,21 +713,28 @@ static void gic_clear_one_lr(struct vcpu *v, int i)
struct pending_irq *p;
uint32_t lr;
int irq;
- bool_t inflight;
ASSERT(!local_irq_is_enabled());
ASSERT(spin_is_locked(&v->arch.vgic.lock));
lr = GICH[GICH_LR + i];
- if ( !(lr & (GICH_LR_PENDING|GICH_LR_ACTIVE)) )
+ irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+ p = irq_to_pending(v, irq);
+ if ( lr & GICH_LR_ACTIVE )
{
- inflight = 0;
+ /* HW interrupts cannot be ACTIVE and PENDING */
+ if ( p->desc == NULL &&
+ test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
+ test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
+ GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
+ } else if ( lr & GICH_LR_PENDING ) {
+ clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
+ } else {
+ spin_lock(&gic.lock);
+
GICH[GICH_LR + i] = 0;
clear_bit(i, &this_cpu(lr_mask));
- irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
- spin_lock(&gic.lock);
- p = irq_to_pending(v, irq);
if ( p->desc != NULL )
p->desc->status &= ~IRQ_INPROGRESS;
clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
@@ -727,16 +742,11 @@ static void gic_clear_one_lr(struct vcpu *v, int i)
if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
test_bit(GIC_IRQ_GUEST_ENABLED, &p->status))
{
- inflight = 1;
gic_raise_guest_irq(v, irq, p->priority);
- }
- spin_unlock(&gic.lock);
- if ( !inflight )
- {
- spin_lock(&v->arch.vgic.lock);
+ } else
list_del_init(&p->inflight);
- spin_unlock(&v->arch.vgic.lock);
- }
+
+ spin_unlock(&gic.lock);
}
}
@@ -796,9 +806,6 @@ int gic_events_need_delivery(void)
void gic_inject(void)
{
- if ( vcpu_info(current, evtchn_upcall_pending) )
- vgic_vcpu_inject_irq(current, current->domain->arch.evtchn_irq);
-
gic_restore_pending_irqs(current);
if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 3913cf5..dc3a75f 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -389,7 +389,11 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
irq = i + (32 * n);
p = irq_to_pending(v, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
- if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+ if ( irq == v->domain->arch.evtchn_irq &&
+ vcpu_info(current, evtchn_upcall_pending) &&
+ list_empty(&p->inflight) )
+ vgic_vcpu_inject_irq(v, irq);
+ else if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
gic_raise_guest_irq(v, irq, p->priority);
if ( p->desc != NULL )
p->desc->handler->enable(p->desc);
@@ -696,14 +700,6 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
spin_lock_irqsave(&v->arch.vgic.lock, flags);
- if ( !list_empty(&n->inflight) )
- {
- if ( (irq != current->domain->arch.evtchn_irq) ||
- (!test_bit(GIC_IRQ_GUEST_VISIBLE, &n->status)) )
- set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
- goto out;
- }
-
/* vcpu offline */
if ( test_bit(_VPF_down, &v->pause_flags) )
{
@@ -715,21 +711,26 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
n->irq = irq;
set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
- n->priority = priority;
/* the irq is enabled */
if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
gic_raise_guest_irq(v, irq, priority);
- list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
+ if ( list_empty(&n->inflight) )
{
- if ( iter->priority > priority )
+ n->priority = priority;
+ list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
{
- list_add_tail(&n->inflight, &iter->inflight);
- goto out;
+ if ( iter->priority > priority )
+ {
+ list_add_tail(&n->inflight, &iter->inflight);
+ goto out;
+ }
}
- }
- list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+ list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+ } else if ( n->priority != priority )
+ gdprintk(XENLOG_WARNING, "Changing priority of an inflight interrupt is not supported");
+
out:
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
/* we have a new higher priority irq, inject it into the guest */
--
1.7.10.4