|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH-4.5 v2 06/10] xen/arm: second irq injection while the first irq is still inflight
Set GICH_LR_PENDING in the corresponding GICH_LR to inject a second irq
while the first one is still active.
If the first irq is already pending (not active), just clear
GIC_IRQ_GUEST_PENDING because the irq has already been injected and is
already visible to the guest.
If the irq has already been EOI'ed then just clear the GICH_LR right
away and move the interrupt to lr_pending so that it is going to be
reinjected by gic_restore_pending_irqs on return to guest.
If the target cpu is not the current cpu, then set GIC_IRQ_GUEST_PENDING
and send an SGI. The target cpu is going to be interrupted and call
gic_clear_lrs, which is going to take the same actions.
Do not call vgic_vcpu_inject_irq from gic_inject if
evtchn_upcall_pending is set. If we remove that call, we don't need to
special case evtchn_irq in vgic_vcpu_inject_irq anymore.
We also need to force the first injection of evtchn_irq (call
vgic_vcpu_inject_irq) from vgic_enable_irqs because evtchn_upcall_pending
is already set by common code on vcpu creation.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
xen/arch/arm/gic.c | 82 +++++++++++++++++++++++++--------------------
xen/arch/arm/vgic.c | 18 +++++++---
xen/include/asm-arm/gic.h | 1 +
3 files changed, 61 insertions(+), 40 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 5fca5be..0955d48 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -698,51 +698,64 @@ out:
return;
}
-static void gic_clear_lrs(struct vcpu *v)
+static void _gic_clear_lr(struct vcpu *v, int i)
{
- struct pending_irq *p;
- int i = 0, irq;
+ int irq;
uint32_t lr;
- bool_t inflight;
+ struct pending_irq *p;
ASSERT(!local_irq_is_enabled());
- while ((i = find_next_bit((const long unsigned int *) &this_cpu(lr_mask),
- nr_lrs, i)) < nr_lrs) {
- lr = GICH[GICH_LR + i];
- if ( !(lr & (GICH_LR_PENDING|GICH_LR_ACTIVE)) )
+ lr = GICH[GICH_LR + i];
+ irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+ p = irq_to_pending(v, irq);
+ if ( lr & GICH_LR_ACTIVE )
+ {
+ if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
+ test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
+ GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
+ } else if ( lr & GICH_LR_PENDING ) {
+ clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
+ } else {
+ spin_lock(&gic.lock);
+
+ GICH[GICH_LR + i] = 0;
+ clear_bit(i, &this_cpu(lr_mask));
+
+ if ( p->desc != NULL )
+ p->desc->status &= ~IRQ_INPROGRESS;
+ clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
+ p->lr = nr_lrs;
+ if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
+ test_bit(GIC_IRQ_GUEST_ENABLED, &p->status))
{
- inflight = 0;
- GICH[GICH_LR + i] = 0;
- clear_bit(i, &this_cpu(lr_mask));
-
- irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
- spin_lock(&gic.lock);
- p = irq_to_pending(v, irq);
- if ( p->desc != NULL )
- p->desc->status &= ~IRQ_INPROGRESS;
- clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
- p->lr = nr_lrs;
- if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
- test_bit(GIC_IRQ_GUEST_ENABLED, &p->status))
- {
- inflight = 1;
- gic_add_to_lr_pending(v, irq, p->priority);
- }
- spin_unlock(&gic.lock);
- if ( !inflight )
- {
- spin_lock(&v->arch.vgic.lock);
- list_del_init(&p->inflight);
- spin_unlock(&v->arch.vgic.lock);
- }
+ gic_add_to_lr_pending(v, irq, p->priority);
+ } else
+ list_del_init(&p->inflight);
- }
+ spin_unlock(&gic.lock);
+ }
+}
+
+static void gic_clear_lrs(struct vcpu *v)
+{
+ int i = 0;
+ while ((i = find_next_bit((const long unsigned int *) &this_cpu(lr_mask),
+ nr_lrs, i)) < nr_lrs) {
+
+ spin_lock(&v->arch.vgic.lock);
+ _gic_clear_lr(v, i);
+ spin_unlock(&v->arch.vgic.lock);
i++;
}
}
+void gic_set_clear_lr(struct vcpu *v, struct pending_irq *p)
+{
+ _gic_clear_lr(v, p->lr);
+}
+
static void gic_restore_pending_irqs(struct vcpu *v)
{
int i;
@@ -801,9 +814,6 @@ void gic_inject(void)
{
gic_clear_lrs(current);
- if ( vcpu_info(current, evtchn_upcall_pending) )
- vgic_vcpu_inject_irq(current, current->domain->arch.evtchn_irq);
-
gic_restore_pending_irqs(current);
if (!gic_events_need_delivery())
gic_inject_irq_stop();
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index da15f4d..210ac39 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -387,7 +387,11 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r,
int n)
irq = i + (32 * n);
p = irq_to_pending(v, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
- if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE,
&p->status) )
+ if ( irq == v->domain->arch.evtchn_irq &&
+ vcpu_info(current, evtchn_upcall_pending) &&
+ list_empty(&p->inflight) )
+ vgic_vcpu_inject_irq(v, irq);
+ else if ( !list_empty(&p->inflight) &&
!test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
gic_set_guest_irq(v, irq, GICH_LR_PENDING, p->priority);
if ( p->desc != NULL )
p->desc->handler->enable(p->desc);
@@ -696,10 +700,16 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int
irq)
if ( !list_empty(&n->inflight) )
{
- if ( (irq != current->domain->arch.evtchn_irq) ||
- (!test_bit(GIC_IRQ_GUEST_VISIBLE, &n->status)) )
+ if ( v == current )
+ {
+ set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
+ gic_set_clear_lr(v, n);
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+ return;
+ } else {
set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
- goto out;
+ goto out;
+ }
}
/* vcpu offline */
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 6fce5c2..6de0d9b 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -184,6 +184,7 @@ extern void gic_remove_from_queues(struct vcpu *v, unsigned
int virtual_irq);
extern int gic_route_irq_to_guest(struct domain *d,
const struct dt_irq *irq,
const char * devname);
+extern void gic_set_clear_lr(struct vcpu *v, struct pending_irq *p);
/* Accept an interrupt from the GIC and dispatch its handler */
extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq);
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |