
Re: [Xen-devel] [PATCH v9 03/28] ARM: GIC: Add checks for NULL pointer pending_irq's



Hi Andre,

On 11/05/17 18:53, Andre Przywara wrote:
For LPIs the struct pending_irq's are dynamically allocated and the
pointers will be stored in a radix tree. Since an LPI can be "unmapped"
at any time, teach the VGIC how to deal with irq_to_pending() returning
a NULL pointer.
In this case we either just do nothing, or clean up the LR if the virtual
LPI number was still in an LR.
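
To illustrate the lookup side (a rough sketch only; the pend_lpi_tree field
name is an assumption here, not necessarily what the series ends up using),
the LPI path boils down to a radix tree query that can legitimately come back
empty:

    static struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi)
    {
        ASSERT(is_lpi(lpi));

        /* NULL means the LPI was unmapped since the caller last saw it. */
        return radix_tree_lookup(&v->domain->arch.vgic.pend_lpi_tree, lpi);
    }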

These are all the call sites of irq_to_pending(), as found by
"git grep irq_to_pending", and how each of them is handled:
(PROTECTED means: a NULL check was added and we bail out)

    xen/arch/arm/gic.c:
gic_route_irq_to_guest(): only called for SPIs, added ASSERT()
gic_remove_irq_from_guest(): only called for SPIs, added ASSERT()
gic_remove_from_queues(): PROTECTED, called within VCPU VGIC lock
gic_raise_inflight_irq(): PROTECTED, called under VCPU VGIC lock
gic_raise_guest_irq(): PROTECTED, called under VCPU VGIC lock
gic_update_one_lr(): PROTECTED, called under VCPU VGIC lock

Even though they are protected, an ASSERT would still be useful.
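
Something along those lines would do (only a sketch, re-using the is_lpi()
helper the rest of this patch already relies on):

    p = irq_to_pending(v, virtual_irq);
    if ( unlikely(!p) )
    {
        /* Only a dynamically allocated (LPI) pending_irq may disappear. */
        ASSERT(is_lpi(virtual_irq));
        return;
    }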


    xen/arch/arm/vgic.c:
vgic_migrate_irq(): not called for LPIs (virtual IRQs), added ASSERT()
arch_move_irqs(): not iterating over LPIs, added ASSERT()
vgic_disable_irqs(): not called for LPIs, added ASSERT()
vgic_enable_irqs(): not called for LPIs, added ASSERT()
vgic_vcpu_inject_irq(): PROTECTED, moved under VCPU VGIC lock

    xen/include/asm-arm/event.h:
local_events_need_delivery_nomask(): only called for a PPI, added ASSERT()

    xen/include/asm-arm/vgic.h:
(prototype)
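
For reference, the added ASSERTs rely on is_lpi(); a minimal sketch of that
predicate, assuming the GICv3 convention that LPIs start at INTID 8192 (the
exact definition and location in this series may differ):

    #define LPI_OFFSET      8192    /* first LPI INTID on GICv3 */

    static inline bool is_lpi(unsigned int irq)
    {
        return irq >= LPI_OFFSET;
    }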

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/gic.c          | 34 ++++++++++++++++++++++++++++++----
 xen/arch/arm/vgic.c         | 24 ++++++++++++++++++++++++
 xen/include/asm-arm/event.h |  3 +++
 3 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index dcb1783..46bb306 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -148,6 +148,7 @@ int gic_route_irq_to_guest(struct domain *d, unsigned int virq,
     /* Caller has already checked that the IRQ is an SPI */
     ASSERT(virq >= 32);
     ASSERT(virq < vgic_num_irqs(d));
+    ASSERT(!is_lpi(virq));

     vgic_lock_rank(v_target, rank, flags);

@@ -184,6 +185,7 @@ int gic_remove_irq_from_guest(struct domain *d, unsigned int virq,
     ASSERT(spin_is_locked(&desc->lock));
     ASSERT(test_bit(_IRQ_GUEST, &desc->status));
     ASSERT(p->desc == desc);
+    ASSERT(!is_lpi(virq));

     vgic_lock_rank(v_target, rank, flags);

@@ -408,9 +410,13 @@ void gic_remove_from_queues(struct vcpu *v, unsigned int virtual_irq)
     spin_lock_irqsave(&v->arch.vgic.lock, flags);

     p = irq_to_pending(v, virtual_irq);
-
-    if ( !list_empty(&p->lr_queue) )
+    /*
+     * If an LPI has been removed meanwhile, it has been cleaned up
+     * already, so nothing to remove here.
+     */
+    if ( likely(p) && !list_empty(&p->lr_queue) )
         list_del_init(&p->lr_queue);
+
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
 }

@@ -418,6 +424,10 @@ void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
 {
     struct pending_irq *n = irq_to_pending(v, virtual_irq);

+    /* If an LPI has been removed meanwhile, there is nothing left to raise. */
+    if ( unlikely(!n) )
+        return;
+
     ASSERT(spin_is_locked(&v->arch.vgic.lock));

     if ( list_empty(&n->lr_queue) )
@@ -437,20 +447,25 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
 {
     int i;
     unsigned int nr_lrs = gic_hw_ops->info->nr_lrs;
+    struct pending_irq *p = irq_to_pending(v, virtual_irq);

     ASSERT(spin_is_locked(&v->arch.vgic.lock));

+    if ( unlikely(!p) )
+        /* An unmapped LPI does not need to be raised. */
+        return;
+
     if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
     {
         i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs);
         if (i < nr_lrs) {
             set_bit(i, &this_cpu(lr_mask));
-            gic_set_lr(i, irq_to_pending(v, virtual_irq), GICH_LR_PENDING);
+            gic_set_lr(i, p, GICH_LR_PENDING);
             return;
         }
     }

-    gic_add_to_lr_pending(v, irq_to_pending(v, virtual_irq));
+    gic_add_to_lr_pending(v, p);
 }

 static void gic_update_one_lr(struct vcpu *v, int i)
@@ -465,6 +480,17 @@ static void gic_update_one_lr(struct vcpu *v, int i)
     gic_hw_ops->read_lr(i, &lr_val);
     irq = lr_val.virq;
     p = irq_to_pending(v, irq);
+    /* An LPI might have been unmapped, in which case we just clean up here. */
+    if ( unlikely(!p) )
+    {
+        ASSERT(is_lpi(irq));
+
+        gic_hw_ops->clear_lr(i);
+        clear_bit(i, &this_cpu(lr_mask));
+
+        return;
+    }
+
     if ( lr_val.state & GICH_LR_ACTIVE )
     {
         set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index d30f324..8a5d93b 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -242,6 +242,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     unsigned long flags;
     struct pending_irq *p = irq_to_pending(old, irq);

+    /* This will never be called for an LPI, as we don't migrate them. */
+    ASSERT(!is_lpi(irq));
+
     /* nothing to do for virtual interrupts */
     if ( p->desc == NULL )
         return true;
@@ -291,6 +294,9 @@ void arch_move_irqs(struct vcpu *v)
     struct vcpu *v_target;
     int i;

+    /* We don't migrate LPIs at the moment. */
+    ASSERT(!is_lpi(vgic_num_irqs(d) - 1));
+
     for ( i = 32; i < vgic_num_irqs(d); i++ )
     {
         v_target = vgic_get_target_vcpu(v, i);
@@ -310,6 +316,9 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
     int i = 0;
     struct vcpu *v_target;

+    /* LPIs will never be disabled via this function. */
+    ASSERT(!is_lpi(32 * n + 31));
+
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
         v_target = vgic_get_target_vcpu(v, irq);
@@ -352,6 +361,9 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
     struct vcpu *v_target;
     struct domain *d = v->domain;

+    /* LPIs will never be enabled via this function. */
+    ASSERT(!is_lpi(32 * n + 31));
+
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
         v_target = vgic_get_target_vcpu(v, irq);
@@ -432,6 +444,12 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
     return true;
 }

+/*
+ * Returns the pointer to the struct pending_irq belonging to the given
+ * interrupt.
+ * This can return NULL if called for an LPI which has been unmapped
+ * meanwhile.
+ */
 struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
 {
     struct pending_irq *n;
@@ -475,6 +493,12 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
     spin_lock_irqsave(&v->arch.vgic.lock, flags);

     n = irq_to_pending(v, virq);
+    /* If an LPI has been removed, there is nothing to inject here. */
+    if ( unlikely(!n) )
+    {
+        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+        return;
+    }

     /* vcpu offline */
     if ( test_bit(_VPF_down, &v->pause_flags) )
diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h
index 5330dfe..caefa50 100644
--- a/xen/include/asm-arm/event.h
+++ b/xen/include/asm-arm/event.h
@@ -19,6 +19,9 @@ static inline int local_events_need_delivery_nomask(void)
     struct pending_irq *p = irq_to_pending(current,
                                            current->domain->arch.evtchn_irq);

+    /* Does not work for LPIs. */
+    ASSERT(!is_lpi(current->domain->arch.evtchn_irq));
+
     /* XXX: if the first interrupt has already been delivered, we should
      * check whether any other interrupts with priority higher than the
      * one in GICV_IAR are in the lr_pending queue or in the LR


Cheers,

--
Julien Grall
