
Re: [Xen-devel] [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs



On Thu, 2012-02-23 at 17:00 +0000, Stefano Stabellini wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.

There is an ordering constraint on this list. It should be mentioned
somewhere in a comment.
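
Something along these lines, perhaps (only a sketch of the sort of
comment I mean):

    /*
     * lr_pending: pending_irqs which could not be given an LR
     * immediately. Kept sorted by ascending priority value, i.e.
     * highest priority interrupt first, so that the head of the
     * list is always the next interrupt to inject when an LR is
     * freed in maintenance_interrupt().
     */
    struct list_head lr_pending;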

> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/gic.c           |   95 ++++++++++++++++++++++++++++++++----------
>  xen/include/asm-arm/domain.h |    1 +
>  2 files changed, 74 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..2ff7bce 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
>  #include <xen/sched.h>
>  #include <xen/errno.h>
>  #include <xen/softirq.h>
> +#include <xen/list.h>
>  #include <asm/p2m.h>
>  #include <asm/domain.h>
>  
> @@ -45,6 +46,8 @@ static struct {
>      unsigned int lines;
>      unsigned int cpus;
>      spinlock_t lock;
> +    uint64_t lr_mask;
> +    struct list_head lr_pending;
>  } gic;
>  
>  irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>  
>      GICH[GICH_HCR] = GICH_HCR_EN;
>      GICH[GICH_MISR] = GICH_MISR_EOI;
> +    gic.lr_mask = 0ULL;
> +    INIT_LIST_HEAD(&gic.lr_pending);
>  }
>  
>  /* Set up the GIC */
> @@ -345,16 +350,50 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
>      return rc;
>  }
>  
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
>          unsigned int state, unsigned int priority)
>  {
> -    BUG_ON(virtual_irq > nr_lrs);
> -    GICH[GICH_LR + virtual_irq] = state |
> +    BUG_ON(lr > nr_lrs);
> +    GICH[GICH_LR + lr] = state |
>          GICH_LR_MAINTENANCE_IRQ |
>          ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
>          ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
>  }
>  
> +void gic_set_guest_irq(unsigned int virtual_irq,
> +        unsigned int state, unsigned int priority)
> +{
> +    int i;
> +    struct pending_irq *iter, *n;
> +
> +    spin_lock(&gic.lock);
> +
> +    if ( list_empty(&gic.lr_pending) )

This could really do with some comments.

The logic here is that if the "lr_pending" list has nothing in it then
there are no other IRQs queued waiting for an LR to become available and
therefore we can take a fast path and inject this IRQ directly?
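
If so then a comment to that effect would do, e.g. (sketch only, and
using nr_lrs as the bound rather than sizeof(uint64_t), for the reason
given below):

    if ( list_empty(&gic.lr_pending) )
    {
        /*
         * No interrupt is queued waiting for an LR, so if a free
         * LR exists we can bypass the queue and inject directly.
         */
        i = find_first_zero_bit(&gic.lr_mask, nr_lrs);
        if ( i < nr_lrs )
        {
            set_bit(i, &gic.lr_mask);
            gic_set_lr(i, virtual_irq, state, priority);
            goto out;
        }
    }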

> +    {
> +        i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
> +        if (i < sizeof(uint64_t)) {

What does find_first_zero_bit(0xffff...ffff, 64) return?
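
If it behaves like the Linux version then it returns a value >= the
size argument when no zero bit is found, which is what the "i < ..."
test below relies on. Note also that the size argument is in bits,
while sizeof(uint64_t) is 8 (bytes), so as written only the first 8
LRs would ever be considered:

    uint64_t mask = ~0ULL;                  /* every LR in use */
    unsigned int i;

    /* No zero bit found: the return value is >= the size argument. */
    i = find_first_zero_bit(&mask, 64);     /* i == 64 */

    /* sizeof(mask) == 8, so this only ever searches bits 0..7. */
    i = find_first_zero_bit(&mask, sizeof(mask));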

> +            set_bit(i, &gic.lr_mask);
> +            gic_set_lr(i, virtual_irq, state, priority);
> +            goto out;
> +        }
> +    }
> +
> +    n = irq_to_pending(current, virtual_irq);
> +    list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> +    {
> +        if ( iter->priority < priority )
> +        {
> +            list_add_tail(&n->lr_link, &iter->lr_link);
> +            goto out;
> +        }
> +    }
> +    list_add(&n->lr_link, &gic.lr_pending);

Should this be list_add_tail?

Lower numbers are higher priority and therefore the list should be
ordered from low->high, since we want to pull the next interrupt off the
front of the list. I don't think the above implements that though.

If we imagine a list containing priorities [2,4,6] into which we are
inserting a priority 5 interrupt.

On the first iteration of the loop iter->priority == 2 so "if
(iter->priority < priority)" is true, and since list_add_tail(new, pos)
inserts new immediately before pos we end up inserting 5 before 2,
resulting in [5,2,4,6] rather than the [2,4,5,6] we want.
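
Something like the following would give the low->high ordering
described above (an untested sketch):

    /* Keep gic.lr_pending sorted by ascending priority value, i.e.
     * highest priority interrupt at the head of the list. */
    list_for_each_entry ( iter, &gic.lr_pending, lr_link )
    {
        if ( iter->priority > priority )
        {
            /* list_add_tail(new, pos) inserts new immediately
             * before pos, i.e. in front of the first entry with
             * a lower priority (larger number) than ours. */
            list_add_tail(&n->lr_link, &iter->lr_link);
            goto out;
        }
    }
    /* Lowest priority seen so far: append at the tail. */
    list_add_tail(&n->lr_link, &gic.lr_pending);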

> +
> +out:
> +    spin_unlock(&gic.lock);
> +    return;
> +}
> +
>  void gic_inject_irq_start(void)
>  {
>      uint32_t hcr;
> @@ -431,30 +470,42 @@ void gicv_setup(struct domain *d)
>  
>  static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
>  {
> -    int i, virq;
> +    int i = 0, virq;
>      uint32_t lr;
>      uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>  
> -    for ( i = 0; i < 64; i++ ) {
> -        if ( eisr & ((uint64_t)1 << i) ) {
> -            struct pending_irq *p;
> -
> -            lr = GICH[GICH_LR + i];
> -            virq = lr & GICH_LR_VIRTUAL_MASK;
> -            GICH[GICH_LR + i] = 0;
> -
> -            spin_lock(&current->arch.vgic.lock);
> -            p = irq_to_pending(current, virq);
> -            if ( p->desc != NULL ) {
> -                p->desc->status &= ~IRQ_INPROGRESS;
> -                GICC[GICC_DIR] = virq;
> -            }
> +    while ((i = find_next_bit((const long unsigned int *) &eisr,
> +                              sizeof(eisr), i)) < sizeof(eisr)) {
> +        struct pending_irq *p;
> +
> +        spin_lock(&gic.lock);
> +        lr = GICH[GICH_LR + i];
> +        virq = lr & GICH_LR_VIRTUAL_MASK;
> +        GICH[GICH_LR + i] = 0;
> +        clear_bit(i, &gic.lr_mask);
> +
> +        if ( !list_empty(gic.lr_pending.next) ) {

This seems odd, why not "list_empty(&gic.lr_pending)"?

Otherwise won't you fail to inject anything if there is exactly one
entry on lr_pending?
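
i.e. I would have expected something like:

    if ( !list_empty(&gic.lr_pending) )
    {
        p = list_entry(gic.lr_pending.next, struct pending_irq, lr_link);
        ...
    }

(or list_first_entry(), if we have it).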

> +            p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> +            gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> +            list_del_init(&p->lr_link);
> +            set_bit(i, &gic.lr_mask);
> +        } else {
>              gic_inject_irq_stop();
> -            list_del(&p->link);
> -            INIT_LIST_HEAD(&p->link);
> -            cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> -            spin_unlock(&current->arch.vgic.lock);
>          }
> +        spin_unlock(&gic.lock);
> +
> +        spin_lock(&current->arch.vgic.lock);
> +        p = irq_to_pending(current, virq);
> +        if ( p->desc != NULL ) {
> +            p->desc->status &= ~IRQ_INPROGRESS;
> +            GICC[GICC_DIR] = virq;
> +        }
> +        list_del(&p->link);
> +        INIT_LIST_HEAD(&p->link);
> +        cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> +        spin_unlock(&current->arch.vgic.lock);
> +
> +        i++;

Does find_next_bit include or exclude the bit which you give it as the
3rd argument?
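
If it follows the Linux semantics then the offset is inclusive, i.e.
the search starts at that bit, so the i++ at the bottom of the loop is
what stops this finding the same bit forever. E.g.:

    uint64_t eisr = 0x5;    /* bits 0 and 2 set */
    int i = 0;

    /* The offset is inclusive: bit 0 is itself a candidate. */
    i = find_next_bit((unsigned long *)&eisr, 64, i);       /* i == 0 */

    /* Step past the bit just handled before searching again. */
    i = find_next_bit((unsigned long *)&eisr, 64, i + 1);   /* i == 2 */

Note also that sizeof(eisr) is 8, not 64, so this loop has the same
bits-vs-bytes problem as find_first_zero_bit above.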

>      }
>  }
>  
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
>      struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
>      uint8_t priority;
>      struct list_head link;
> +    struct list_head lr_link;

Calling this "link" or "lr_link" when it is used in the context or link
registers (e.g. link-register_link) just adds to the confusion around
these two lists IMHO, as does having one just called link and the other
prefix_link. Calling them foo_list, both with a descriptive prefix,
would be better, e.g. active_list and queued_list (or whatever you deem
appropriate to their semantics)
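
i.e. something like this (names purely illustrative):

    struct pending_irq
    {
        struct irq_desc *desc;
        uint8_t priority;
        struct list_head inflight_list;   /* on vgic.inflight_irqs */
        struct list_head lr_queue_list;   /* queued waiting for a free LR */
    };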

Even better would be if the invariant "always on either active or
pending lists, never both" were true -- in which case only one list_head
would be needed here.

What do we actually use "link" for? It is chained off vgic.inflight_irqs,
but the only place we seem to use it for anything other than manipulating
itself is vgic_softirq, where it serves as a binary "something to inject"
flag -- we could just as well maintain a nr_inflight counter if that is
all it is for, couldn't we?

Ian.

>  };
>  
>  struct arch_domain



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

