
Re: [Xen-devel] [RFC v1 PATCH] xen/arm: Deliver interrupts to vcpu specified in IROUTER



On Wed, 10 Sep 2014, vijay.kilari@xxxxxxxxx wrote:
> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> 
> In GICv3 use IROUTER register contents to
> deliver irq to specified vcpu's.
> 
> Each bit[0:30] of vgic irouter[] is used to represent vcpu
> number for which irq affinity is assigned.
> bit[31] is used to store IROUTER bit[31] value to
> represent irq mode.
>
> This patch is similar to Stefano's commit
> 5b3a817ea33b891caf7d7d788da9ce6deffa82a1 for GICv2
> 
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> ---
> v1: Used one bit of vgic irouter[] to store vcpu number
> ---
>  xen/arch/arm/vgic-v3.c |  138 ++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 115 insertions(+), 23 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index bc759a1..085ec76 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -45,12 +45,66 @@
>  #define GICV3_GICR_PIDR2  GICV3_GICD_PIDR2
>  #define GICV3_GICR_PIDR4  GICV3_GICD_PIDR4
>  
> -static struct vcpu *vgicv3_get_target_vcpu(struct vcpu *v, unsigned int irq)
> +static struct vcpu *vgicv3_irouter_to_vcpu(struct vcpu *v, uint64_t irouter_aff,
> +                                           unsigned int *target)

As I wrote in a comment on the previous patch, I would like to keep the
naming consistent. Since you used vgic_v2_* in vgic-v2.c, you should use
vgic_v3_* here (e.g. vgic_v3_irouter_to_vcpu rather than
vgicv3_irouter_to_vcpu). I won't repeat this comment for every function
in the patch.


>  {
> -    /* TODO: Return vcpu0 always */
> +    int i;
> +    uint64_t cpu_affinity;
> +    struct vcpu *v_target;
> +
> +    *target = 0;
> +    irouter_aff &= ~(GICD_IROUTER_SPI_MODE_ANY);
> +    for ( i = 0; i < v->domain->max_vcpus; i++ )
> +    {
> +        v_target = v->domain->vcpu[i];
> +        cpu_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
> +                        MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
> +                        MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8  |
> +                        MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
> +
> +        if ( irouter_aff == cpu_affinity )
> +        {
> +            *target = i;
> +            return v_target;
> +        }
> +    }
> +    gdprintk(XENLOG_WARNING,
> +             "d%d: No vcpu match found for irouter 0x%lx\n",
> +             v->domain->domain_id, irouter_aff);
> +    /* XXX: irouter is by default set to vcpu0. Should not reach here*/
>      return v->domain->vcpu[0];
>  }

I thought the whole point of storing a bitmask in irouter was to avoid
loops like this one. Did you forget to update this function?

In any case, I think that changing the format of irouter as you did in
this patch goes too far. What I meant was that if we store the vcpu_id
in affinity level 0 and don't actually use the other affinity levels,
then we can calculate v_target directly like this:

    v_target = v->domain->vcpu[irouter_aff & MPIDR_AFF0_MASK];

I think we misunderstood each other earlier, sorry for misdirecting you.
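
To make this concrete, with the vcpu_id encoded in AFF0 the whole helper
could collapse to something like the following (untested sketch, just to
show the shape; the signature change is only a suggestion):

    static struct vcpu *vgic_v3_irouter_to_vcpu(struct domain *d,
                                                uint64_t irouter)
    {
        unsigned int vcpu_id;

        /* The mode bit and the upper affinity levels are unused here. */
        irouter &= ~GICD_IROUTER_SPI_MODE_ANY;
        vcpu_id = irouter & MPIDR_AFF0_MASK;
        if ( vcpu_id >= d->max_vcpus )
            return NULL;

        return d->vcpu[vcpu_id];
    }

The caller can then decide how to handle a NULL return.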


> +static uint64_t vgicv3_vcpu_to_irouter(struct vcpu *v,
> +                                       unsigned int vcpu_id)
> +{
> +    uint64_t cpu_affinity;
> +    struct vcpu *v_target;
> + 
> +    v_target = v->domain->vcpu[vcpu_id];
> +    cpu_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
> +                    MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
> +                    MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8  |
> +                    MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
> +
> +    return cpu_affinity;
> +}
> +
> +static struct vcpu *vgicv3_get_target_vcpu(struct vcpu *v, unsigned int irq)
> +{
> +    uint64_t target;
> +    struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
> +
> +    ASSERT(spin_is_locked(&rank->lock));
> +
> +    target = rank->v3.irouter[irq % 32];
> +    target &= ~(GICD_IROUTER_SPI_MODE_ANY);
> +    target = find_first_bit(&target, 8);
> +    ASSERT(target >= 0 && target < v->domain->max_vcpus);
> +
> +    return v->domain->vcpu[target];
> +}

Here you can do something like the following (not actual code, just to give you the idea):

       target = (rank->v3.irouter[irq % 32]) & MPIDR_AFF0_MASK;
       if ( target >= v->domain->max_vcpus )
           return error;
       return v->domain->vcpu[target];
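
Where "return error" could simply mean falling back to vcpu0 with a
warning, as your irouter_to_vcpu already does when no match is found,
e.g. (sketch only):

    target = rank->v3.irouter[irq % 32] & MPIDR_AFF0_MASK;
    if ( target >= v->domain->max_vcpus )
    {
        gdprintk(XENLOG_WARNING, "d%d: bogus IROUTER value for irq %u\n",
                 v->domain->domain_id, irq);
        target = 0;
    }
    return v->domain->vcpu[target];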


>  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>                                           uint32_t gicr_reg)
>  {
> @@ -353,9 +407,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
>          vgic_lock_rank(v, rank, flags);
>          tr = rank->ienable;
>          rank->ienable |= *r;
> -        vgic_unlock_rank(v, rank, flags);
>          /* The irq number is extracted from offset. so shift by register size */
>          vgic_enable_irqs(v, (*r) & (~tr), (reg - GICD_ISENABLER) >> DABT_WORD);
> +        vgic_unlock_rank(v, rank, flags);
>          return 1;
>      case GICD_ICENABLER ... GICD_ICENABLERN:
>          if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -364,9 +418,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
>          vgic_lock_rank(v, rank, flags);
>          tr = rank->ienable;
>          rank->ienable &= ~*r;
> -        vgic_unlock_rank(v, rank, flags);
>          /* The irq number is extracted from offset. so shift by register size */
>          vgic_disable_irqs(v, (*r) & tr, (reg - GICD_ICENABLER) >> DABT_WORD);
> +        vgic_unlock_rank(v, rank, flags);
>          return 1;
>      case GICD_ISPENDR ... GICD_ISPENDRN:
>          if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -620,6 +674,7 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
>      register_t *r = select_user_reg(regs, dabt.reg);
>      struct vgic_irq_rank *rank;
>      unsigned long flags;
> +    uint64_t vcpu_id;
>      int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
>  
>      switch ( gicd_reg )
> @@ -672,8 +727,17 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
>                                  DABT_DOUBLE_WORD);
>          if ( rank == NULL) goto read_as_zero;
>          vgic_lock_rank(v, rank, flags);
> -        *r = rank->v3.irouter[REG_RANK_INDEX(64,
> -                              (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> +        vcpu_id = rank->v3.irouter[REG_RANK_INDEX(64,
> +                                  (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> +        /* XXX: bit[31] stores IRQ mode. Just return */
> +        if ( vcpu_id & GICD_IROUTER_SPI_MODE_ANY )
> +        {
> +            *r = GICD_IROUTER_SPI_MODE_ANY;
> +            vgic_unlock_rank(v, rank, flags);
> +            return 1;
> +        }
> +        vcpu_id = find_first_bit(&vcpu_id, 8);
> +        *r = vgicv3_vcpu_to_irouter(v, vcpu_id);
>          vgic_unlock_rank(v, rank, flags);
>          return 1;
>      case GICD_NSACR ... GICD_NSACRN:
> @@ -754,6 +818,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
>      register_t *r = select_user_reg(regs, dabt.reg);
>      struct vgic_irq_rank *rank;
>      unsigned long flags;
> +    unsigned int new_target, old_target;
> +    uint64_t new_irouter, old_irouter;
> +    struct vcpu *old_vcpu, *new_vcpu;
>      int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
>  
>      switch ( gicd_reg )
> @@ -810,16 +877,36 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
>          rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
>                                  DABT_DOUBLE_WORD);
>          if ( rank == NULL) goto write_ignore_64;
> -        if ( *r )
> +        BUG_ON(v->domain->max_vcpus > 8);
> +        new_irouter = *r;
> +        vgic_lock_rank(v, rank, flags);
> +
> +        old_irouter = rank->v3.irouter[REG_RANK_INDEX(64,
> +                              (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> +        old_irouter &= ~(GICD_IROUTER_SPI_MODE_ANY);
> +        old_target = find_first_bit(&old_irouter, 8);
> +        old_vcpu = v->domain->vcpu[old_target]; 
> +        if ( new_irouter & GICD_IROUTER_SPI_MODE_ANY )
>          {
> -            /* TODO: Ignored. We don't support irq delivery for vcpu != 0 */
> -            gdprintk(XENLOG_DEBUG,
> -                     "SPI delivery to secondary cpus not supported\n");
> -            goto write_ignore_64;
> +            /* 
> +             * IRQ routing mode set. Route any one processor in the entire
> +             * system. We chose vcpu 0. Also store IRQ mode bit[31] set.
> +             */
> +            new_vcpu = v->domain->vcpu[0];
> +            new_target = 0;
> +            new_irouter = ((1UL << new_target) | GICD_IROUTER_SPI_MODE_ANY);
> +            rank->v3.irouter[REG_RANK_INDEX(64,
> +                 (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = new_irouter;
>          }
> -        vgic_lock_rank(v, rank, flags);
> -        rank->v3.irouter[REG_RANK_INDEX(64,
> -                      (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = *r;
> +        else
> +        {
> +            new_vcpu = vgicv3_irouter_to_vcpu(v, new_irouter, &new_target);
> +            rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
> +                             DABT_DOUBLE_WORD)] = (1UL << new_target);
> +        }
> +
> +        if ( old_target != new_target )
> +            vgic_migrate_irq(old_vcpu, new_vcpu, (gicd_reg - GICD_IROUTER)/8);

Aside from the change to the irouter format, which I think is
unnecessary, the idea of implementing GICD_IROUTER_SPI_MODE_ANY as
"deliver to vcpu0" should work well.
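
For the write side, with the same AFF0 encoding the handler could be
reduced to something along these lines (again pseudo-code, not tested):

    new_irouter = *r;
    vgic_lock_rank(v, rank, flags);
    old_target = rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
                                  DABT_DOUBLE_WORD)] & MPIDR_AFF0_MASK;

    if ( new_irouter & GICD_IROUTER_SPI_MODE_ANY )
    {
        /* "Any" routing mode: deliver to vcpu0, remember only the mode bit. */
        new_irouter = GICD_IROUTER_SPI_MODE_ANY;
        new_target = 0;
    }
    else
        new_target = new_irouter & MPIDR_AFF0_MASK;

    if ( new_target >= v->domain->max_vcpus )
    {
        vgic_unlock_rank(v, rank, flags);
        goto write_ignore_64;
    }

    rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
                     DABT_DOUBLE_WORD)] = new_irouter;
    if ( old_target != new_target )
        vgic_migrate_irq(v->domain->vcpu[old_target],
                         v->domain->vcpu[new_target],
                         (gicd_reg - GICD_IROUTER) / 8);
    vgic_unlock_rank(v, rank, flags);
    return 1;

That should also make the BUG_ON(v->domain->max_vcpus > 8) unnecessary,
since nothing would depend on the one-bit-per-vcpu format anymore.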


>          vgic_unlock_rank(v, rank, flags);
>          return 1;
>      case GICD_NSACR ... GICD_NSACRN:
> @@ -949,23 +1036,28 @@ static int vgicv3_get_irq_priority(struct vcpu *v, unsigned int irq)
>  static int vgicv3_vcpu_init(struct vcpu *v)
>  {
>      int i;
> -    uint64_t affinity;
> -
>      /* For SGI and PPI the target is always this CPU */
> -    affinity = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 32 |
> -                MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 16 |
> -                MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 8  |
> -                MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 0));
> -
> +    /*
> +     * XXX: Set CPU0 bit mask in irouter[] instead of writing
> +     * actual vmpidr affinity value. Each bit in irouter is
> +     * interpreted as corresponding cpu.
> +     */
>      for ( i = 0 ; i < 32 ; i++ )
> -        v->arch.vgic.private_irqs->v3.irouter[i] = affinity;
> +        v->arch.vgic.private_irqs->v3.irouter[i] = 0x1;
>  
>      return 0;
>  }
>  
>  static int vgicv3_domain_init(struct domain *d)
>  {
> -    int i;
> +    int i,idx;
> +
> +    /* By default deliver to CPU0 */
> +    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
> +    {
> +        for ( idx = 0; idx < 32; idx++ )
> +            d->arch.vgic.shared_irqs[i].v3.irouter[idx] = 0x1;
> +    }
>  
>      /* We rely on gicv init to get dbase and size */
>      register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
> -- 
> 1.7.9.5
> 
