|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3] xen/arm: Deliver interrupts to vcpu specified in IROUTER
On Thu, 25 Sep 2014, vijay.kilari@xxxxxxxxx wrote:
> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>
> In GICv3 use IROUTER register contents to deliver irq to
> specified vcpu.
>
> vgic irouter[irq] is used to represent vcpu number for which
> irq affinity is assigned. Bit[31] is used to store IROUTER
> bit[31] value to represent irq mode.
>
> This patch is similar to Stefano's commit
> 5b3a817ea33b891caf7d7d788da9ce6deffa82a1 for GICv2
>
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> ---
> v3: -Removed assert and add check for irouter value in mmio write.
> -Added back and used affinity value for vgic irouter[]
> initialization.
> ---
> xen/arch/arm/vgic-v3.c | 113
> ++++++++++++++++++++++++++++++++++++++++--------
> 1 file changed, 96 insertions(+), 17 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index dd70591..c9739f4 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -45,10 +45,42 @@
> #define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2
> #define GICV3_GICR_PIDR4 GICV3_GICD_PIDR4
>
> +static struct vcpu *vgic_v3_irouter_to_vcpu(struct vcpu *v, uint64_t irouter)
> +{
> + irouter &= ~(GICD_IROUTER_SPI_MODE_ANY);
> + irouter = irouter & MPIDR_AFF0_MASK;
> +
> + return v->domain->vcpu[irouter];
> +}
> +
> +static uint64_t vgic_v3_vcpu_to_irouter(struct vcpu *v,
> + unsigned int vcpu_id)
> +{
> + uint64_t irq_affinity;
> + struct vcpu *v_target;
> +
> + v_target = v->domain->vcpu[vcpu_id];
> + irq_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
> +
> + return irq_affinity;
> +}
> +
> static struct vcpu *vgic_v3_get_target_vcpu(struct vcpu *v, unsigned int irq)
> {
> - /* TODO: Return vcpu0 always */
> - return v->domain->vcpu[0];
> + uint64_t target;
> + struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
> +
> + ASSERT(spin_is_locked(&rank->lock));
> +
> + target = rank->v3.irouter[irq % 32];
> + target &= ~(GICD_IROUTER_SPI_MODE_ANY);
> + target &= MPIDR_AFF0_MASK;
> + ASSERT(target >= 0 && target < v->domain->max_vcpus);
> +
> + return v->domain->vcpu[target];
> }
>
> static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
> @@ -353,9 +385,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu
> *v, mmio_info_t *info,
> vgic_lock_rank(v, rank, flags);
> tr = rank->ienable;
> rank->ienable |= *r;
> - vgic_unlock_rank(v, rank, flags);
> /* The irq number is extracted from offset. so shift by register
> size */
> vgic_enable_irqs(v, (*r) & (~tr), (reg - GICD_ISENABLER) >>
> DABT_WORD);
> + vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_ICENABLER ... GICD_ICENABLERN:
> if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -364,9 +396,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu
> *v, mmio_info_t *info,
> vgic_lock_rank(v, rank, flags);
> tr = rank->ienable;
> rank->ienable &= ~*r;
> - vgic_unlock_rank(v, rank, flags);
> /* The irq number is extracted from offset. so shift by register
> size */
> vgic_disable_irqs(v, (*r) & tr, (reg - GICD_ICENABLER) >> DABT_WORD);
> + vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_ISPENDR ... GICD_ISPENDRN:
> if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -620,6 +652,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v,
> mmio_info_t *info)
> register_t *r = select_user_reg(regs, dabt.reg);
> struct vgic_irq_rank *rank;
> unsigned long flags;
> + uint64_t irouter;
> + unsigned int vcpu_id;
> int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
>
> switch ( gicd_reg )
> @@ -673,8 +707,17 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v,
> mmio_info_t *info)
> DABT_DOUBLE_WORD);
> if ( rank == NULL ) goto read_as_zero;
> vgic_lock_rank(v, rank, flags);
> - *r = rank->v3.irouter[REG_RANK_INDEX(64,
> - (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> + irouter = rank->v3.irouter[REG_RANK_INDEX(64,
> + (gicd_reg - GICD_IROUTER),
> DABT_DOUBLE_WORD)];
> + /* XXX: bit[31] stores IRQ mode. Just return */
> + if ( irouter & GICD_IROUTER_SPI_MODE_ANY )
> + {
> + *r = GICD_IROUTER_SPI_MODE_ANY;
> + vgic_unlock_rank(v, rank, flags);
> + return 1;
> + }
> + vcpu_id = irouter & MPIDR_AFF0_MASK;
We should just set
vcpu_id = irouter;
as we have already filtered the value by MPIDR_AFF0_MASK when writing to irouter.
> + *r = vgic_v3_vcpu_to_irouter(v, vcpu_id);
> vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_NSACR ... GICD_NSACRN:
> @@ -755,6 +798,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v,
> mmio_info_t *info)
> register_t *r = select_user_reg(regs, dabt.reg);
> struct vgic_irq_rank *rank;
> unsigned long flags;
> + unsigned int new_target, old_target;
> + uint64_t new_irouter, old_irouter;
> + struct vcpu *old_vcpu, *new_vcpu;
> int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
>
> switch ( gicd_reg )
> @@ -811,16 +857,43 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v,
> mmio_info_t *info)
> rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
> DABT_DOUBLE_WORD);
> if ( rank == NULL ) goto write_ignore_64;
> - if ( *r )
> + BUG_ON(v->domain->max_vcpus > 8);
> + new_irouter = *r;
> + vgic_lock_rank(v, rank, flags);
> +
> + old_irouter = rank->v3.irouter[REG_RANK_INDEX(64,
> + (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> + old_irouter &= ~(GICD_IROUTER_SPI_MODE_ANY);
> + old_target = old_irouter & MPIDR_AFF0_MASK;
For the same reason we can just do
old_target = old_irouter;
so we can remove one of the two variables.
> + if ( new_irouter & GICD_IROUTER_SPI_MODE_ANY )
> {
> - /* TODO: Ignored. We don't support irq delivery for vcpu != 0 */
> - gdprintk(XENLOG_DEBUG,
> - "SPI delivery to secondary cpus not supported\n");
> - goto write_ignore_64;
> + /*
> + * IRQ routing mode set. Route any one processor in the entire
> + * system. We chose vcpu 0 and set IRQ mode bit[31] in irouter.
> + */
> + new_target = 0;
> + new_vcpu = v->domain->vcpu[0];
> + new_irouter = GICD_IROUTER_SPI_MODE_ANY;
> + }
> + else
> + {
> + new_target = new_irouter & MPIDR_AFF0_MASK;
> + if ( new_target >= v->domain->max_vcpus )
> + {
> + printk("vGICv3: vGICD: wrong irouter at offset %#08x\n",
> + gicd_reg);
You need to have:
vgic_unlock_rank(v, rank, flags);
before returning.
This is actually important, the other comments are just aesthetics.
> + return 0;
> + }
> + new_vcpu = vgic_v3_irouter_to_vcpu(v, new_irouter);
> + }
> +
> + rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
> + DABT_DOUBLE_WORD)] = new_irouter;
> + if ( old_target != new_target )
> + {
> + old_vcpu = v->domain->vcpu[old_target];
> + vgic_migrate_irq(old_vcpu, new_vcpu, (gicd_reg -
> GICD_IROUTER)/8);
> }
> - vgic_lock_rank(v, rank, flags);
> - rank->v3.irouter[REG_RANK_INDEX(64,
> - (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = *r;
> vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_NSACR ... GICD_NSACRN:
> @@ -957,17 +1030,23 @@ static int vgic_v3_vcpu_init(struct vcpu *v)
> MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 16 |
> MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 8 |
> MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 0));
> -
> + /* XXX: Store vcpu_id in AFF0 */
> for ( i = 0 ; i < 32 ; i++ )
> - v->arch.vgic.private_irqs->v3.irouter[i] = affinity;
> + v->arch.vgic.private_irqs->v3.irouter[i] = affinity &
> MPIDR_AFF0_MASK;
Here it is not useful to filter by MPIDR_AFF0_MASK yet again, correct?
Why not just leave it as it is or even better call vgic_v3_vcpu_to_irouter?
> return 0;
> }
>
> static int vgic_v3_domain_init(struct domain *d)
> {
> - int i;
> + int i,idx;
>
> + /* By default deliver to CPU0 */
> + for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
> + {
> + for ( idx = 0; idx < 32; idx++ )
> + d->arch.vgic.shared_irqs[i].v3.irouter[idx] = 0;
> + }
> /* We rely on gicv init to get dbase and size */
> register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
> d->arch.vgic.dbase_size);
> --
> 1.7.9.5
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
 |
Lists.xenproject.org is hosted with RackSpace, monitoring our |