Re: [Xen-devel] [RFC PATCH] xen/arm: Deliver interrupts to vcpu specified in IROUTER
On Thu, 4 Sep 2014, vijay.kilari@xxxxxxxxx wrote:
> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>
> In GICv3, use the IROUTER register contents to
> deliver the irq to the specified vcpu.
>
> In error scenarios, fall back to vcpu 0.
>
> This patch is similar to Stefano's commit
> 5b3a817ea33b891caf7d7d788da9ce6deffa82a1 for GICv2
>
> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
> ---
> xen/arch/arm/vgic-v3.c | 115 ++++++++++++++++++++++++++++++++++++++++++++----
> 1 file changed, 106 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index ecda672..b01a201 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -45,6 +45,35 @@
> #define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2
> #define GICV3_GICR_PIDR4 GICV3_GICD_PIDR4
>
> +static struct vcpu *vgicv3_irouter_to_vcpu(struct vcpu *v,
> + uint64_t irouter_aff)
> +{
> + int i;
> + uint64_t cpu_affinity;
> + struct vcpu *v_target = NULL;
> +
> + /* If routing mode is set, fall back to vcpu0 */
> + if ( irouter_aff & GICD_IROUTER_SPI_MODE_ANY )
> + return v->domain->vcpu[0];
> +
> + for ( i = 0; i < v->domain->max_vcpus; i++ )
> + {
> + v_target = v->domain->vcpu[i];
> + cpu_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
> +
> + if ( irouter_aff == cpu_affinity )
> + return v_target;
> + }
Using a loop is pretty bad. Couldn't you just calculate the lowest vcpu
number from irouter_aff (maybe using find_first_bit)?
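For example, something along these lines would avoid the scan entirely (an
untested sketch; vaffinity_to_vcpuid is a hypothetical helper that undoes
however the vcpu id is packed into Aff0/Aff1 of the virtual MPIDR, so the
exact decoding is an assumption here):

static struct vcpu *vgicv3_irouter_to_vcpu(struct vcpu *v, uint64_t irouter_aff)
{
    struct domain *d = v->domain;
    unsigned int vcpu_id;

    /* Routing mode set: fall back to vcpu0 */
    if ( irouter_aff & GICD_IROUTER_SPI_MODE_ANY )
        return d->vcpu[0];

    /* Decode the vcpu id straight from the affinity fields */
    vcpu_id = vaffinity_to_vcpuid(irouter_aff);
    if ( vcpu_id >= d->max_vcpus )
        return NULL;    /* no matching vcpu, let the caller decide */

    return d->vcpu[vcpu_id];
}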
> + gdprintk(XENLOG_WARNING,
> + "d%d: No vcpu match found for irouter 0x%lx\n",
> + v->domain->domain_id, irouter_aff);
> + /* XXX: Fallback to vcpu0? */
> + return v->domain->vcpu[0];
> +}
> +
> static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
> uint32_t gicr_reg)
> {
> @@ -347,9 +376,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
> vgic_lock_rank(v, rank, flags);
> tr = rank->ienable;
> rank->ienable |= *r;
> - vgic_unlock_rank(v, rank, flags);
> /* The irq number is extracted from offset. so shift by register size */
> vgic_enable_irqs(v, (*r) & (~tr), (reg - GICD_ISENABLER) >> DABT_WORD);
> + vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_ICENABLER ... GICD_ICENABLERN:
> if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -358,9 +387,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
> vgic_lock_rank(v, rank, flags);
> tr = rank->ienable;
> rank->ienable &= ~*r;
> - vgic_unlock_rank(v, rank, flags);
> /* The irq number is extracted from offset. so shift by register size */
> vgic_disable_irqs(v, (*r) & tr, (reg - GICD_ICENABLER) >> DABT_WORD);
> + vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_ISPENDR ... GICD_ISPENDRN:
> if ( dabt.size != DABT_WORD ) goto bad_width;
> @@ -749,6 +778,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> register_t *r = select_user_reg(regs, dabt.reg);
> struct vgic_irq_rank *rank;
> unsigned long flags;
> + uint64_t new_target, old_target;
> + struct vcpu *old_vcpu, *new_vcpu;
> + int irq;
> int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
>
> switch ( gicd_reg )
> @@ -804,16 +836,42 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
> rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER, DABT_DOUBLE_WORD);
> if ( rank == NULL) goto write_ignore_64;
> - if ( *r )
> + new_target = *r;
> + vgic_lock_rank(v, rank, flags);
> + old_target = rank->v3.irouter[REG_RANK_INDEX(64,
> + (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
> + if ( *r & GICD_IROUTER_SPI_MODE_ANY )
> {
> - /* TODO: Ignored. We don't support irq delivery for vcpu != 0 */
> - gdprintk(XENLOG_DEBUG,
> - "SPI delivery to secondary cpus not supported\n");
> - goto write_ignore_64;
> + /*
> + * IRQ routing mode set: route to any one processor in the
> + * system. We choose vcpu 0.
> + */
> + rank->v3.irouter[REG_RANK_INDEX(64,
> + (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = *r;
> + vgic_unlock_rank(v, rank, flags);
> + return 1;
> }
> - vgic_lock_rank(v, rank, flags);
> + irq = (gicd_reg - GICD_IROUTER)/8;
> + /* IROUTER now specifies only one cpu affinity for this irq */
> + /* Migrate from any core(vcpu0) to new_target */
> + if ( (old_target & GICD_IROUTER_SPI_MODE_ANY) &&
> + !(new_target & GICD_IROUTER_SPI_MODE_ANY) )
> + {
> + new_vcpu = vgicv3_irouter_to_vcpu(v, new_target);
> + vgic_migrate_irq(v->domain->vcpu[0], new_vcpu, irq);
> + }
> + else
> + {
> + if ( old_target != new_target )
> + {
> + old_vcpu = vgicv3_irouter_to_vcpu(v, old_target);
> + new_vcpu = vgicv3_irouter_to_vcpu(v, new_target);
> + vgic_migrate_irq(old_vcpu, new_vcpu, irq);
> + }
> + }
The issue is that if irouter is GICD_IROUTER_SPI_MODE_ANY, you always
return vcpu0 below.
So if the kernel:
1) moves the irq to vcpu1
2) sets the irq as GICD_IROUTER_SPI_MODE_ANY
you have now migrated the irq to vcpu1, but you are still returning vcpu0 from
vgicv3_get_target_vcpu.
You can either:
- vgic_migrate_irq the irq back to vcpu0 when the kernel sets the irq as
GICD_IROUTER_SPI_MODE_ANY
- return the current vcpu for the irq rather than vcpu0 if
GICD_IROUTER_SPI_MODE_ANY
Either way would work; the second option might be better.
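With the second option, the write handler would not migrate anything when
GICD_IROUTER_SPI_MODE_ANY is written; it would keep the previously programmed
affinity and only record the mode bit, so the lookup can still resolve the
current target. A rough, untested sketch of that idea:

    /* GICD_IROUTER case in vgic_v3_distr_mmio_write */
    if ( new_target & GICD_IROUTER_SPI_MODE_ANY )
    {
        /* Keep delivering to the current target, just remember the mode bit */
        rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
                                        DABT_DOUBLE_WORD)] =
            old_target | GICD_IROUTER_SPI_MODE_ANY;
        vgic_unlock_rank(v, rank, flags);
        return 1;
    }

and in vgicv3_get_target_vcpu the early "return vcpu0" branch would go away:
mask GICD_IROUTER_SPI_MODE_ANY off the stored value and resolve the affinity
as usual.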
> rank->v3.irouter[REG_RANK_INDEX(64,
> - (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = *r;
> + (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = new_target;
> vgic_unlock_rank(v, rank, flags);
> return 1;
> case GICD_NSACR ... GICD_NSACRN:
> @@ -928,6 +986,44 @@ static const struct mmio_handler_ops vgic_distr_mmio_handler = {
> .write_handler = vgic_v3_distr_mmio_write,
> };
>
> +static struct vcpu *vgicv3_get_target_vcpu(struct vcpu *v, unsigned int irq)
for consistency I would call it vgic_v3_get_target_vcpu
> +{
> + int i;
> + uint64_t irq_affinity, cpu_affinity;
> + struct vcpu *v_target;
> + struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
> +
> + ASSERT(spin_is_locked(&rank->lock));
> +
> + irq_affinity = rank->v3.irouter[irq % 32];
> + if ( irq_affinity & GICD_IROUTER_SPI_MODE_ANY )
> + {
> + /*
> + * IRQ routing mode set: route to any one processor in the
> + * system. We choose vcpu 0.
> + */
> + return v->domain->vcpu[0];
> + }
> +
> + for ( i = 0; i < v->domain->max_vcpus; i++ )
> + {
> + v_target = v->domain->vcpu[i];
> + cpu_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8 |
> + MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
> +
> + if ( irq_affinity == cpu_affinity )
> + return v_target;
> + }
Same comment about the loop.
> + gdprintk(XENLOG_WARNING,
> + "d%d: No vcpu match found for irq %d with irouter 0x%lx\n",
> + v->domain->domain_id, irq, irq_affinity );
> +
> + /* XXX:Fallback to vcpu0 ? */
> + return v->domain->vcpu[0];
This is wrong: only valid values should be allowed in rank->v3.irouter.
If the kernel writes a bad value to IROUTER we should catch it in
vgic_v3_distr_mmio_write and avoid saving it.
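For instance, the GICD_IROUTER case of vgic_v3_distr_mmio_write could reject
the value before storing it, along these lines (untested sketch; it assumes
vgicv3_irouter_to_vcpu is changed to return NULL when no vcpu matches, as in
the sketch further up):

    new_target = *r;
    if ( !(new_target & GICD_IROUTER_SPI_MODE_ANY) &&
         vgicv3_irouter_to_vcpu(v, new_target) == NULL )
    {
        /* Affinity does not match any vcpu of this domain: ignore the write */
        gdprintk(XENLOG_WARNING,
                 "d%d: invalid IROUTER affinity 0x%"PRIx64", write ignored\n",
                 v->domain->domain_id, new_target);
        goto write_ignore_64;
    }

That keeps rank->v3.irouter limited to valid values, so the fallback in the
lookup path never has to guess.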
> +}
> +
> static int vgicv3_vcpu_init(struct vcpu *v)
> {
> int i;
> @@ -968,6 +1064,7 @@ static int vgicv3_domain_init(struct domain *d)
> static const struct vgic_ops v3_ops = {
> .vcpu_init = vgicv3_vcpu_init,
> .domain_init = vgicv3_domain_init,
> + .get_target_vcpu = vgicv3_get_target_vcpu,
> .emulate_sysreg = vgicv3_emulate_sysreg,
> };
>
> --
> 1.7.9.5
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel