[Xen-devel] [PATCH for-4.6 1/4] xen/arm: vgic: Rename nr_lines into nr_spis
The field nr_lines in the arch_domain vgic structure contains the number
of SPIs for the emulated GIC. Using the name nr_lines is confusing with
respect to the GIC code, where it means the total number of IRQs, and can
lead to coding errors.

Also introduce vgic_num_irqs() to get the number of IRQs handled by the
emulated GIC.

Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---
    It was part of the platform device passthrough series:
    https://patches.linaro.org/34661/

    Stefano: I've kept your ack from the previous version. Let me know if
    there is any issue.

    Changes in v3:
        - Add acked-by from Stefano.
        - Update the patch to also modify the GICv3 code, which has been
          pushed recently.

    Changes in v2:
        - Patch added.
---
 xen/arch/arm/gic-v2.c        |  2 --
 xen/arch/arm/gic-v3.c        |  2 +-
 xen/arch/arm/vgic-v2.c       |  2 +-
 xen/arch/arm/vgic-v3.c       |  2 +-
 xen/arch/arm/vgic.c          | 15 ++++++---------
 xen/include/asm-arm/domain.h |  2 +-
 xen/include/asm-arm/vgic.h   |  4 +++-
 7 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index faad1ff..31fb81a 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -432,8 +432,6 @@ static int gicv2v_setup(struct domain *d)
         d->arch.vgic.cbase = GUEST_GICC_BASE;
     }
 
-    d->arch.vgic.nr_lines = 0;
-
     /*
      * Map the gic virtual cpu interface in the gic cpu interface
      * region of the guest.
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index 076aa62..ec48fc1 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -922,7 +922,7 @@ static int gicv_v3_init(struct domain *d)
         d->arch.vgic.rbase_size[0] = GUEST_GICV3_GICR0_SIZE;
     }
 
-    d->arch.vgic.nr_lines = 0;
+    d->arch.vgic.nr_spis = 0;
 
     return 0;
 }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 1369f78..039e19a 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -54,7 +54,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
         /* No secure world support for guests. */
         vgic_lock(v);
         *r = ( (v->domain->max_vcpus << 5) & GICD_TYPE_CPUS )
-            |( ((v->domain->arch.vgic.nr_lines / 32)) & GICD_TYPE_LINES );
+            |( ((v->domain->arch.vgic.nr_spis / 32)) & GICD_TYPE_LINES );
         vgic_unlock(v);
         return 1;
     case GICD_IIDR:
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index ff99e50..2785c10 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -668,7 +668,7 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
         if ( dabt.size != DABT_WORD ) goto bad_width;
         /* No secure world support for guests. */
         *r = (((v->domain->max_vcpus << 5) & GICD_TYPE_CPUS ) |
-              ((v->domain->arch.vgic.nr_lines / 32) & GICD_TYPE_LINES));
+              ((v->domain->arch.vgic.nr_spis / 32) & GICD_TYPE_LINES));
         return 1;
     case GICD_STATUSR:
         /*
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 97061ce..75cb7ff 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -66,13 +66,10 @@ int domain_vgic_init(struct domain *d)
 
     d->arch.vgic.ctlr = 0;
 
-    /* Currently nr_lines in vgic and gic doesn't have the same meanings
-     * Here nr_lines = number of SPIs
-     */
     if ( is_hardware_domain(d) )
-        d->arch.vgic.nr_lines = gic_number_lines() - 32;
+        d->arch.vgic.nr_spis = gic_number_lines() - 32;
     else
-        d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
+        d->arch.vgic.nr_spis = 0; /* We don't need SPIs for the guest */
 
     switch ( gic_hw_version() )
     {
@@ -96,11 +93,11 @@ int domain_vgic_init(struct domain *d)
         return -ENOMEM;
 
     d->arch.vgic.pending_irqs =
-        xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
+        xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
     if ( d->arch.vgic.pending_irqs == NULL )
         return -ENOMEM;
 
-    for (i=0; i<d->arch.vgic.nr_lines; i++)
+    for (i=0; i<d->arch.vgic.nr_spis; i++)
     {
         INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
         INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
@@ -218,7 +215,7 @@ void arch_move_irqs(struct vcpu *v)
     struct vcpu *v_target;
     int i;
 
-    for ( i = 32; i < (d->arch.vgic.nr_lines + 32); i++ )
+    for ( i = 32; i < vgic_num_irqs(d); i++ )
     {
         v_target = vgic_get_target_vcpu(v, i);
         p = irq_to_pending(v_target, i);
@@ -344,7 +341,7 @@ int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, int
 struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
 {
     struct pending_irq *n;
-    /* Pending irqs allocation strategy: the first vgic.nr_lines irqs
+    /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
      * are used for SPIs; the rests are used for per cpu irqs */
     if ( irq < 32 )
         n = &v->arch.vgic.pending_irqs[irq];
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 787e93c..8b7dd85 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -89,7 +89,7 @@ struct arch_domain
          */
         spinlock_t lock;
         int ctlr;
-        int nr_lines; /* Number of SPIs */
+        int nr_spis; /* Number of SPIs */
         struct vgic_irq_rank *shared_irqs;
         /*
          * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 5160f17..74d5a4e 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -113,7 +113,7 @@ struct vgic_ops {
 };
 
 /* Number of ranks of interrupt registers for a domain */
-#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
+#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)
 
 #define vgic_lock(v) spin_lock_irq(&(v)->domain->arch.vgic.lock)
 #define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
@@ -175,6 +175,8 @@ enum gic_sgi_mode;
  */
 #define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
 
+#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)
+
 extern int domain_vgic_init(struct domain *d);
 extern void domain_vgic_free(struct domain *d);
 extern int vcpu_vgic_init(struct vcpu *v);
-- 
2.1.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
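As an aside for readers following the rename: the sketch below is not part
of the patch. It is a minimal, stand-alone C illustration of the relationship
the commit message describes, namely that vgic_num_irqs() is simply nr_spis
plus the 32 per-vCPU SGIs/PPIs; the struct names and the nr_spis value are
made up for the example, only the macro mirrors what the patch adds.

/* Illustrative sketch only -- not part of the patch.  The structs are
 * stripped-down stand-ins for the real arch_domain/vgic layout; the
 * nr_spis value is hypothetical. */
#include <stdio.h>

struct example_vgic   { int nr_spis; };                 /* number of SPIs */
struct example_arch   { struct example_vgic vgic; };
struct example_domain { struct example_arch arch; };

/* Same shape as the macro introduced in xen/include/asm-arm/vgic.h:
 * total IRQs = SPIs + 32 (IRQs 0-31 are the per-vCPU SGIs/PPIs). */
#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)

int main(void)
{
    struct example_domain d = { .arch = { .vgic = { .nr_spis = 64 } } };

    /* 64 SPIs plus 32 SGIs/PPIs -> 96 IRQs handled by the emulated GIC */
    printf("nr_spis = %d, vgic_num_irqs = %d\n",
           d.arch.vgic.nr_spis, vgic_num_irqs(&d));
    return 0;
}

Keeping the SPI count and the total IRQ count behind two distinct names is
what avoids the nr_lines ambiguity with the GIC driver code.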