Re: [Xen-devel] [PATCH V6 04/10] xen/arm: Use cpumask_t type for vcpu_mask in vgic_to_sgi
On Mon, 2015-06-01 at 20:56 +0800, Chen Baozi wrote:
> From: Chen Baozi <baozich@xxxxxxxxx>
>
> Use cpumask_t instead of unsigned long which can only express 64 cpus at
> the most. Add the {gicv2|gicv3}_sgir_to_cpumask in corresponding vGICs
> to translate GICD_SGIR/ICC_SGI1R_EL1 to vcpu_mask for vgic_to_sgi.
>
> Signed-off-by: Chen Baozi <baozich@xxxxxxxxx>
> ---
> xen/arch/arm/vgic-v2.c            | 16 +++++++++++++---
> xen/arch/arm/vgic-v3.c            | 18 ++++++++++++++----
> xen/arch/arm/vgic.c               | 31 ++++++++++++++++++++-----------
> xen/include/asm-arm/gic.h         |  1 +
> xen/include/asm-arm/gic_v3_defs.h |  2 ++
> xen/include/asm-arm/vgic.h        |  2 +-
> 6 files changed, 51 insertions(+), 19 deletions(-)
>
> diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
> index 3be1a51..17a3c9f 100644
> --- a/xen/arch/arm/vgic-v2.c
> +++ b/xen/arch/arm/vgic-v2.c
> @@ -33,6 +33,15 @@
> #include <asm/gic.h>
> #include <asm/vgic.h>
>
> +static inline void gicv2_sgir_to_cpumask(cpumask_t *cpumask,
> + const register_t sgir)
> +{
> + unsigned long target_list;
> +
> + target_list = ((sgir & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT);
> + bitmap_copy(cpumask_bits(cpumask), &target_list, GICD_SGI_TARGET_BITS);
> +}
> +
> static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
> {
> struct hsr_dabt dabt = info->dabt;
> @@ -201,16 +210,17 @@ static int vgic_v2_to_sgi(struct vcpu *v, register_t sgir)
> int virq;
> int irqmode;
> enum gic_sgi_mode sgi_mode;
> - unsigned long vcpu_mask = 0;
> + cpumask_t vcpu_mask;
How big is NR_CPUS (and hence this variable) going to be at the end of
this series? I can't seem to find the patch which bumps it, so I suppose
it remains 128?
Perhaps we want to bite the bullet now and change vgic_to_sgi to take an
affinity path (aff3.aff2.aff1) plus a target list, instead of a cpumask?
That makes sense given the 16-CPU-per-Aff0 limitation: only a limited set
of cpumask patterns can ever be specified, so we don't need a fully
arbitrary bitmap.
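Something along these lines, perhaps (a rough, untested sketch -- the struct
and field names are purely illustrative):

    struct sgi_target {
        uint8_t  aff1;  /* Aff1 of the targeted cluster (Aff2/Aff3 could be added if needed) */
        uint16_t list;  /* raw GICD_SGIR / ICC_SGI1R_EL1 target list, at most 16 vcpus */
    };

    int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
                    int virq, const struct sgi_target *target);

The common code could then derive the base vcpuid as target->aff1 * 16 and
walk the 16-bit list, rather than carrying a full NR_CPUS-sized bitmap
around.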
> + cpumask_clear(&vcpu_mask);
> irqmode = (sgir & GICD_SGI_TARGET_LIST_MASK) >> GICD_SGI_TARGET_LIST_SHIFT;
> virq = (sgir & GICD_SGI_INTID_MASK);
> - vcpu_mask = (sgir & GICD_SGI_TARGET_MASK) >> GICD_SGI_TARGET_SHIFT;
>
> /* Map GIC sgi value to enum value */
> switch ( irqmode )
> {
> case GICD_SGI_TARGET_LIST_VAL:
> + gicv2_sgir_to_cpumask(&vcpu_mask, sgir);
> sgi_mode = SGI_TARGET_LIST;
> break;
> case GICD_SGI_TARGET_OTHERS_VAL:
> @@ -226,7 +236,7 @@ static int vgic_v2_to_sgi(struct vcpu *v, register_t sgir)
> return 0;
> }
>
> - return vgic_to_sgi(v, sgir, sgi_mode, virq, vcpu_mask);
> + return vgic_to_sgi(v, sgir, sgi_mode, virq, &vcpu_mask);
> }
>
> static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index ef9a71a..2bf5294 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -972,22 +972,32 @@ write_ignore:
> return 1;
> }
>
> +static inline void gicv3_sgir_to_cpumask(cpumask_t *cpumask,
> + const register_t sgir)
> +{
> + unsigned long target_list;
> +
> + target_list = sgir & ICH_SGI_TARGETLIST_MASK;
> + bitmap_copy(cpumask_bits(cpumask), &target_list, ICH_SGI_TARGET_BITS);
Do you not need to left shift the target_list bits into the cpumask
based on AFF1+?
Otherwise aren't you delivering an SGI targeted at, say, 0.0.1.0xab to
0.0.0.0xab?
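I would have expected something more like this (untested sketch, assuming
the vcpuid = aff1 * 16 + target-list-bit numbering used elsewhere in this
series, and Aff1 sitting in bits [23:16] of ICC_SGI1R_EL1):

    static inline void gicv3_sgir_to_cpumask(cpumask_t *cpumask,
                                             const register_t sgir)
    {
        unsigned long target_list = sgir & ICH_SGI_TARGETLIST_MASK;
        unsigned int aff1 = (sgir >> 16) & 0xff; /* Aff1 field of ICC_SGI1R_EL1 */
        unsigned int bit;

        /* Each Aff1 cluster holds at most 16 vcpus, so bit n of the target
         * list maps to vcpu (aff1 * 16) + n, not to vcpu n. */
        for_each_set_bit ( bit, &target_list, ICH_SGI_TARGET_BITS )
            cpumask_set_cpu(aff1 * 16 + bit, cpumask);
    }

(Range checking against d->max_vcpus omitted for brevity.)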
> +}
> +
> static int vgic_v3_to_sgi(struct vcpu *v, register_t sgir)
> {
> int virq;
> int irqmode;
> enum gic_sgi_mode sgi_mode;
> - unsigned long vcpu_mask = 0;
> + cpumask_t vcpu_mask;
>
> + cpumask_clear(&vcpu_mask);
> irqmode = (sgir >> ICH_SGI_IRQMODE_SHIFT) & ICH_SGI_IRQMODE_MASK;
> virq = (sgir >> ICH_SGI_IRQ_SHIFT ) & ICH_SGI_IRQ_MASK;
> - /* SGI's are injected at Rdist level 0. ignoring affinity 1, 2, 3 */
> - vcpu_mask = sgir & ICH_SGI_TARGETLIST_MASK;
>
> /* Map GIC sgi value to enum value */
> switch ( irqmode )
> {
> case ICH_SGI_TARGET_LIST:
> + /* SGI's are injected at Rdist level 0. ignoring affinity 1, 2, 3 */
> + gicv3_sgir_to_cpumask(&vcpu_mask, sgir);
> sgi_mode = SGI_TARGET_LIST;
> break;
> case ICH_SGI_TARGET_OTHERS:
> @@ -998,7 +1008,7 @@ static int vgic_v3_to_sgi(struct vcpu *v, register_t sgir)
> return 0;
> }
>
> - return vgic_to_sgi(v, sgir, sgi_mode, virq, vcpu_mask);
> + return vgic_to_sgi(v, sgir, sgi_mode, virq, &vcpu_mask);
> }
>
> static int vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 7b387b7..1bd86f8 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -318,15 +318,20 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
> }
> }
>
> -/* TODO: unsigned long is used to fit vcpu_mask.*/
> int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
> int virq,
> - unsigned long vcpu_mask)
> + cpumask_t *vcpu_mask)
> {
> struct domain *d = v->domain;
> int vcpuid;
> int i;
>
> - ASSERT(d->max_vcpus < 8*sizeof(vcpu_mask));
> + /*
> + * cpumask_t is based on NR_CPUS and there is no relation between
> + * NR_CPUS and MAX_VIRT_CPUS. Furthermore, NR_CPUS can be configured
> + * at build time by the user. So we add a BUILD_BUG_ON here in order
> + * to avoid insecure hypervisor.
Insecure?
> + */
> + BUILD_BUG_ON(sizeof(cpumask_t)*8 < MAX_VIRT_CPUS);
Is this the same as (NR_CPUS < MAX_VIRT_CPUS)?
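(For reference, with the usual definitions, quoting approximately from
memory:

    typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

sizeof(cpumask_t) * 8 is NR_CPUS rounded up to a multiple of BITS_PER_LONG,
so the two checks only differ when MAX_VIRT_CPUS falls into that rounding
slack. If the rounding doesn't matter, spelling it as
BUILD_BUG_ON(NR_CPUS < MAX_VIRT_CPUS) would state the intent more directly.)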
Ian
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel