[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3 19/39] ARM: new VGIC: Add ENABLE registers handlers
On Wed, 21 Mar 2018, Andre Przywara wrote:
> As the enable register handlers are shared between the v2 and v3
> emulation, their implementation goes into vgic-mmio.c, to be easily
> referenced from the v3 emulation as well later.
> This introduces a vgic_sync_hardware_irq() function, which updates the
> physical side of a hardware mapped virtual IRQ.
> Because the existing locking order between vgic_irq->irq_lock and
> irq_desc->lock dictates so, we drop the irq_lock and retake them in the
> proper order.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
> Reviewed-by: Julien Grall <julien.grall@xxxxxxx>
> ---
> Changelog v2 ... v3:
> - fix indentation
> - fix wording in comment
> - add Reviewed-by:
>
> Changelog v1 ... v2:
> - ASSERT on h/w IRQ and vIRQ staying in sync
>
> xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +-
> xen/arch/arm/vgic/vgic-mmio.c | 117
> +++++++++++++++++++++++++++++++++++++++
> xen/arch/arm/vgic/vgic-mmio.h | 11 ++++
> xen/arch/arm/vgic/vgic.c | 40 +++++++++++++
> xen/arch/arm/vgic/vgic.h | 3 +
> 5 files changed, 173 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c
> b/xen/arch/arm/vgic/vgic-mmio-v2.c
> index 43c1ab5906..7efd1c4eb4 100644
> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c
> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
> @@ -89,10 +89,10 @@ static const struct vgic_register_region
> vgic_v2_dist_registers[] = {
> vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
> VGIC_ACCESS_32bit),
> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> + vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
> VGIC_ACCESS_32bit),
> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
> VGIC_ACCESS_32bit),
> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
> vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
> diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c
> index a03e8d88b9..f219b7c509 100644
> --- a/xen/arch/arm/vgic/vgic-mmio.c
> +++ b/xen/arch/arm/vgic/vgic-mmio.c
> @@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
> /* Ignore */
> }
>
> +/*
> + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
> + * of the enabled bit, so there is only one function for both here.
> + */
> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len)
> +{
> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> + uint32_t value = 0;
> + unsigned int i;
> +
> + /* Loop over all IRQs affected by this read */
> + for ( i = 0; i < len * 8; i++ )
> + {
> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> +
> + if ( irq->enabled )
> + value |= (1U << i);
Don't we need to take the irq->irq_lock before reading irq->enabled?
> + vgic_put_irq(vcpu->domain, irq);
> + }
> +
> + return value;
> +}
> +
> +void vgic_mmio_write_senable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len,
> + unsigned long val)
> +{
> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> + unsigned int i;
> +
> + for_each_set_bit( i, &val, len * 8 )
> + {
> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> + unsigned long flags;
> + irq_desc_t *desc;
> +
> + spin_lock_irqsave(&irq->irq_lock, flags);
> +
> + if ( irq->enabled ) /* skip already enabled IRQs */
> + {
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> + vgic_put_irq(vcpu->domain, irq);
> + continue;
> + }
> +
> + irq->enabled = true;
> + if ( irq->hw )
> + {
> + /*
> + * The irq cannot be a PPI, we only support delivery
> + * of SPIs to guests.
> + */
> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
> +
> + desc = irq_to_desc(irq->hwintid);
> + }
> + else
> + desc = NULL;
> +
> + vgic_queue_irq_unlock(vcpu->domain, irq, flags);
> +
> + if ( desc )
> + vgic_sync_hardware_irq(vcpu->domain, desc, irq);
> +
> + vgic_put_irq(vcpu->domain, irq);
> + }
> +}
> +
> +void vgic_mmio_write_cenable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len,
> + unsigned long val)
> +{
> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
> + unsigned int i;
> +
> + for_each_set_bit( i, &val, len * 8 )
> + {
> + struct vgic_irq *irq;
> + unsigned long flags;
> + irq_desc_t *desc;
> +
> + irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
> + spin_lock_irqsave(&irq->irq_lock, flags);
> +
> + if ( !irq->enabled ) /* skip already disabled IRQs */
> + {
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> + vgic_put_irq(vcpu->domain, irq);
> + continue;
> + }
> +
> + irq->enabled = false;
> +
> + if ( irq->hw )
> + {
> + /*
> + * The irq cannot be a PPI, we only support delivery
> + * of SPIs to guests.
> + */
> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
> +
> + desc = irq_to_desc(irq->hwintid);
> + }
> + else
> + desc = NULL;
> +
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> +
> + if ( desc )
> + vgic_sync_hardware_irq(vcpu->domain, desc, irq);
> +
> + vgic_put_irq(vcpu->domain, irq);
> + }
> +}
> +
> static int match_region(const void *key, const void *elt)
> {
> const unsigned int offset = (unsigned long)key;
> diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h
> index c280668694..a2cebd77f4 100644
> --- a/xen/arch/arm/vgic/vgic-mmio.h
> +++ b/xen/arch/arm/vgic/vgic-mmio.h
> @@ -86,6 +86,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu,
> void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
> unsigned int len, unsigned long val);
>
> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len);
> +
> +void vgic_mmio_write_senable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len,
> + unsigned long val);
> +
> +void vgic_mmio_write_cenable(struct vcpu *vcpu,
> + paddr_t addr, unsigned int len,
> + unsigned long val);
> +
> unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
>
> #endif
> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
> index 37b425a16c..90041eb071 100644
> --- a/xen/arch/arm/vgic/vgic.c
> +++ b/xen/arch/arm/vgic/vgic.c
> @@ -699,6 +699,46 @@ void vgic_kick_vcpus(struct domain *d)
> }
> }
>
> +static unsigned int translate_irq_type(bool is_level)
> +{
> + return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING;
> +}
> +
> +void vgic_sync_hardware_irq(struct domain *d,
> + irq_desc_t *desc, struct vgic_irq *irq)
> +{
> + unsigned long flags;
> +
> + spin_lock_irqsave(&desc->lock, flags);
> + spin_lock(&irq->irq_lock);
> +
> + /*
> + * We forbid tinkering with the hardware IRQ association during
> + * a domain's lifetime.
> + */
> + ASSERT(irq->hw && desc->irq == irq->hwintid);
> +
> + if ( irq->enabled )
> + {
> + /*
> + * We might end up here from various callers, so check that the
> + * interrupt is disabled before trying to change the config.
> + */
> + if ( irq_type_set_by_domain(d) &&
> + test_bit(_IRQ_DISABLED, &desc->status) )
> + gic_set_irq_type(desc, translate_irq_type(irq->config));
> +
> + if ( irq->target_vcpu )
> + irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
> + desc->handler->enable(desc);
> + }
> + else
> + desc->handler->disable(desc);
> +
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&desc->lock, flags);
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
> index aed7e4179a..071e061066 100644
> --- a/xen/arch/arm/vgic/vgic.h
> +++ b/xen/arch/arm/vgic/vgic.h
> @@ -55,6 +55,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
> atomic_inc(&irq->refcount);
> }
>
> +void vgic_sync_hardware_irq(struct domain *d,
> + irq_desc_t *desc, struct vgic_irq *irq);
> +
> void vgic_v2_fold_lr_state(struct vcpu *vcpu);
> void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr);
> void vgic_v2_set_underflow(struct vcpu *vcpu);
> --
> 2.14.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |