Re: [Xen-devel] [PATCH v3 10/39] ARM: new VGIC: Implement virtual IRQ injection
On Wed, 21 Mar 2018, Andre Przywara wrote:
> Provide a vgic_queue_irq_unlock() function which decides whether a
> given IRQ needs to be queued to a VCPU's ap_list.
> This should be called whenever an IRQ becomes pending or enabled,
> whether as a result of a hardware IRQ injection, from devices emulated by
> Xen (like the architected timer), or from MMIO accesses to the distributor
> emulation.
> Also provide the necessary functions to allow injecting an IRQ into a guest.
> Since this is the first code that starts using our locking mechanism,
> we add some (hopefully) clear documentation of our locking strategy and
> requirements along with this patch.
>
> This is based on Linux commit 81eeb95ddbab, written by Christoffer Dall.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxxxxx>
> Reviewed-by: Julien Grall <julien.grall@xxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> ---
> xen/arch/arm/vgic/vgic.c | 226 +++++++++++++++++++++++++++++++++++++++++++++++
> xen/arch/arm/vgic/vgic.h | 10 +++
> 2 files changed, 236 insertions(+)
>
> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
> index a818e382b1..f7dfd01c1d 100644
> --- a/xen/arch/arm/vgic/vgic.c
> +++ b/xen/arch/arm/vgic/vgic.c
> @@ -17,10 +17,36 @@
>
> #include <xen/sched.h>
> #include <asm/bug.h>
> +#include <asm/event.h>
> #include <asm/new_vgic.h>
>
> #include "vgic.h"
>
> +/*
> + * Locking order is always:
> + * vgic->lock
> + * vgic_cpu->ap_list_lock
> + * vgic->lpi_list_lock
> + * desc->lock
> + * vgic_irq->irq_lock
> + *
> + * If you need to take multiple locks, always take the upper lock first,
> + * then the lower ones, e.g. first take the ap_list_lock, then the irq_lock.
> + * If you are already holding a lock and need to take a higher one, you
> + * have to drop the lower ranking lock first and re-acquire it after having
> + * taken the upper one.
> + *
> + * When taking more than one ap_list_lock at the same time, always take the
> + * lowest numbered VCPU's ap_list_lock first, so:
> + * vcpuX->vcpu_id < vcpuY->vcpu_id:
> + * spin_lock(vcpuX->arch.vgic.ap_list_lock);
> + * spin_lock(vcpuY->arch.vgic.ap_list_lock);
> + *
> + * Since the VGIC must support injecting virtual interrupts from ISRs, we have
> + * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
> + * spinlocks for any lock that may be taken while injecting an interrupt.
> + */
> +
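(As an inline illustration for reviewers: the multiple-ap_list_lock rule above
could be captured in a helper like the hypothetical sketch below;
lock_two_ap_lists() is not part of this patch.)

    /*
     * Hypothetical sketch: lock two VCPUs' ap_lists in vcpu_id order,
     * as the locking-order comment above requires.
     */
    static void lock_two_ap_lists(struct vcpu *a, struct vcpu *b,
                                  unsigned long *flags)
    {
        /* Normalise so that 'a' is always the lower-numbered VCPU. */
        if ( a->vcpu_id > b->vcpu_id )
        {
            struct vcpu *tmp = a;

            a = b;
            b = tmp;
        }

        /* Outer lock disables interrupts, as required for ISR injection. */
        spin_lock_irqsave(&a->arch.vgic.ap_list_lock, *flags);
        spin_lock(&b->arch.vgic.ap_list_lock);
    }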
> /*
> * Iterate over the VM's list of mapped LPIs to find the one with a
> * matching interrupt ID and return a reference to the IRQ structure.
> @@ -124,6 +150,206 @@ void vgic_put_irq(struct domain *d, struct vgic_irq *irq)
> xfree(irq);
> }
>
> +/**
> + * vgic_target_oracle() - compute the target vcpu for an irq
> + * @irq: The irq to route. Must be already locked.
> + *
> + * Based on the current state of the interrupt (enabled, pending,
> + * active, vcpu and target_vcpu), compute the next vcpu this should be
> + * given to. Return NULL if this shouldn't be injected at all.
> + *
> + * Requires the IRQ lock to be held.
> + *
> + * Returns: The pointer to the virtual CPU this interrupt should be injected
> + * to. Will be NULL if this IRQ does not need to be injected.
> + */
> +static struct vcpu *vgic_target_oracle(struct vgic_irq *irq)
> +{
> + ASSERT(spin_is_locked(&irq->irq_lock));
> +
> + /* If the interrupt is active, it must stay on the current vcpu */
> + if ( irq->active )
> + return irq->vcpu ? : irq->target_vcpu;
> +
> + /*
> + * If the IRQ is not active but enabled and pending, we should direct
> + * it to its configured target VCPU.
> + * If the distributor is disabled, pending interrupts shouldn't be
> + * forwarded.
> + */
> + if ( irq->enabled && irq_is_pending(irq) )
> + {
> + if ( unlikely(irq->target_vcpu &&
> + !irq->target_vcpu->domain->arch.vgic.enabled) )
> + return NULL;
> +
> + return irq->target_vcpu;
> + }
> +
> + /*
> + * If neither active nor pending and enabled, then this IRQ should not
> + * be queued to any VCPU.
> + */
> + return NULL;
> +}
> +
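(Summarising the oracle as a table, for review purposes only:

    active | enabled & pending | dist. enabled | result
    -------+-------------------+---------------+--------------------------
     yes   |         -         |       -       | irq->vcpu ?: target_vcpu
     no    |        yes        |      yes      | irq->target_vcpu
     no    |        yes        |      no       | NULL
     no    |        no         |       -       | NULL

so an IRQ is only ever moved while inactive, and a disabled distributor
masks delivery without discarding the pending state.)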
> +/*
> + * An injection is only valid if it changes the line level of a
> + * level-triggered IRQ, or signals a rising edge of an edge-triggered one.
> + */
> +static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
> +{
> + /* For edge interrupts we only care about a rising edge. */
> + if ( irq->config == VGIC_CONFIG_EDGE )
> + return level;
> +
> + /* For level interrupts we have to act when the line level changes. */
> + return irq->line_level != level;
> +}
> +
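(A concrete reading of the rule above, values illustrative:

    edge IRQ:  level == true                      -> valid (rising edge)
    edge IRQ:  level == false                     -> ignored
    level IRQ: line_level == false, level == true -> valid (0 -> 1)
    level IRQ: line_level == true,  level == true -> ignored (no change)

so repeated "high" notifications on a level-sensitive line collapse into one.)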
> +/**
> + * vgic_queue_irq_unlock() - Queue an IRQ to a VCPU, to be injected to a guest.
> + * @d: The domain the virtual IRQ belongs to.
> + * @irq: A pointer to the vgic_irq of the virtual IRQ, with the lock held.
> + * @flags: The flags used when having grabbed the IRQ lock.
> + *
> + * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
> + * Do the queuing if necessary, taking the right locks in the right order.
> + *
> + * Needs to be entered with the IRQ lock already held, but will return
> + * with all locks dropped.
> + */
> +void vgic_queue_irq_unlock(struct domain *d, struct vgic_irq *irq,
> + unsigned long flags)
> +{
> + struct vcpu *vcpu;
> +
> + ASSERT(spin_is_locked(&irq->irq_lock));
> +
> +retry:
> + vcpu = vgic_target_oracle(irq);
> + if ( irq->vcpu || !vcpu )
> + {
> + /*
> + * If this IRQ is already on a VCPU's ap_list, then it
> + * cannot be moved or modified and there is no more work for
> + * us to do.
> + *
> + * Otherwise, if the irq is not pending and enabled, it does
> + * not need to be inserted into an ap_list and there is also
> + * no more work for us to do.
> + */
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> +
> + /*
> + * We have to kick the VCPU here, because we could be
> + * queueing an edge-triggered interrupt for which we
> + * get no EOI maintenance interrupt. In that case,
> + * while the IRQ is already on the VCPU's AP list, the
> + * VCPU could have EOI'ed the original interrupt and
> + * won't see this one until it exits for some other
> + * reason.
> + */
> + if ( vcpu )
> + vcpu_kick(vcpu);
> +
> + return;
> + }
> +
> + /*
> + * We must unlock the irq lock to take the ap_list_lock where
> + * we are going to insert this new pending interrupt.
> + */
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> +
> + /* Someone else may change the IRQ's state here; we re-check it below. */
> +
> + spin_lock_irqsave(&vcpu->arch.vgic.ap_list_lock, flags);
> + spin_lock(&irq->irq_lock);
> +
> + /*
> + * Did something change behind our backs?
> + *
> + * There are two cases:
> + * 1) The irq lost its pending state or was disabled behind our
> + * backs and/or it was queued to another VCPU's ap_list.
> + * 2) Someone changed the affinity on this irq behind our
> + * backs and we are now holding the wrong ap_list_lock.
> + *
> + * In both cases, drop the locks and retry.
> + */
> +
> + if ( unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq)) )
> + {
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&vcpu->arch.vgic.ap_list_lock, flags);
> +
> + spin_lock_irqsave(&irq->irq_lock, flags);
> + goto retry;
> + }
> +
> + /*
> + * Grab a reference to the irq to reflect the fact that it is
> + * now in the ap_list.
> + */
> + vgic_get_irq_kref(irq);
> + list_add_tail(&irq->ap_list, &vcpu->arch.vgic.ap_list_head);
> + irq->vcpu = vcpu;
> +
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&vcpu->arch.vgic.ap_list_lock, flags);
> +
> + vcpu_kick(vcpu);
> +
> + return;
> +}
> +
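(For callers, the contract is: enter with the irq_lock held and interrupts
saved in 'flags'; the function returns with every lock dropped. A minimal
caller sketch, mirroring what vgic_inject_irq() below does:

    spin_lock_irqsave(&irq->irq_lock, flags);
    irq->pending_latch = true;            /* make the IRQ pending */
    vgic_queue_irq_unlock(d, irq, flags); /* queues and drops all locks */
    vgic_put_irq(d, irq);                 /* drop our reference */
)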
> +/**
> + * vgic_inject_irq() - Inject an IRQ from a device to the vgic
> + * @d: The domain pointer
> + * @vcpu: The vCPU for private IRQs (PPIs, SGIs). Ignored for SPIs and LPIs.
> + * @intid: The INTID to inject a new state to.
> + * @level: Edge-triggered:  true:  to trigger the interrupt
> + *                          false: to ignore the call
> + *         Level-sensitive: true:  raise the input signal
> + *                          false: lower the input signal
> + *
> + * Injects an instance of the given virtual IRQ into a domain.
> + * The VGIC is not concerned with devices being active-LOW or active-HIGH for
> + * level-sensitive interrupts. You can think of the level parameter as 1
> + * being HIGH and 0 being LOW and all devices being active-HIGH.
> + */
> +void vgic_inject_irq(struct domain *d, struct vcpu *vcpu, unsigned int intid,
> + bool level)
> +{
> + struct vgic_irq *irq;
> + unsigned long flags;
> +
> + irq = vgic_get_irq(d, vcpu, intid);
> + if ( !irq )
> + return;
> +
> + spin_lock_irqsave(&irq->irq_lock, flags);
> +
> + if ( !vgic_validate_injection(irq, level) )
> + {
> + /* Nothing to see here, move along... */
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
> + vgic_put_irq(d, irq);
> + return;
> + }
> +
> + if ( irq->config == VGIC_CONFIG_LEVEL )
> + irq->line_level = level;
> + else
> + irq->pending_latch = true;
> +
> + vgic_queue_irq_unlock(d, irq, flags);
> + vgic_put_irq(d, irq);
> +
> + return;
> +}
> +
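(As a usage sketch: an emulated level-sensitive device would assert and later
deassert its line as below, where 'intid' stands for whatever virtual INTID
the device is wired to; it is not a value taken from this patch.

    vgic_inject_irq(d, v, intid, true);  /* line high: may queue the vIRQ */
    /* ... the guest services the device, which deasserts its line ... */
    vgic_inject_irq(d, v, intid, false); /* line low: updates line_level */
)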
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h
> index a3befd386b..f9e2eeb2d6 100644
> --- a/xen/arch/arm/vgic/vgic.h
> +++ b/xen/arch/arm/vgic/vgic.h
> @@ -17,9 +17,19 @@
> #ifndef __XEN_ARM_VGIC_VGIC_H__
> #define __XEN_ARM_VGIC_VGIC_H__
>
> +static inline bool irq_is_pending(struct vgic_irq *irq)
> +{
> + if ( irq->config == VGIC_CONFIG_EDGE )
> + return irq->pending_latch;
> + else
> + return irq->pending_latch || irq->line_level;
> +}
> +
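(To spell out the asymmetry: once latched, an edge IRQ ignores the line level,
while a level IRQ is pending whenever either source is high; for example,
pending_latch=0/line_level=1 is "not pending" for an edge IRQ but "pending"
for a level one.)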
> struct vgic_irq *vgic_get_irq(struct domain *d, struct vcpu *vcpu,
> u32 intid);
> void vgic_put_irq(struct domain *d, struct vgic_irq *irq);
> +void vgic_queue_irq_unlock(struct domain *d, struct vgic_irq *irq,
> + unsigned long flags);
>
> static inline void vgic_get_irq_kref(struct vgic_irq *irq)
> {
> --
> 2.14.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel