Re: [Xen-devel] [PATCH v2 09/27] ARM: GICv3: introduce separate pending_irq structs for LPIs
On Thu, 16 Mar 2017, Andre Przywara wrote:
> For the same reason that allocating a struct irq_desc for each
> possible LPI is not an option, having a struct pending_irq for each LPI
> is also not feasible. However we actually only need those when an
> interrupt is on a vCPU (or is about to be injected).
> Maintain a list of those structs that we can use for the lifecycle of
> a guest LPI. We allocate new entries if necessary, however reuse
> pre-owned entries whenever possible.
> I added some locking around this list here, however my gut feeling is
> that we don't need it because this is a per-VCPU structure anyway.
> If someone could confirm this, I'd be grateful.
> Teach the existing VGIC functions to find the right pointer when being
> given a virtual LPI number.
>
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
Still all past comments are unaddressed ;-(
It makes it very hard to continue doing reviews. I am sorry but I'll have
to stop reviewing until I see a series with my past comments addressed.
I encourage Julien to do the same.
> ---
> xen/arch/arm/gic.c           |  3 +++
> xen/arch/arm/vgic-v3.c       |  3 +++
> xen/arch/arm/vgic.c          | 64 +++++++++++++++++++++++++++++++++++++++++---
> xen/include/asm-arm/domain.h |  2 ++
> xen/include/asm-arm/vgic.h   | 14 ++++++++++
> 5 files changed, 83 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index a5348f2..bd3c032 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -509,6 +509,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
> struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
> irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> }
> + /* If this was an LPI, mark this struct as available again. */
> + if ( is_lpi(p->irq) )
> + p->irq = 0;
> }
> }
> }
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index 1fadb00..b0653c2 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -1426,6 +1426,9 @@ static int vgic_v3_vcpu_init(struct vcpu *v)
> if ( v->vcpu_id == last_cpu || (v->vcpu_id == (d->max_vcpus - 1)) )
> v->arch.vgic.flags |= VGIC_V3_RDIST_LAST;
>
> + spin_lock_init(&v->arch.vgic.pending_lpi_list_lock);
> + INIT_LIST_HEAD(&v->arch.vgic.pending_lpi_list);
> +
> return 0;
> }
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 364d5f0..e5cfa54 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -30,6 +30,8 @@
>
> #include <asm/mmio.h>
> #include <asm/gic.h>
> +#include <asm/gic_v3_defs.h>
> +#include <asm/gic_v3_its.h>
> #include <asm/vgic.h>
>
> static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
> @@ -61,7 +63,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
> return vgic_get_rank(v, rank);
> }
>
> -static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
> +void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
> {
> INIT_LIST_HEAD(&p->inflight);
> INIT_LIST_HEAD(&p->lr_queue);
> @@ -244,10 +246,14 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
>
> static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
> {
> - struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
> + struct vgic_irq_rank *rank;
> unsigned long flags;
> int priority;
>
> + if ( is_lpi(virq) )
> + return vgic_lpi_get_priority(v->domain, virq);
> +
> + rank = vgic_rank_irq(v, virq);
> vgic_lock_rank(v, rank, flags);
> priority = rank->priority[virq & INTERRUPT_RANK_MASK];
> vgic_unlock_rank(v, rank, flags);
> @@ -446,13 +452,63 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
> return true;
> }
>
> +/*
> + * Holding struct pending_irq's for each possible virtual LPI in each domain
> + * requires too much Xen memory, also a malicious guest could potentially
> + * spam Xen with LPI map requests. We cannot cover those with (guest allocated)
> + * ITS memory, so we use a dynamic scheme of allocating struct pending_irq's
> + * on demand.
> + */
> +struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
> + bool allocate)
> +{
> + struct lpi_pending_irq *lpi_irq, *empty = NULL;
> +
> + spin_lock(&v->arch.vgic.pending_lpi_list_lock);
> + list_for_each_entry(lpi_irq, &v->arch.vgic.pending_lpi_list, entry)
> + {
> + if ( lpi_irq->pirq.irq == lpi )
> + {
> + spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
> + return &lpi_irq->pirq;
> + }
> +
> + if ( lpi_irq->pirq.irq == 0 && !empty )
> + empty = lpi_irq;
> + }
> +
> + if ( !allocate )
> + {
> + spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
> + return NULL;
> + }
> +
> + if ( !empty )
> + {
> + empty = xzalloc(struct lpi_pending_irq);
> + vgic_init_pending_irq(&empty->pirq, lpi);
> + list_add_tail(&empty->entry, &v->arch.vgic.pending_lpi_list);
> + } else
> + {
> + empty->pirq.status = 0;
> + empty->pirq.irq = lpi;
> + }
> +
> + spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
> +
> + return &empty->pirq;
> +}
> +
> struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
> {
> struct pending_irq *n;
> +
> /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
> * are used for SPIs; the rests are used for per cpu irqs */
> if ( irq < 32 )
> n = &v->arch.vgic.pending_irqs[irq];
> + else if ( is_lpi(irq) )
> + n = lpi_to_pending(v, irq, true);
> else
> n = &v->domain->arch.vgic.pending_irqs[irq - 32];
> return n;
> @@ -480,7 +536,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
> void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
> {
> uint8_t priority;
> - struct pending_irq *iter, *n = irq_to_pending(v, virq);
> + struct pending_irq *iter, *n;
> unsigned long flags;
> bool running;
>
> @@ -488,6 +544,8 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
>
> spin_lock_irqsave(&v->arch.vgic.lock, flags);
>
> + n = irq_to_pending(v, virq);
> +
> /* vcpu offline */
> if ( test_bit(_VPF_down, &v->pause_flags) )
> {
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 00b9c1a..f44a84b 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -257,6 +257,8 @@ struct arch_vcpu
> paddr_t rdist_base;
> #define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
> uint8_t flags;
> + struct list_head pending_lpi_list;
> + spinlock_t pending_lpi_list_lock; /* protects the pending_lpi_list */
> } vgic;
>
> /* Timer registers */
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 467333c..8f1099c 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -83,6 +83,12 @@ struct pending_irq
> struct list_head lr_queue;
> };
>
> +struct lpi_pending_irq
> +{
> + struct list_head entry;
> + struct pending_irq pirq;
> +};
> +
> #define NR_INTERRUPT_PER_RANK 32
> #define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)
>
> @@ -296,13 +302,21 @@ extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
> extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
> extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
> extern void vgic_clear_pending_irqs(struct vcpu *v);
> +extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
> extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
> extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
> +extern struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int irq,
> + bool allocate);
> extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n,
> int s);
> extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
> extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
> extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
> extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
> +/* placeholder function until the property table gets introduced */
> +static inline int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
> +{
> + return 0xa;
> +}
> extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
> int vgic_v2_init(struct domain *d, int *mmio_count);
> int vgic_v3_init(struct domain *d, int *mmio_count);
> --
> 2.9.0
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
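
As a rough illustration of the allocate-or-reuse scheme the commit message above describes, here is a minimal, self-contained sketch of the lookup path: search the per-vCPU list by LPI number, reuse the first free slot (irq == 0), and allocate only when no free slot exists. The names (lpi_slot, lpi_lookup) are illustrative and not from the patch; Xen's list_head, xzalloc() and pending_lpi_list_lock are replaced with plain C equivalents, and locking is omitted.

    /* Simplified model of the per-vCPU pool of pending-LPI structs. */
    #include <stdlib.h>
    #include <stdio.h>

    struct lpi_slot {
        unsigned int irq;       /* 0 means "free, may be reused" */
        struct lpi_slot *next;
    };

    struct vcpu_lpis {
        struct lpi_slot *head;  /* per-vCPU list, as in the patch */
    };

    static struct lpi_slot *lpi_lookup(struct vcpu_lpis *v, unsigned int lpi,
                                       int allocate)
    {
        struct lpi_slot *slot, *empty = NULL;

        for ( slot = v->head; slot; slot = slot->next )
        {
            if ( slot->irq == lpi )
                return slot;                 /* already tracked */
            if ( slot->irq == 0 && !empty )
                empty = slot;                /* remember first free slot */
        }

        if ( !allocate )
            return NULL;

        if ( !empty )                        /* no free slot: grow the pool */
        {
            empty = calloc(1, sizeof(*empty));
            if ( !empty )
                return NULL;
            empty->next = v->head;
            v->head = empty;
        }

        empty->irq = lpi;
        return empty;
    }

    int main(void)
    {
        struct vcpu_lpis v = { NULL };

        struct lpi_slot *a = lpi_lookup(&v, 8192, 1);  /* allocates a new slot */
        a->irq = 0;                                    /* "LR cleared": slot freed */
        struct lpi_slot *b = lpi_lookup(&v, 8200, 1);  /* reuses the freed slot */

        printf("reused: %s\n", (a == b) ? "yes" : "no");
        return 0;
    }

In this model, freeing a slot amounts to setting irq back to 0 (as gic_update_one_lr does in the patch), so the pool never shrinks but stays bounded by the number of LPIs that can be in flight on the vCPU at any one time.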