Re: [Xen-devel] [RFC PATCH 08/24] ARM: GICv3: introduce separate pending_irq structs for LPIs
Hi,
On 24/10/16 16:31, Vijay Kilari wrote:
> On Wed, Sep 28, 2016 at 11:54 PM, Andre Przywara <andre.przywara@xxxxxxx> wrote:
>> For the same reason that allocating a struct irq_desc for each
>> possible LPI is not an option, having a struct pending_irq for each LPI
>> is also not feasible. However, we actually only need those while an
>> interrupt is pending on a vCPU (or is about to be injected).
>> Maintain a list of those structs that we can use over the lifetime of
>> a guest LPI. We allocate new entries if necessary, but reuse
>> previously allocated entries whenever possible.
>> Teach the existing VGIC functions to find the right pointer when being
>> given a virtual LPI number.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
>> ---
>> xen/arch/arm/gic.c | 3 +++
>> xen/arch/arm/vgic-v3.c | 2 ++
>> xen/arch/arm/vgic.c | 56 ++++++++++++++++++++++++++++++++++++++++---
>> xen/include/asm-arm/domain.h | 1 +
>> xen/include/asm-arm/gic-its.h | 10 ++++++++
>> xen/include/asm-arm/vgic.h | 9 +++++++
>> 6 files changed, 78 insertions(+), 3 deletions(-)
>>
>> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
>> index 63c744a..ebe4035 100644
>> --- a/xen/arch/arm/gic.c
>> +++ b/xen/arch/arm/gic.c
>> @@ -506,6 +506,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
>> struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
>> irq_set_affinity(p->desc, cpumask_of(v_target->processor));
>> }
>> + /* If this was an LPI, mark this struct as available again. */
>> + if ( p->irq >= 8192 )
> Can we define something like is_lpi(irq) and use it everywhere?
Yes, that was on my list anyway.
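Something along these lines is what I have in mind (just a sketch; the
final name and location are up for grabs - LPI INTIDs start at 8192 per
the GICv3 architecture):

    /* First LPI INTID, as mandated by the GICv3 spec. */
    #define LPI_OFFSET      8192

    static inline bool is_lpi(unsigned int irq)
    {
        return irq >= LPI_OFFSET;
    }

That would replace the open-coded ">= 8192" checks in this patch.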
>> + p->irq = 0;
>> }
>> }
>> }
>> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
>> index ec038a3..e9b6490 100644
>> --- a/xen/arch/arm/vgic-v3.c
>> +++ b/xen/arch/arm/vgic-v3.c
>> @@ -1388,6 +1388,8 @@ static int vgic_v3_vcpu_init(struct vcpu *v)
>> if ( v->vcpu_id == last_cpu || (v->vcpu_id == (d->max_vcpus - 1)) )
>> v->arch.vgic.flags |= VGIC_V3_RDIST_LAST;
>>
>> + INIT_LIST_HEAD(&v->arch.vgic.pending_lpi_list);
>> +
>> return 0;
>> }
>>
>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index 0965119..b961551 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -31,6 +31,8 @@
>> #include <asm/mmio.h>
>> #include <asm/gic.h>
>> #include <asm/vgic.h>
>> +#include <asm/gic_v3_defs.h>
>> +#include <asm/gic-its.h>
>>
>> static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
>> {
>> @@ -61,7 +63,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
>> return vgic_get_rank(v, rank);
>> }
>>
>> -static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>> +void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>> {
>> INIT_LIST_HEAD(&p->inflight);
>> INIT_LIST_HEAD(&p->lr_queue);
>> @@ -244,10 +246,14 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
>>
>> static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
>> {
>> - struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
>> + struct vgic_irq_rank *rank;
>> unsigned long flags;
>> int priority;
>>
>> + if ( virq >= 8192 )
>> + return gicv3_lpi_get_priority(v->domain, virq);
>> +
>> + rank = vgic_rank_irq(v, virq);
>> vgic_lock_rank(v, rank, flags);
>> priority = rank->priority[virq & INTERRUPT_RANK_MASK];
>> vgic_unlock_rank(v, rank, flags);
>> @@ -446,13 +452,55 @@ int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, int
>> return 1;
>> }
>>
>> +/*
>> + * Holding struct pending_irq's for each possible virtual LPI in each domain
>> + * requires too much Xen memory, also a malicious guest could potentially
>> + * spam Xen with LPI map requests. We cannot cover those with (guest allocated)
>> + * ITS memory, so we use a dynamic scheme of allocating struct pending_irq's
>> + * on demand.
>> + */
>> +struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>> + bool allocate)
>> +{
>> + struct lpi_pending_irq *lpi_irq, *empty = NULL;
>> +
>> + /* TODO: locking! */
>> + list_for_each_entry(lpi_irq, &v->arch.vgic.pending_lpi_list, entry)
>> + {
>> + if ( lpi_irq->pirq.irq == lpi )
>> + return &lpi_irq->pirq;
>> +
>> + if ( lpi_irq->pirq.irq == 0 && !empty )
>> + empty = lpi_irq;
>> + }
> With this approach of allocating pending_irq structs on demand, if the
> entry is at the n-th position in pending_lpi_list, the lookup has to
> iterate for a long time to find the pending_irq. This will increase the
> LPI injection time for the domain.
>
> Why can't we use a btree?
That's an optimization. You will find that the actual number of
interrupts on a VCPU at any given time is very low, especially if we
look at LPIs only. So my hunch is that it's either 0, 1 or 2, not more.
So for simplicity I'd keep it as a list for now. If we see issues, we
can amend this at any time.
>> +
>> + if ( !allocate )
>> + return NULL;
>> +
>> + if ( !empty )
>> + {
>> + empty = xzalloc(struct lpi_pending_irq);
>> + vgic_init_pending_irq(&empty->pirq, lpi);
>> + list_add_tail(&empty->entry, &v->arch.vgic.pending_lpi_list);
>> + } else
>> + {
>> + empty->pirq.status = 0;
>> + empty->pirq.irq = lpi;
>> + }
>> +
>> + return &empty->pirq;
>> +}
>> +
>> struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
>> {
>> struct pending_irq *n;
>> +
>> /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
>> * are used for SPIs; the rests are used for per cpu irqs */
>> if ( irq < 32 )
>> n = &v->arch.vgic.pending_irqs[irq];
>> + else if ( irq >= 8192 )
>> + n = lpi_to_pending(v, irq, true);
>> else
>> n = &v->domain->arch.vgic.pending_irqs[irq - 32];
>> return n;
>> @@ -480,7 +528,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
>> void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
>> {
>> uint8_t priority;
>> - struct pending_irq *iter, *n = irq_to_pending(v, virq);
>> + struct pending_irq *iter, *n;
>> unsigned long flags;
>> bool_t running;
>>
>> @@ -488,6 +536,8 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
>>
>> spin_lock_irqsave(&v->arch.vgic.lock, flags);
>>
>> + n = irq_to_pending(v, virq);
>> +
>> /* vcpu offline */
>> if ( test_bit(_VPF_down, &v->pause_flags) )
>> {
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index 9452fcd..ae8a9de 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -249,6 +249,7 @@ struct arch_vcpu
>> paddr_t rdist_base;
>> #define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
>> uint8_t flags;
>> + struct list_head pending_lpi_list;
>> } vgic;
>>
>> /* Timer registers */
>> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
>> index 4e9841a..1f881c0 100644
>> --- a/xen/include/asm-arm/gic-its.h
>> +++ b/xen/include/asm-arm/gic-its.h
>> @@ -136,6 +136,12 @@ int gicv3_lpi_allocate_host_lpi(struct host_its *its,
>> int gicv3_lpi_drop_host_lpi(struct host_its *its,
>> uint32_t devid, uint32_t eventid,
>> uint32_t host_lpi);
>> +
>> +static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>> +{
>> + return GIC_PRI_IRQ;
> Why is the LPI priority fixed? Can't we use the priority the domain
> set for the LPI?
Mmh, looks like a rebase artifact. The priority should only be fixed in
the stub for when the ITS isn't configured; in the CONFIG_HAS_ITS case
this should just be a prototype.
The final file has it right, so I guess this gets amended in some later
patch. Thanks for spotting this.
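To illustrate (a sketch only, not what the series actually does:
lpi_get_property() is a made-up helper standing in for however we end
up reading the guest's LPI property table):

    static int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
    {
        /* The property table holds one configuration byte per LPI. */
        uint8_t prop = lpi_get_property(d, lpi);

        /* Bits [7:2] of the property byte encode the LPI's priority. */
        return prop & 0xfc;
    }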
Cheers,
Andre.
>> +}
>> +
>> #else
>>
>> static inline void gicv3_its_dt_init(const struct dt_device_node *node)
>> @@ -175,6 +181,10 @@ static inline int gicv3_lpi_drop_host_lpi(struct host_its *its,
>> {
>> return 0;
>> }
>> +static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi)
>> +{
>> + return GIC_PRI_IRQ;
>> +}
>>
>> #endif /* CONFIG_HAS_ITS */
>>
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index 300f461..4e29ba6 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -83,6 +83,12 @@ struct pending_irq
>> struct list_head lr_queue;
>> };
>>
>> +struct lpi_pending_irq
>> +{
>> + struct list_head entry;
>> + struct pending_irq pirq;
>> +};
>> +
>> #define NR_INTERRUPT_PER_RANK 32
>> #define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)
>>
>> @@ -296,8 +302,11 @@ extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
>> extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
>> extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
>> extern void vgic_clear_pending_irqs(struct vcpu *v);
>> +extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
>> extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
>> extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
>> +extern struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int irq,
>> + bool allocate);
>> extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n,
>> int s);
>> extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
>> extern int vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
>> --
>> 2.9.0
>>
>>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel