Re: [Xen-devel] [PATCH 27/38] arm: split pending SPIs (global) out from pending PPIs and SGIs (per CPU)
On Fri, 1 Jun 2012, Ian Campbell wrote:
> This tracks SPIs in struct arch_domain and PPIs+SGIs in struct arch_vcpu which
> seems more logical.
> 
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
>  xen/arch/arm/vgic.c          |   17 ++++++++++-------
>  xen/include/asm-arm/domain.h |   10 ++++++++++
>  2 files changed, 20 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 629a0da..91d6166 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -82,9 +82,8 @@ int domain_vgic_init(struct domain *d)
>      d->arch.vgic.shared_irqs =
>          xmalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
>      d->arch.vgic.pending_irqs =
> -        xmalloc_array(struct pending_irq,
> -                      d->arch.vgic.nr_lines + (32 * d->max_vcpus));
> -    for (i=0; i<d->arch.vgic.nr_lines + (32 * d->max_vcpus); i++)
> +        xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
> +    for (i=0; i<d->arch.vgic.nr_lines; i++)
>          INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
>      for (i=0; i<DOMAIN_NR_RANKS(d); i++)
>          spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
> @@ -98,6 +97,10 @@ int vcpu_vgic_init(struct vcpu *v)
>  
>      spin_lock_init(&v->arch.vgic.private_irqs.lock);
>  
> +    memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
> +    for (i = 0; i < 32; i++)
> +        INIT_LIST_HEAD(&v->arch.vgic.pending_irqs[i].inflight);
> +
>      /* For SGI and PPI the target is always this CPU */
>      for ( i = 0 ; i < 8 ; i++ )
>          v->arch.vgic.private_irqs.itargets[i] =
> @@ -535,8 +538,7 @@ struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
>      /* Pending irqs allocation strategy: the first vgic.nr_lines irqs
>       * are used for SPIs; the rests are used for per cpu irqs */
>      if ( irq < 32 )
> -        n = &v->domain->arch.vgic.pending_irqs[irq + (v->vcpu_id * 32)
> -                                               + v->domain->arch.vgic.nr_lines];
> +        n = &v->arch.vgic.pending_irqs[irq];
>      else
>          n = &v->domain->arch.vgic.pending_irqs[irq - 32];
>      return n;
> @@ -548,6 +550,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
>      uint8_t priority;
>      struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, idx);
>      struct pending_irq *iter, *n = irq_to_pending(v, irq);
> +    unsigned long flags;
>  
>      /* irq still pending */
>      if (!list_empty(&n->inflight))
> @@ -564,7 +567,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
>  
>      gic_set_guest_irq(irq, GICH_LR_PENDING, priority);
>  
> -    spin_lock(&v->arch.vgic.lock);
> +    spin_lock_irqsave(&v->arch.vgic.lock, flags);
>      list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
>      {
>          if ( iter->priority > priority )
> @@ -575,7 +578,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
>          }
>      }
>      list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
> -    spin_unlock(&v->arch.vgic.lock);
> +    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
>      /* we have a new higher priority irq, inject it into the guest */
>  }

Besides moving PPIs and SGIs to struct vcpu, this patch also turns spin_lock
into spin_lock_irqsave in vgic_vcpu_inject_irq: I think it is correct because
it can be called in IRQ context, but it needs to be explicitly stated in the
commit message.
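To illustrate the locking point for readers of the archive, here is a minimal
standalone sketch. It is not Xen code: spinlock_t, local_irq_save() and the
*_irqsave wrappers below are simplified stand-ins for the Xen primitives of the
same names, and inject_from_vcpu_context()/inject_from_irq_context() are
made-up callers. It only shows why a lock that can also be taken in interrupt
context must be acquired with interrupts disabled in its non-interrupt callers.

/*
 * Minimal sketch (not Xen code) of the IRQ-context locking rule.
 * The primitives below are simplified stand-ins so the example is
 * self-contained and compiles on its own.
 */
typedef struct { volatile int held; } spinlock_t;
typedef unsigned long irqflags_t;

/* Stand-in: disable local interrupts, returning the previous state. */
static irqflags_t local_irq_save(void) { return 0; }
/* Stand-in: restore the previously saved interrupt state. */
static void local_irq_restore(irqflags_t flags) { (void)flags; }

static void spin_lock(spinlock_t *l)
{
    while (__sync_lock_test_and_set(&l->held, 1))
        ;  /* spin until the lock is free */
}

static void spin_unlock(spinlock_t *l)
{
    __sync_lock_release(&l->held);
}

#define spin_lock_irqsave(l, f)      ((f) = local_irq_save(), spin_lock(l))
#define spin_unlock_irqrestore(l, f) (spin_unlock(l), local_irq_restore(f))

static spinlock_t vgic_lock;  /* plays the role of v->arch.vgic.lock */

/*
 * Caller in ordinary (non-IRQ) context.  With a plain spin_lock(), an
 * interrupt arriving while the lock is held could run the handler below
 * on the same CPU; the handler would then spin on a lock that can never
 * be released -- a classic IRQ-context deadlock.  Disabling interrupts
 * around the critical section closes that window.
 */
static void inject_from_vcpu_context(void)
{
    irqflags_t flags;

    spin_lock_irqsave(&vgic_lock, flags);
    /* ... insert the pending IRQ into the inflight list by priority ... */
    spin_unlock_irqrestore(&vgic_lock, flags);
}

/* Caller in interrupt context, e.g. a physical IRQ routed to the guest. */
static void inject_from_irq_context(void)
{
    irqflags_t flags;

    spin_lock_irqsave(&vgic_lock, flags);
    /* ... same list manipulation ... */
    spin_unlock_irqrestore(&vgic_lock, flags);
}

int main(void)
{
    inject_from_vcpu_context();
    inject_from_irq_context();
    return 0;
}

In other words, the irqsave variant matters for the callers that run with
interrupts enabled; callers already in interrupt context are unaffected by it.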
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 620b26e..32deb52 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -46,6 +46,10 @@ struct arch_domain
>          int ctlr;
>          int nr_lines;
>          struct vgic_irq_rank *shared_irqs;
> +        /*
> +         * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
> +         * struct arch_vcpu.
> +         */
>          struct pending_irq *pending_irqs;
>      } vgic;
>  
> @@ -114,7 +118,13 @@ struct arch_vcpu
>      uint32_t gic_lr[64];
>  
>      struct {
> +        /*
> +         * SGIs and PPIs are per-VCPU, SPIs are domain global and in
> +         * struct arch_domain.
> +         */
> +        struct pending_irq pending_irqs[32];
>          struct vgic_irq_rank private_irqs;
> +
>          /* This list is ordered by IRQ priority and it is used to keep
>           * track of the IRQs that the VGIC injected into the guest.
>           * Depending on the availability of LR registers, the IRQs might
> -- 
> 1.7.9.1
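For reference, the lookup rule the patch ends up with can be shown in a
standalone sketch. This is not Xen code: toy_domain, toy_vcpu and
toy_irq_to_pending are hypothetical stand-ins for struct arch_domain,
struct arch_vcpu and irq_to_pending(), reduced to just the pending_irqs
fields. SGIs (0-15) and PPIs (16-31) resolve to the VCPU's own array, while
SPIs (32 and up) resolve to the domain array at index irq - 32.

#include <stdio.h>

#define NR_LOCAL_IRQS 32   /* 16 SGIs + 16 PPIs per CPU */

struct pending_irq { int irq; };

struct toy_domain {
    int nr_lines;                      /* number of SPIs */
    struct pending_irq *pending_irqs;  /* [nr_lines], shared by all VCPUs */
};

struct toy_vcpu {
    struct toy_domain *domain;
    struct pending_irq pending_irqs[NR_LOCAL_IRQS];  /* private SGIs/PPIs */
};

/* Mirrors the lookup rule: pick the per-VCPU or the per-domain array. */
static struct pending_irq *toy_irq_to_pending(struct toy_vcpu *v,
                                              unsigned int irq)
{
    if (irq < NR_LOCAL_IRQS)
        return &v->pending_irqs[irq];                       /* SGI or PPI */
    return &v->domain->pending_irqs[irq - NR_LOCAL_IRQS];   /* SPI */
}

int main(void)
{
    struct pending_irq spis[64] = { 0 };
    struct toy_domain d = { .nr_lines = 64, .pending_irqs = spis };
    struct toy_vcpu v = { .domain = &d };

    /* IRQ 27 (a PPI) lands in the VCPU's own array ... */
    printf("irq 27 -> %s\n",
           toy_irq_to_pending(&v, 27) == &v.pending_irqs[27] ? "vcpu array"
                                                             : "?");
    /* ... while IRQ 34 (an SPI) maps to domain slot 34 - 32 = 2. */
    printf("irq 34 -> %s\n",
           toy_irq_to_pending(&v, 34) == &d.pending_irqs[2] ? "domain slot 2"
                                                            : "?");
    return 0;
}

Compiled and run, this prints that IRQ 27 resolves to the VCPU array and
IRQ 34 to domain slot 2, which is the split the patch introduces: the domain
allocates only nr_lines entries and each VCPU carries its own 32.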