[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Wed, 6 Mar 2013, Ian Campbell wrote: > From: Ian Campbell <ian.campbell@xxxxxxxxxx> > > Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx> > --- > xen/arch/arm/arm32/mode_switch.S | 2 +- > xen/arch/arm/gic.c | 88 > +++++++++++++++++++++++++++++++++++--- > xen/arch/arm/smp.c | 14 +++--- > xen/include/asm-arm/gic.h | 22 +++++++++- > 4 files changed, 108 insertions(+), 18 deletions(-) > > diff --git a/xen/arch/arm/arm32/mode_switch.S > b/xen/arch/arm/arm32/mode_switch.S > index bc2be74..d6741d0 100644 > --- a/xen/arch/arm/arm32/mode_switch.S > +++ b/xen/arch/arm/arm32/mode_switch.S > @@ -43,7 +43,7 @@ kick_cpus: > mov r2, #0x1 > str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */ > mov r2, #0xfe0000 > - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */ > + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 > = Event check */ > dsb > str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */ > mov pc, lr > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > index bbb8e04..6592562 100644 > --- a/xen/arch/arm/gic.c > +++ b/xen/arch/arm/gic.c > @@ -354,10 +354,55 @@ void __init gic_init(void) > spin_unlock(&gic.lock); > } > > +void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi) > +{ > + unsigned long mask = cpumask_bits(cpumask)[0]; > + > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + mask &= cpumask_bits(&cpu_online_map)[0]; > + > + ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST > + | (mask<<GICD_SGI_TARGET_SHIFT) > + | GICD_SGI_GROUP1 Are you sure that this is correct? The manual says: "This field is writable only by a Secure access. Any Non-secure write to the GICD_SGIR generates an SGI only if the specified SGI is programmed as Group 1, regardless of the value of bit[15] of the write." I think that we should leave bit 15 alone. 
> + | sgi; > +} > + > +void send_SGI_one(unsigned int cpu, enum gic_sgi sgi) > +{ > + ASSERT(cpu < 7); /* Targets bitmap only supports 8 CPUs */ > + send_SGI_mask(cpumask_of(cpu), sgi); > +} > + > +void send_SGI_self(enum gic_sgi sgi) > +{ > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF > + | GICD_SGI_GROUP1 same here > + | sgi; > +} > + > +void send_SGI_allbutself(enum gic_sgi sgi) > +{ > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS > + | GICD_SGI_GROUP1 same here > + | sgi; > +} > + > void smp_send_state_dump(unsigned int cpu) > { > - printk("WARNING: unable to send state dump request to CPU%d\n", cpu); > - /* XXX TODO -- send an SGI */ > + send_SGI_one(cpu, GIC_SGI_DUMP_STATE); > } > > /* Set up the per-CPU parts of the GIC for a secondary CPU */ > @@ -585,6 +630,28 @@ out: > return retval; > } > > +static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi > sgi) > +{ > + /* Lower the priority */ > + GICC[GICC_EOIR] = sgi; > + > + switch (sgi) > + { > + case GIC_SGI_EVENT_CHECK: > + /* Nothing to do, will check for events on return path */ > + break; > + case GIC_SGI_DUMP_STATE: > + dump_execstate(regs); > + break; > + default: > + panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id()); > + break; > + } > + > + /* Deactivate */ > + GICC[GICC_DIR] = sgi; > +} > + > /* Accept an interrupt from the GIC and dispatch its handler */ > void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) > { > @@ -595,14 +662,23 @@ void gic_interrupt(struct cpu_user_regs *regs, int > is_fiq) > do { > intack = GICC[GICC_IAR]; > irq = intack & GICC_IA_IRQ; > - local_irq_enable(); > > - if (likely(irq < 1021)) > + if ( likely(irq >= 16 && irq < 1021) ) > + { > + local_irq_enable(); > do_IRQ(regs, irq, is_fiq); > + local_irq_disable(); > + } > + else if (unlikely(irq < 16)) > + { > + unsigned int cpu = (intack & GICC_IA_CPU_MASK) 
>> > GICC_IA_CPU_SHIFT; > + do_sgi(regs, cpu, irq); > + } > else > + { > + local_irq_disable(); > break; > - > - local_irq_disable(); > + } > } while (1); > } > > diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c > index 12260f4..a902d84 100644 > --- a/xen/arch/arm/smp.c > +++ b/xen/arch/arm/smp.c > @@ -3,10 +3,11 @@ > #include <asm/smp.h> > #include <asm/cpregs.h> > #include <asm/page.h> > +#include <asm/gic.h> > > void flush_tlb_mask(const cpumask_t *mask) > { > - /* XXX IPI other processors */ > + /* No need to IPI other processors on ARM, the processor takes care of > it. */ > flush_xen_data_tlb(); From the ARMv7 manual, chapter B3.10.5: "For an ARMv7 implementation that does not include the Multiprocessing Extensions, the architecture defines that a TLB maintenance operation applies only to any TLBs that are used in translating memory accesses made by the processor performing the maintenance operation. The ARMv7 Multiprocessing Extensions are an OPTIONAL set of extensions that improve the implementation of a multiprocessor system. These extensions provide additional TLB maintenance operations that apply to the TLBs of processors in the same Inner Shareable domain." From this paragraph I understand that we wouldn't need to do an IPI if flush_xen_data_tlb was implemented using TLBIALLHIS. But actually flush_xen_data_tlb uses TLBIALLH, which does not seem to propagate across an Inner Shareable domain. Did I misunderstand something? > } > > @@ -15,17 +16,12 @@ void smp_call_function( > void *info, > int wait) > { > - /* TODO: No SMP just now, does not include self so nothing to do. > - cpumask_t allbutself = cpu_online_map; > - cpu_clear(smp_processor_id(), allbutself); > - on_selected_cpus(&allbutself, func, info, wait); > - */ > + panic("%s not implmented\n", __func__); > } > + > void smp_send_event_check_mask(const cpumask_t *mask) > { > - /* TODO: No SMP just now, does not include self so nothing to do.
> - send_IPI_mask(mask, EVENT_CHECK_VECTOR); > - */ > + send_SGI_mask(mask, GIC_SGI_EVENT_CHECK); > } > > /* > diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h > index 6bf50bb..24c0d5c 100644 > --- a/xen/include/asm-arm/gic.h > +++ b/xen/include/asm-arm/gic.h > @@ -51,6 +51,13 @@ > #define GICD_SPENDSGIRN (0xF2C/4) > #define GICD_ICPIDR2 (0xFE8/4) > > +#define GICD_SGI_TARGET_LIST (0UL<<24) > +#define GICD_SGI_TARGET_OTHERS (1UL<<24) > +#define GICD_SGI_TARGET_SELF (2UL<<24) > +#define GICD_SGI_TARGET_SHIFT (16) > +#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT) > +#define GICD_SGI_GROUP1 (1UL<<15) > + > #define GICC_CTLR (0x0000/4) > #define GICC_PMR (0x0004/4) > #define GICC_BPR (0x0008/4) > @@ -83,8 +90,9 @@ > #define GICC_CTL_ENABLE 0x1 > #define GICC_CTL_EOI (0x1 << 9) > > -#define GICC_IA_IRQ 0x03ff > -#define GICC_IA_CPU 0x1c00 > +#define GICC_IA_IRQ 0x03ff > +#define GICC_IA_CPU_MASK 0x1c00 > +#define GICC_IA_CPU_SHIFT 10 > > #define GICH_HCR_EN (1 << 0) > #define GICH_HCR_UIE (1 << 1) > @@ -157,6 +165,16 @@ extern int gicv_setup(struct domain *d); > extern void gic_save_state(struct vcpu *v); > extern void gic_restore_state(struct vcpu *v); > > +/* SGI (AKA IPIs) */ > +enum gic_sgi { > + GIC_SGI_EVENT_CHECK = 0, > + GIC_SGI_DUMP_STATE = 1, > +}; > +extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); > +extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); > +extern void send_SGI_self(enum gic_sgi sgi); > +extern void send_SGI_allbutself(enum gic_sgi sgi); > + > /* print useful debug info */ > extern void gic_dump_info(struct vcpu *v); > > -- > 1.7.10.4 > _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |