[Xen-devel] [RFC PATCH v1 08/10] xen/arm: Add support for GIC v3
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
Add support for the GIC v3 specification.
This driver assumes that ARE and SRE are
enabled by default.
Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
xen/arch/arm/Makefile | 2 +-
xen/arch/arm/gic-v3.c | 944 +++++++++++++++++++++++++++++++++++++
xen/arch/arm/gic.c | 12 +
xen/include/asm-arm/domain.h | 4 +
xen/include/asm-arm/gic.h | 9 +
xen/include/asm-arm/gic_v3_defs.h | 211 +++++++++
6 files changed, 1181 insertions(+), 1 deletion(-)
diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index 20f59f4..a11c699 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -10,7 +10,7 @@ obj-y += vpsci.o
obj-y += domctl.o
obj-y += sysctl.o
obj-y += domain_build.o
-obj-y += gic.o gic-v2.o
+obj-y += gic.o gic-v2.o gic-v3.o
obj-y += io.o
obj-y += irq.o
obj-y += kernel.o
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
new file mode 100644
index 0000000..81a36ba
--- /dev/null
+++ b/xen/arch/arm/gic-v3.c
@@ -0,0 +1,944 @@
+/*
+ * xen/arch/arm/gic-v3.c
+ *
+ * ARM Generic Interrupt Controller support v3 version
+ * based on xen/arch/arm/gic-v2.c
+ *
+ * Vijaya Kumar K <vijaya.kumar@xxxxxxxxxxxxxxxxxx>
+ * Copyright (c) 2014 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/cpu.h>
+#include <xen/mm.h>
+#include <xen/irq.h>
+#include <xen/sched.h>
+#include <xen/errno.h>
+#include <xen/serial.h>
+#include <xen/softirq.h>
+#include <xen/list.h>
+#include <xen/device_tree.h>
+#include <asm/p2m.h>
+#include <asm/domain.h>
+#include <asm/platform.h>
+
+#include <asm/gic_v3_defs.h>
+#include <asm/gic.h>
+#include <asm/io.h>
+
+struct rdist_region {
+ paddr_t rdist_base;
+ paddr_t rdist_base_size;
+ void __iomem *map_rdist_base;
+};
+
+/* Global state */
+static struct {
+ paddr_t dbase; /* Address of distributor registers */
+ paddr_t dbase_size;
+ void __iomem *map_dbase; /* Mapped address of distributor registers */
+ struct rdist_region *rdist_regions;
+ u32 rdist_stride;
+ unsigned int rdist_count; /* Number of redistributor regions */
+ unsigned int lines; /* Number of interrupts (SPIs + PPIs + SGIs) */
+ struct dt_irq maintenance;
+ unsigned int cpus;
+ int hw_version;
+ spinlock_t lock;
+} gic;
+
+struct gic_state_data {
+ uint32_t gic_hcr, gic_vmcr;
+ uint32_t gic_apr0[4];
+ uint32_t gic_apr1[4];
+ uint64_t gic_lr[16];
+};
+
+#define GICD ((volatile unsigned char *) gic.map_dbase)
+/* Only the first redistributor region is mapped, which is enough for up to 32 CPUs */
+#define GICR ((volatile unsigned char *) gic.rdist_regions[0].map_rdist_base)
+
+/* per-cpu re-distributor base */
+static DEFINE_PER_CPU(paddr_t, rbase);
+static DEFINE_PER_CPU(paddr_t, phy_rbase);
+
+static unsigned nr_lrs;
+static uint32_t nr_priorities;
+
+/* The GIC mapping of CPU interfaces does not necessarily match the
+ * logical CPU numbering. Let's use mapping as returned by the GIC
+ * itself
+ */
+
+#define gic_data_rdist_rd_base() (this_cpu(rbase))
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+static inline u64 read_cpuid_mpidr(void)
+{
+ return READ_SYSREG(MPIDR_EL1);
+}
+
+static u64 gich_read_lr(int lr)
+{
+ switch (lr)
+ {
+ case 0: /* ICH_LRn is 64 bit */
+ return READ_SYSREG(ICH_LR0_EL2);
+ case 1:
+ return READ_SYSREG(ICH_LR1_EL2);
+ case 2:
+ return READ_SYSREG(ICH_LR2_EL2);
+ case 3:
+ return READ_SYSREG(ICH_LR3_EL2);
+ case 4:
+ return READ_SYSREG(ICH_LR4_EL2);
+ case 5:
+ return READ_SYSREG(ICH_LR5_EL2);
+ case 6:
+ return READ_SYSREG(ICH_LR6_EL2);
+ case 7:
+ return READ_SYSREG(ICH_LR7_EL2);
+ case 8:
+ return READ_SYSREG(ICH_LR8_EL2);
+ case 9:
+ return READ_SYSREG(ICH_LR9_EL2);
+ case 10:
+ return READ_SYSREG(ICH_LR10_EL2);
+ case 11:
+ return READ_SYSREG(ICH_LR11_EL2);
+ case 12:
+ return READ_SYSREG(ICH_LR12_EL2);
+ case 13:
+ return READ_SYSREG(ICH_LR13_EL2);
+ case 14:
+ return READ_SYSREG(ICH_LR14_EL2);
+ case 15:
+ return READ_SYSREG(ICH_LR15_EL2);
+ default:
+ return 0;
+ }
+}
+
+static void gich_write_lr(int lr, u64 val)
+{
+ switch (lr)
+ {
+ case 0:
+ WRITE_SYSREG(val, ICH_LR0_EL2);
+ break;
+ case 1:
+ WRITE_SYSREG(val, ICH_LR1_EL2);
+ break;
+ case 2:
+ WRITE_SYSREG(val, ICH_LR2_EL2);
+ break;
+ case 3:
+ WRITE_SYSREG(val, ICH_LR3_EL2);
+ break;
+ case 4:
+ WRITE_SYSREG(val, ICH_LR4_EL2);
+ break;
+ case 5:
+ WRITE_SYSREG(val, ICH_LR5_EL2);
+ break;
+ case 6:
+ WRITE_SYSREG(val, ICH_LR6_EL2);
+ break;
+ case 7:
+ WRITE_SYSREG(val, ICH_LR7_EL2);
+ break;
+ case 8:
+ WRITE_SYSREG(val, ICH_LR8_EL2);
+ break;
+ case 9:
+ WRITE_SYSREG(val, ICH_LR9_EL2);
+ break;
+ case 10:
+ WRITE_SYSREG(val, ICH_LR10_EL2);
+ break;
+ case 11:
+ WRITE_SYSREG(val, ICH_LR11_EL2);
+ break;
+ case 12:
+ WRITE_SYSREG(val, ICH_LR12_EL2);
+ break;
+ case 13:
+ WRITE_SYSREG(val, ICH_LR13_EL2);
+ break;
+ case 14:
+ WRITE_SYSREG(val, ICH_LR14_EL2);
+ break;
+ case 15:
+ WRITE_SYSREG(val, ICH_LR15_EL2);
+ break;
+ default:
+ return;
+ }
+}
+
+static void gic_enable_sre(void)
+{
+ uint32_t val;
+
+ val = READ_SYSREG32(ICC_SRE_EL2);
+ val |= GICC_SRE_EL2_SRE | GICC_SRE_EL2_ENEL1 | GICC_SRE_EL2_DFB | GICC_SRE_EL2_DIB;
+ WRITE_SYSREG32(val, ICC_SRE_EL2);
+ isb();
+}
+
+/* Wait for completion of a distributor change */
+static void gic_do_wait_for_rwp(paddr_t base)
+{
+ u32 val;
+ do {
+ val = readl_relaxed((void *)base + GICD_CTLR);
+ cpu_relax();
+ } while (val & GICD_CTLR_RWP);
+}
+
+static void gic_dist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp((paddr_t)GICD);
+}
+
+static void gic_redist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+static void gic_wait_for_rwp(int irq)
+{
+ if (irq < 32)
+ gic_redist_wait_for_rwp();
+ else
+ gic_dist_wait_for_rwp();
+}
+
+static unsigned int gic_mask_cpu(const cpumask_t *cpumask)
+{
+ unsigned int cpu;
+ cpumask_t possible_mask;
+
+ cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
+ cpu = cpumask_any(&possible_mask);
+ return cpu;
+}
+
+static unsigned int gic_nr_lines(void)
+{
+ return gic.lines;
+}
+
+static unsigned int gic_nr_lrs(void)
+{
+ return nr_lrs;
+}
+
+static void write_aprn_regs(struct gic_state_data *d)
+{
+ switch(nr_priorities)
+ {
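+ /* Deliberate fall-through: more implemented priority bits mean more ICH_AP{0,1}R<n> registers to restore */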
+ case 7:
+ WRITE_SYSREG32(d->gic_apr0[2], ICH_AP0R2_EL2);
+ WRITE_SYSREG32(d->gic_apr1[2], ICH_AP1R2_EL2);
+ case 6:
+ WRITE_SYSREG32(d->gic_apr0[1], ICH_AP0R1_EL2);
+ WRITE_SYSREG32(d->gic_apr1[1], ICH_AP1R1_EL2);
+ case 5:
+ WRITE_SYSREG32(d->gic_apr0[0], ICH_AP0R0_EL2);
+ WRITE_SYSREG32(d->gic_apr1[0], ICH_AP1R0_EL2);
+ break;
+ default:
+ panic("Write Undefined active priorities \n");
+ }
+}
+
+static void read_aprn_regs(struct gic_state_data *d)
+{
+ switch(nr_priorities)
+ {
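+ /* Deliberate fall-through: more implemented priority bits mean more ICH_AP{0,1}R<n> registers to save */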
+ case 7:
+ d->gic_apr0[2] = READ_SYSREG32(ICH_AP0R2_EL2);
+ d->gic_apr1[2] = READ_SYSREG32(ICH_AP1R2_EL2);
+ case 6:
+ d->gic_apr0[1] = READ_SYSREG32(ICH_AP0R1_EL2);
+ d->gic_apr1[1] = READ_SYSREG32(ICH_AP1R1_EL2);
+ case 5:
+ d->gic_apr0[0] = READ_SYSREG32(ICH_AP0R0_EL2);
+ d->gic_apr1[0] = READ_SYSREG32(ICH_AP1R0_EL2);
+ break;
+ default:
+ panic("Read Undefined active priorities \n");
+ }
+}
+
+static int gic_state_init(struct vcpu *v)
+{
+ v->arch.gic_state = xzalloc(struct gic_state_data);
+ if ( !v->arch.gic_state )
+ return -ENOMEM;
+ return 0;
+}
+
+static void save_state(struct vcpu *v)
+{
+ int i;
+ struct gic_state_data *d;
+ d = (struct gic_state_data *)v->arch.gic_state;
+
+ /* No need for spinlocks here because interrupts are disabled around
+ * this call and it only accesses struct vcpu fields that cannot be
+ * accessed simultaneously by another pCPU.
+ */
+ for ( i=0; i<nr_lrs; i++)
+ d->gic_lr[i] = gich_read_lr(i);
+
+ read_aprn_regs(d);
+
+ d->gic_vmcr = READ_SYSREG32(ICH_VMCR_EL2);
+}
+
+static void restore_state(struct vcpu *v)
+{
+ int i;
+ struct gic_state_data *d;
+ d = (struct gic_state_data *)v->arch.gic_state;
+
+ for ( i=0; i<nr_lrs; i++)
+ gich_write_lr(i, d->gic_lr[i]);
+
+ write_aprn_regs(d);
+
+ WRITE_SYSREG32(d->gic_vmcr, ICH_VMCR_EL2);
+}
+
+static void gic_dump_state(struct vcpu *v)
+{
+ int i;
+ struct gic_state_data *d;
+ d = (struct gic_state_data *)v->arch.gic_state;
+ if ( v == current )
+ {
+ for ( i = 0; i < nr_lrs; i++ )
+ printk(" HW_LR[%d]=%lx\n", i, gich_read_lr(i));
+ }
+ else
+ {
+ for ( i = 0; i < nr_lrs; i++ )
+ printk(" VCPU_LR[%d]=%lx\n", i, d->gic_lr[i]);
+ }
+}
+
+static void gic_enable_irq(int irq)
+{
+ uint32_t enabler;
+
+ /* Enable routing */
+ if ( irq > 31 )
+ {
+ enabler = readl_relaxed(GICD + GICD_ISENABLER + (irq / 32) * 4);
+ writel_relaxed(enabler | (1u << (irq % 32)), GICD + GICD_ISENABLER + (irq / 32) * 4);
+ }
+ else
+ {
+ enabler = readl_relaxed((void *)gic_data_rdist_sgi_base() + GICR_ISENABLER0);
+ writel_relaxed(enabler | (1u << irq), (void *)gic_data_rdist_sgi_base() + GICR_ISENABLER0);
+ }
+ gic_wait_for_rwp(irq);
+}
+
+static void gic_disable_irq(int irq)
+{
+ /* Disable routing */
+ if ( irq > 31 )
+ writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + ((irq / 32) * 4));
+ else
+ writel_relaxed(1u << irq, (void *)gic_data_rdist_sgi_base() + GICR_ICENABLER0);
+}
+
+static void gic_eoi_irq(int irq)
+{
+ /* Lower the priority */
+ WRITE_SYSREG32(irq, ICC_EOIR1_EL1);
+}
+
+static void gic_dir_irq(int irq)
+{
+ /* Deactivate */
+ WRITE_SYSREG32(irq, ICC_DIR_EL1);
+}
+
+static unsigned int gic_ack_irq(void)
+{
+ return (READ_SYSREG32(ICC_IAR1_EL1) & GICC_IA_IRQ);
+}
+
+static u64 gic_mpidr_to_affinity(u64 mpidr)
+{
+ /* Make sure we don't broadcast the interrupt */
+ return mpidr & ~GICD_IROUTER_SPI_MODE_ANY;
+}
+
+/*
+ * - needs to be called with gic.lock held
+ * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
+ * already called gic_cpu_init
+ */
+static void gic_set_irq_property(unsigned int irq, bool_t level,
+ const cpumask_t *cpu_mask,
+ unsigned int priority)
+{
+ uint32_t cfg, edgebit;
+ u64 affinity;
+ unsigned int cpu = gic_mask_cpu(cpu_mask);
+ paddr_t rebase;
+
+
+ /* Set edge / level */
+ if ( irq < 16 )
+ /* SGIs are always edge-triggered; their configuration in GICR_ICFGR0 cannot be changed */
+ cfg = readl_relaxed((void *)gic_data_rdist_sgi_base() + GICR_ICFGR0);
+ else if (irq < 32)
+ cfg = readl_relaxed((void *)gic_data_rdist_sgi_base() + GICR_ICFGR1);
+ else
+ cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
+
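+ /* Each interrupt has a 2-bit field in ICFGR: bit[1] set means edge-triggered, clear means level-sensitive */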
+ edgebit = 2u << (2 * (irq % 16));
+ if ( level )
+ cfg &= ~edgebit;
+ else
+ cfg |= edgebit;
+
+ if (irq < 16)
+ writel_relaxed(cfg, (void *)gic_data_rdist_sgi_base() + GICR_ICFGR0);
+ else if (irq < 32)
+ writel_relaxed(cfg, (void *)gic_data_rdist_sgi_base() + GICR_ICFGR1);
+ else
+ writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
+
+
+ /* need to check if ARE is set to access IROUTER */
+ affinity = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+ if (irq > 31)
+ writeq_relaxed(affinity, (GICD + GICD_IROUTER + irq * 8));
+
+ /* Set priority */
+ if (irq < 32)
+ {
+ rebase = gic_data_rdist_sgi_base();
+ writeb_relaxed(priority, (void *)rebase + GICR_IPRIORITYR0 + irq);
+ }
+ else
+ {
+ writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
+ }
+}
+
+static void __init gic_dist_init(void)
+{
+ uint32_t type;
+ u64 affinity;
+ int i;
+
+ /* Disable the distributor */
+ writel_relaxed(0, GICD + GICD_CTLR);
+
+ type = readl_relaxed(GICD + GICD_TYPER);
+ gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
+
+ printk("GIC: %d lines, (IID %8.8x).\n",
+ gic.lines, readl_relaxed(GICD + GICD_IIDR));
+
+ /* Default all global IRQs to level, active low */
+ for ( i = 32; i < gic.lines; i += 16 )
+ writel_relaxed(0, GICD + GICD_ICFGR + (i / 16) * 4);
+
+ /* Default priority for global interrupts */
+ for ( i = 32; i < gic.lines; i += 4 )
+ writel_relaxed((GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | GIC_PRI_IRQ),
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
+
+ /* Disable all global interrupts */
+ for ( i = 32; i < gic.lines; i += 32 )
+ writel_relaxed(0xffffffff, GICD + GICD_ICENABLER + (i / 32) * 4);
+
+ gic_dist_wait_for_rwp();
+
+ /* Turn on the distributor */
+ writel_relaxed(GICD_CTL_ENABLE | GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+ GICD + GICD_CTLR);
+
+ /* Route all global IRQs to this CPU */
+ affinity = gic_mpidr_to_affinity(read_cpuid_mpidr());
+ for ( i = 32; i < gic.lines; i++ )
+ writeq_relaxed(affinity, GICD + GICD_IROUTER + i * 8);
+}
+
+static void gic_enable_redist(void)
+{
+ paddr_t rbase;
+ u32 val;
+
+ rbase = this_cpu(rbase);
+
+ /* Wake up this CPU redistributor */
+ val = readl_relaxed((void *)rbase + GICR_WAKER);
+ val &= ~GICR_WAKER_ProcessorSleep;
+ writel_relaxed(val, (void *)rbase + GICR_WAKER);
+
+ do {
+ val = readl_relaxed((void *)rbase + GICR_WAKER);
+ cpu_relax();
+ } while (val & GICR_WAKER_ChildrenAsleep);
+}
+
+static int __init gic_populate_rdist(void)
+{
+ u64 mpidr = cpu_logical_map(smp_processor_id());
+ u64 typer;
+ u64 aff;
+ int i;
+ uint32_t reg;
+
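+ /* Pack Aff0-Aff2 into bits [23:0] and Aff3 into bits [31:24] to match the affinity value held in GICR_TYPER[63:32] */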
+ aff = mpidr & ((1 << 24) - 1);
+ aff |= (mpidr >> 8) & (0xffUL << 24);
+
+ for (i = 0; i < gic.rdist_count; i++) {
+ uint32_t ptr = 0;
+
+ reg = readl_relaxed(GICR + ptr + GICR_PIDR0);
+ if ((reg & 0xff) != GICR_PIDR0_GICv3) { /* We're in trouble... */
+ printk("No redistributor present @%x\n", ptr);
+ break;
+ }
+
+ do {
+ typer = readq_relaxed(GICR + ptr + GICR_TYPER);
+ if ((typer >> 32) == aff) {
+
+ this_cpu(rbase) = (u64)GICR + ptr;
+ this_cpu(phy_rbase) = gic.rdist_regions[i].rdist_base + ptr;
+
+ printk("CPU%d: found redistributor %llx region %d\n",
+ smp_processor_id(), (unsigned long long) mpidr, i);
+ return 0;
+ }
+
+ if (gic.rdist_stride) {
+ ptr += gic.rdist_stride;
+ } else {
+ ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+ if (typer & GICR_TYPER_VLPIS)
+ ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+ }
+ } while (!(typer & GICR_TYPER_LAST));
+ }
+
+ /* We couldn't even deal with ourselves... */
+ printk("CPU%d: mpidr %lx has no re-distributor!\n",
+ smp_processor_id(), (unsigned long)mpidr);
+ return -ENODEV;
+}
+
+static void __cpuinit gic_cpu_init(void)
+{
+ int i;
+ paddr_t rbase_sgi;
+
+ /* Register ourselves with the rest of the world */
+ if (gic_populate_rdist())
+ return;
+
+ gic_enable_redist();
+
+ rbase_sgi = gic_data_rdist_sgi_base();
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for ( i = 0; i < 16; i += 4 )
+ writel_relaxed((GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 | GIC_PRI_IPI << 8 | GIC_PRI_IPI),
+ (void *)rbase_sgi + GICR_IPRIORITYR0 + (i / 4) * 4);
+
+ for ( i = 16; i < 32; i += 4 )
+ writel_relaxed((GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 | GIC_PRI_IRQ << 8 | GIC_PRI_IRQ),
+ (void *)rbase_sgi + GICR_IPRIORITYR0 + (i / 4) * 4);
+
+ /*
+ * Disable all PPI interrupts, ensure all SGI interrupts are
+ * enabled.
+ */
+ writel_relaxed(0xffff0000, (void *)rbase_sgi + GICR_ICENABLER0);
+ writel_relaxed(0x0000ffff, (void *)rbase_sgi + GICR_ISENABLER0);
+
+ gic_redist_wait_for_rwp();
+
+ /* Enable system registers */
+ gic_enable_sre();
+
+ WRITE_SYSREG32(0, ICC_BPR1_EL1);
+ /* Set priority mask register */
+ WRITE_SYSREG32(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+
+ /* EOImode = 1: EOI performs the priority drop only; deactivation is done separately via ICC_DIR_EL1 */
+ WRITE_SYSREG32(GICC_CTLR_EL1_EOImode_drop, ICC_CTLR_EL1);
+
+ WRITE_SYSREG32(1, ICC_IGRPEN1_EL1);
+}
+
+static void gic_cpu_disable(void)
+{
+ WRITE_SYSREG32(0, ICC_CTLR_EL1);
+}
+
+static void __cpuinit gic_hyp_init(void)
+{
+ uint32_t vtr;
+
+ vtr = READ_SYSREG32(ICH_VTR_EL2);
+ nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
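+ /* ICH_VTR_EL2.PRIbits holds the number of virtual priority bits minus one */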
+ nr_priorities = ((vtr >> GICH_VTR_PRIBITS_SHIFT) & GICH_VTR_PRIBITS_MASK) + 1;
+
+ WRITE_SYSREG32(GICH_VMCR_EOI | GICH_VMCR_VENG1, ICH_VMCR_EL2);
+ WRITE_SYSREG32(GICH_HCR_VGRP1EIE | GICH_HCR_EN, ICH_HCR_EL2);
+
+ update_cpu_lr_mask();
+ vtr = READ_SYSREG32(ICH_HCR_EL2);
+}
+
+/* Set up the per-CPU parts of the GIC for a secondary CPU */
+static int __cpuinit gic_init_secondary_cpu(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING)
+ {
+ spin_lock(&gic.lock);
+ gic_cpu_init();
+ gic_hyp_init();
+ spin_unlock(&gic.lock);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gic_cpu_nb = {
+ .notifier_call = gic_init_secondary_cpu,
+ .priority = 100
+};
+
+static void gic_smp_init(void)
+{
+ register_cpu_notifier(&gic_cpu_nb);
+}
+
+static void __cpuinit gic_hyp_disable(void)
+{
+ uint32_t vtr;
+ vtr = READ_SYSREG32(ICH_HCR_EL2);
+ vtr &= ~GICH_HCR_EN;
+ WRITE_SYSREG32( vtr, ICH_HCR_EL2);
+}
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+ u64 cluster_id)
+{
+ int cpu = *base_cpu;
+ u64 mpidr = cpu_logical_map(cpu);
+ u16 tlist = 0;
+
+ while (cpu < nr_cpu_ids) {
+ /*
+ * If we ever get a cluster of more than 16 CPUs, just
+ * scream and skip that CPU.
+ */
+ tlist |= 1 << (mpidr & 0xf);
+
+ cpu = cpumask_next(cpu, mask);
+ mpidr = cpu_logical_map(cpu);
+
+ if (cluster_id != (mpidr & ~0xffUL)) {
+ cpu--;
+ goto out;
+ }
+ }
+out:
+ *base_cpu = cpu;
+ return tlist;
+}
+
+static void send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+ u64 val;
+
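+ /* ICC_SGI1R_EL1 layout: Aff3 [55:48], Aff2 [39:32], SGI INTID [27:24], Aff1 [23:16], target list [15:0] */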
+ val = (cluster_id & 0xff00ff0000UL) << 16; /* Aff3 + Aff2 */
+ val |= (cluster_id & 0xff00) << 8; /* Aff1 */
+ val |= irq << 24;
+ val |= tlist;
+
+ WRITE_SYSREG(val, ICC_SGI1R_EL1);
+}
+
+static void gic_send_sgi(const cpumask_t *cpumask, enum gic_sgi sgi)
+{
+ int cpu = 0;
+
+ dsb();
+
+ for_each_cpu(cpu, cpumask) {
+ u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+ u16 tlist;
+
+ tlist = gic_compute_target_list(&cpu, cpumask, cluster_id);
+ send_sgi(cluster_id, tlist, sgi);
+ }
+}
+
+/* Shut down the per-CPU GIC interface */
+static void gic_disable_interface(void)
+{
+ ASSERT(!local_irq_is_enabled());
+
+ spin_lock(&gic.lock);
+ gic_cpu_disable();
+ gic_hyp_disable();
+ spin_unlock(&gic.lock);
+}
+
+static void gic_update_lr(int lr, unsigned int virtual_irq,
+ unsigned int state, unsigned int priority)
+{
+ u64 maintenance_int = GICH_LR_MAINTENANCE_IRQ;
+ u64 grp = GICH_LR_GRP1;
+ u64 val = 0;
+
+ BUG_ON(lr >= nr_lrs);
+ BUG_ON(lr < 0);
+
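+ /* ICH_LR_EL2 fields used here: state [63:62], HW [61], group [60], priority [55:48], pINTID/EOI bit [41:32], vINTID [31:0] */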
+ val = ((((u64)state) & 0x3) << GICH_LR_STATE_SHIFT) | grp | maintenance_int |
+ ((((u64)priority) & 0xff) << GICH_LR_PRIORITY_SHIFT) |
+ (((u64)virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT) |
+ (((u64)virtual_irq & GICH_LR_PHYSICAL_MASK) << GICH_LR_PHYSICAL_SHIFT);
+
+ gich_write_lr(lr, val);
+}
+
+int gicv_init(struct domain *d)
+{
+ /*
+ * Domain 0 gets the hardware address.
+ * Guests get the virtual platform layout.
+ */
+ if ( d->domain_id == 0 )
+ {
+ d->arch.vgic.dbase = gic.dbase;
+ d->arch.vgic.dbase_size = gic.dbase_size;
+ d->arch.vgic.rbase = gic.rdist_regions[0].rdist_base;
+ d->arch.vgic.rbase_size = gic.rdist_regions[0].rdist_base_size;
+ d->arch.vgic.rdist_stride = gic.rdist_stride;
+ }
+ else
+ {
+ d->arch.vgic.dbase = GUEST_GICD_BASE;
+ }
+
+ d->arch.vgic.nr_lines = 0;
+
+ return 0;
+
+}
+
+static unsigned long gic_read_eisr(void)
+{
+ return (unsigned long)READ_SYSREG32(ICH_EISR_EL2);
+}
+
+static unsigned int gic_update_lr_for_mi(int lr)
+{
+ u64 val;
+ uint32_t virq;
+
+ spin_lock_irq(&gic.lock);
+ val = gich_read_lr(lr);
+ virq = val & GICH_LR_VIRTUAL_MASK;
+
+ gich_write_lr(lr, 0);
+
+ spin_unlock_irq(&gic.lock);
+ return virq;
+}
+
+static unsigned long gic_rdist_rd_base(void)
+{
+ return this_cpu(phy_rbase);
+}
+
+static unsigned long gic_rdist_sgi_base(void)
+{
+ return this_cpu(phy_rbase) + SZ_64K;
+}
+
+static int gic_hw_type(void)
+{
+ return gic.hw_version;
+}
+
+static struct dt_irq * gic_maintenance_irq(void)
+{
+ return &gic.maintenance;
+}
+
+static struct gic_hw_operations gic_ops = {
+ .gic_type = gic_hw_type,
+ .nr_lines = gic_nr_lines,
+ .nr_lrs = gic_nr_lrs,
+ .get_maintenance_irq = gic_maintenance_irq,
+ .state_init = gic_state_init,
+ .save_state = save_state,
+ .restore_state = restore_state,
+ .dump_state = gic_dump_state,
+ .gicv_setup = gicv_init,
+ .enable_irq = gic_enable_irq,
+ .disable_irq = gic_disable_irq,
+ .eoi_irq = gic_eoi_irq,
+ .deactivate_irq = gic_dir_irq,
+ .ack_irq = gic_ack_irq,
+ .set_irq_property = gic_set_irq_property,
+ .send_sgi = gic_send_sgi,
+ .disable_interface = gic_disable_interface,
+ .update_lr = gic_update_lr,
+ .update_lr_for_mi = gic_update_lr_for_mi,
+ .read_eisr = gic_read_eisr,
+ .read_cpu_rbase = gic_rdist_rd_base,
+ .read_cpu_sgi_rbase = gic_rdist_sgi_base,
+};
+
+/* Set up the GIC */
+void __init gicv3_init(void)
+{
+ static const struct dt_device_match gic_ids[] __initconst =
+ {
+ DT_MATCH_GIC_V3,
+ { /* sentinel */ },
+ };
+ struct dt_device_node *node;
+ struct rdist_region *rdist_regs;
+ int res, i;
+ uint32_t reg;
+
+ node = dt_find_interrupt_controller(gic_ids);
+ if ( !node )
+ panic("Unable to find compatible GIC in the device tree");
+
+ dt_device_set_used_by(node, DOMID_XEN);
+
+ res = dt_device_get_address(node, 0, &gic.dbase, &gic.dbase_size);
+ if ( res || !gic.dbase || (gic.dbase & ~PAGE_MASK) || (gic.dbase_size & ~PAGE_MASK) )
+ panic("GIC: Cannot find a valid address for the distributor");
+
+ gic.map_dbase = ioremap_nocache(gic.dbase, gic.dbase_size);
+
+ reg = readl_relaxed(GICD + GICD_PIDR0);
+ if ((reg & 0xff) != GICD_PIDR0_GICv3)
+ panic("GIC: no distributor detected, giving up\n");
+
+ gic.hw_version = GIC_VERSION_V3;
+
+ if (!dt_property_read_u32(node, "#redistributor-regions",
&gic.rdist_count))
+ gic.rdist_count = 1;
+
+ rdist_regs = xzalloc_array(struct rdist_region, gic.rdist_count);
+ if ( !rdist_regs )
+ panic("GIC: cannot allocate memory for redistributor regions\n");
+
+ for (i = 0; i < gic.rdist_count; i++) {
+ u64 rdist_base, rdist_size;
+
+ res = dt_device_get_address(node, 1 + i, &rdist_base, &rdist_size);
+ if ( res || !rdist_base)
+ printk("No rdist base found\n");
+
+ rdist_regs[i].rdist_base = rdist_base;
+ rdist_regs[i].rdist_base_size = rdist_size;
+ }
+
+ if(!dt_property_read_u32(node, "redistributor-stride", &gic.rdist_stride))
+ gic.rdist_stride = 0x0;
+
+ gic.rdist_regions = rdist_regs;
+
+ res = dt_device_get_irq(node, 0, &gic.maintenance);
+ if ( res )
+ panic("GIC: Cannot find the maintenance IRQ");
+
+ /* Set the GIC as the primary interrupt controller */
+ dt_interrupt_controller = node;
+
+ /* map dbase & rdist regions */
+ gic.rdist_regions[0].map_rdist_base = ioremap_nocache(gic.rdist_regions[0].rdist_base,
+ gic.rdist_regions[0].rdist_base_size);
+
+ printk("GIC initialization:\n"
+ " gic_dist_addr=%"PRIpaddr"\n"
+ " gic_dist_size=%"PRIpaddr"\n"
+ " gic_dist_mapaddr=%"PRIpaddr"\n"
+ " gic_rdist_regions=%d\n"
+ " gic_rdist_stride=%x\n"
+ " gic_rdist_base=%"PRIpaddr"\n"
+ " gic_rdist_base_size=%"PRIpaddr"\n"
+ " gic_rdist_base_mapaddr=%"PRIpaddr"\n"
+ " gic_maintenance_irq=%u\n",
+ gic.dbase, gic.dbase_size, (u64)gic.map_dbase, gic.rdist_count,
+ gic.rdist_stride, gic.rdist_regions[0].rdist_base,
+ gic.rdist_regions[0].rdist_base_size,
+ (u64)gic.rdist_regions[0].map_rdist_base, gic.maintenance.irq);
+
+ /* Global settings: interrupt distributor */
+ spin_lock_init(&gic.lock);
+ spin_lock(&gic.lock);
+
+ gic_smp_init();
+ gic_dist_init();
+ gic_cpu_init();
+ gic_hyp_init();
+
+ /* Register hw ops */
+ register_gic_ops(&gic_ops);
+ spin_unlock(&gic.lock);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index e0859ae..291e34c 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -688,6 +688,11 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
/* Set up the GIC */
void __init gic_init(void)
{
+ static const struct dt_device_match gicv3_ids[] __initconst =
+ {
+ DT_MATCH_GIC_V3,
+ { /* sentinel */ },
+ };
static const struct dt_device_match gicv2_ids[] __initconst =
{
DT_MATCH_GIC,
@@ -698,6 +703,13 @@ void __init gic_init(void)
spin_lock_init(&gic_lock);
spin_lock(&gic_lock);
+ node = dt_find_interrupt_controller(gicv3_ids);
+ if ( node )
+ {
+ gicv3_init();
+ spin_unlock(&gic_lock);
+ return;
+ }
node = dt_find_interrupt_controller(gicv2_ids);
if ( node )
{
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 38df789..15e83e8 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -142,6 +142,10 @@ struct arch_domain
/* Base address for guest GIC */
paddr_t dbase; /* Distributor base address */
paddr_t cbase; /* CPU base address */
+ paddr_t dbase_size; /* Distributor base size */
+ paddr_t rbase; /* Re-Distributor base address */
+ paddr_t rbase_size; /* Re-Distributor size */
+ uint32_t rdist_stride;
} vgic;
struct vuart {
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 2de6c6a..b6fbd5e 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -18,6 +18,8 @@
#ifndef __ASM_ARM_GIC_H__
#define __ASM_ARM_GIC_H__
+#define SZ_64K 0x00010000
+
/*
* The minimum GICC_BPR is required to be in the range 0-3. We set
* GICC_BPR to 0 but we must expect that it might be 3. This means we
@@ -48,6 +50,8 @@
#define DT_MATCH_GIC DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), \
DT_MATCH_COMPATIBLE("arm,cortex-a7-gic")
+#define DT_MATCH_GIC_V3 DT_MATCH_COMPATIBLE("arm,gic-v3")
+
extern int gic_hw_version(void);
extern int domain_vgic_init(struct domain *d);
extern void domain_vgic_free(struct domain *d);
@@ -58,6 +62,7 @@ extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
extern int vgic_v2_init(struct domain *d);
+extern void gicv3_init(void);
extern void gicv2_init(void);
extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual);
@@ -96,6 +101,7 @@ extern int gicv_setup(struct domain *d);
extern void gic_save_state(struct vcpu *v);
extern void gic_restore_state(struct vcpu *v);
+#define GIC_VERSION_V3 0x3
#define GIC_VERSION_V2 0x2
/* SGI (AKA IPIs) */
@@ -156,6 +162,9 @@ extern unsigned int gic_number_lines(void);
int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
unsigned int *out_hwirq, unsigned int *out_type);
+extern unsigned long gic_data_rdist_rd_base(void);
+extern unsigned long gic_data_rdist_sgi_base(void);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
new file mode 100644
index 0000000..24242ea
--- /dev/null
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -0,0 +1,211 @@
+/*
+ * ARM Generic Interrupt Controller support
+ *
+ * Vijaya Kumar K <vijaya.kumar@xxxxxxxxxxxxxxxxxx>
+ * Copyright (c) 2014 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define SZ_64K 0x00010000
+
+#define GICD_CTLR (0x000)
+#define GICD_TYPER (0x004)
+#define GICD_IIDR (0x008)
+#define GICD_STATUSR (0x010)
+#define GICD_SETSPI_NSR (0x040)
+#define GICD_CLRSPI_NSR (0x048)
+#define GICD_SETSPI_SR (0x050)
+#define GICD_CLRSPI_SR (0x058)
+#define GICD_IGROUPR (0x080)
+#define GICD_IGROUPRN (0x0FC)
+#define GICD_ISENABLER (0x100)
+#define GICD_ISENABLERN (0x17C)
+#define GICD_ICENABLER (0x180)
+#define GICD_ICENABLERN (0x1fC)
+#define GICD_ISPENDR (0x200)
+#define GICD_ISPENDRN (0x27C)
+#define GICD_ICPENDR (0x280)
+#define GICD_ICPENDRN (0x2FC)
+#define GICD_ISACTIVER (0x300)
+#define GICD_ISACTIVERN (0x37C)
+#define GICD_ICACTIVER (0x380)
+#define GICD_ICACTIVERN (0x3FC)
+#define GICD_IPRIORITYR (0x400)
+#define GICD_IPRIORITYRN (0x7F8)
+#define GICD_ICFGR (0xC00)
+#define GICD_ICFGRN (0xCFC)
+#define GICD_NSACR (0xE00)
+#define GICD_NSACRN (0xEFC)
+#define GICD_SGIR (0xF00)
+#define GICD_CPENDSGIR (0xF10)
+#define GICD_CPENDSGIRN (0xF1C)
+#define GICD_SPENDSGIR (0xF20)
+#define GICD_SPENDSGIRN (0xF2C)
+#define GICD_IROUTER (0x6000)
+#define GICD_IROUTERN (0x7FF8)
+#define GICD_PIDR0 (0xFFE0)
+#define GICD_PIDR7 (0xFFDC)
+
+#define GICD_SGI_TARGET_LIST_SHIFT (24)
+#define GICD_SGI_TARGET_LIST_MASK (0x3UL << GICD_SGI_TARGET_LIST_SHIFT)
+#define GICD_SGI_TARGET_LIST (0UL<<GICD_SGI_TARGET_LIST_SHIFT)
+#define GICD_SGI_TARGET_OTHERS (1UL<<GICD_SGI_TARGET_LIST_SHIFT)
+#define GICD_SGI_TARGET_SELF (2UL<<GICD_SGI_TARGET_LIST_SHIFT)
+#define GICD_SGI_TARGET_SHIFT (16)
+#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT)
+#define GICD_SGI_GROUP1 (1UL<<15)
+#define GICD_SGI_INTID_MASK (0xFUL)
+
+#define GICC_SRE_EL2_SRE (1UL << 0)
+#define GICC_SRE_EL2_DFB (1UL << 1)
+#define GICC_SRE_EL2_DIB (1UL << 2)
+#define GICC_SRE_EL2_ENEL1 (1UL << 3)
+
+#define GICD_PIDR0_GICv3 0x92
+
+#define GICD_CTLR_RWP (1UL << 31)
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_IROUTER_SPI_MODE_ONE (0UL << 31)
+#define GICD_IROUTER_SPI_MODE_ANY (1UL << 31)
+
+#define GICH_HCR_EN (1 << 0)
+
+#define GICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
+#define GICC_CTLR_EL1_EOImode_drop (1U << 1)
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+#define GICR_PIDR0_GICv3 0x93
+
+#define GICR_CTLR (0x0000)
+#define GICR_IIDR (0x0004)
+#define GICR_TYPER (0x0008)
+#define GICR_STATUSR (0x0010)
+#define GICR_WAKER (0x0014)
+#define GICR_SETLPIR (0x0040)
+#define GICR_CLRLPIR (0x0048)
+#define GICR_PROPBASER (0x0070)
+#define GICR_PENDBASER (0x0078)
+#define GICR_INVLPIR (0x00A0)
+#define GICR_INVALLR (0x00B0)
+#define GICR_SYNCR (0x00C0)
+#define GICR_MOVLPIR (0x100)
+#define GICR_MOVALLR (0x0110)
+#define GICR_PIDR0 GICD_PIDR0
+#define GICR_PIDR7 GICD_PIDR7
+
+/* GICR for SGI's & PPI's */
+
+#define GICR_IGROUPR0 (0x0080)
+#define GICR_IGRPMODR0 (0x0F80)
+#define GICR_ISENABLER0 (0x0100)
+#define GICR_ICENABLER0 (0x0180)
+#define GICR_ISPENDR0 (0x0200)
+#define GICR_ICPENDR0 (0x0280)
+#define GICR_ISACTIVER0 (0x0300)
+#define GICR_ICACTIVER0 (0x0380)
+#define GICR_IPRIORITYR0 (0x0400)
+#define GICR_IPRIORITYR7 (0x041C)
+#define GICR_ICFGR0 (0x0C00)
+#define GICR_ICFGR1 (0x0C04)
+#define GICR_NSACR (0x0E00)
+
+#define GICR_TYPER_PLPIS (1U << 0)
+#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_LAST (1U << 4)
+
+/* Register bits */
+#define GICD_CTL_ENABLE 0x1
+
+#define GICD_TYPE_LINES 0x01f
+#define GICD_TYPE_CPUS 0x0e0
+#define GICD_TYPE_SEC 0x400
+
+#define GICC_CTL_ENABLE 0x1
+#define GICC_CTL_EOI (0x1 << 9)
+
+#define GICC_IA_IRQ 0x03ff
+#define GICC_IA_CPU_MASK 0x1c00
+#define GICC_IA_CPU_SHIFT 10
+
+#define DEFAULT_PMR_VALUE 0xff
+
+#define GICH_HCR_EN (1 << 0)
+#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_LRENPIE (1 << 2)
+#define GICH_HCR_NPIE (1 << 3)
+#define GICH_HCR_VGRP0EIE (1 << 4)
+#define GICH_HCR_VGRP0DIE (1 << 5)
+#define GICH_HCR_VGRP1EIE (1 << 6)
+#define GICH_HCR_VGRP1DIE (1 << 7)
+#define GICH_HCR_TC (1 << 10)
+
+#define GICH_MISR_EOI (1 << 0)
+#define GICH_MISR_U (1 << 1)
+#define GICH_MISR_LRENP (1 << 2)
+#define GICH_MISR_NP (1 << 3)
+#define GICH_MISR_VGRP0E (1 << 4)
+#define GICH_MISR_VGRP0D (1 << 5)
+#define GICH_MISR_VGRP1E (1 << 6)
+#define GICH_MISR_VGRP1D (1 << 7)
+
+#define GICH_VMCR_EOI (1 << 9)
+#define GICH_VMCR_VENG1 (1 << 1)
+
+#define GICH_LR_VIRTUAL_MASK 0xffff
+#define GICH_LR_VIRTUAL_SHIFT 0
+#define GICH_LR_PHYSICAL_MASK 0x3ff
+#define GICH_LR_PHYSICAL_SHIFT 32
+#define GICH_LR_STATE_MASK 0x3
+#define GICH_LR_STATE_SHIFT 62
+#define GICH_LR_PRIORITY_SHIFT 48
+#define GICH_LR_MAINTENANCE_IRQ (1UL<<41)
+#define GICH_LR_PENDING 1
+#define GICH_LR_ACTIVE 2
+#define GICH_LR_GRP1 (1UL<<60)
+#define GICH_LR_HW (1UL<<61)
+
+#define GICH_VTR_NRLRGS 0x3f
+#define GICH_VTR_PRIBITS_MASK 0x7
+#define GICH_VTR_PRIBITS_SHIFT 29
+
+/*
+ * The minimum GICC_BPR is required to be in the range 0-3. We set
+ * GICC_BPR to 0 but we must expect that it might be 3. This means we
+ * can rely on preemption between the following ranges:
+ * 0xf0..0xff
+ * 0xe0..0xef
+ * 0xc0..0xcf
+ * 0xb0..0xbf
+ * 0xa0..0xaf
+ * 0x90..0x9f
+ * 0x80..0x8f
+ *
+ * Priorities within a range will not preempt each other.
+ *
+ * A GIC must support a minimum of 16 priority levels.
+ */
+#define GIC_PRI_LOWEST 0xf0
+#define GIC_PRI_IRQ 0xa0
+#define GIC_PRI_IPI 0x90 /* IPIs must preempt normal interrupts */
+#define GIC_PRI_HIGHEST 0x80 /* Higher priorities belong to Secure-World */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel