
[Xen-devel] [PATCH 04/10] xen/arm: Make gic-v2 code handle hip04-d01 platform



The GIC on this platform is mostly compatible with the standard
GICv2, except that:
- the ITARGETSR registers are extended to 16 bits per interrupt to
  support 16 CPUs;
- the SGI target mask is extended to support 16 CPUs;
- the maximum supported interrupt ID is 510.

Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxxx>
Signed-off-by: Zoltan Kiss <zoltan.kiss@xxxxxxxxxx>
---
 xen/arch/arm/gic-v2.c     | 70 ++++++++++++++++++++++++++++++++++++++---------
 xen/arch/arm/gic.c        |  5 +++-
 xen/include/asm-arm/gic.h |  4 ++-
 3 files changed, 64 insertions(+), 15 deletions(-)
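
For reference, a small stand-alone sketch (not part of the patch, plain
host C) of the offset and shift arithmetic the quirk relies on. It assumes
the HiP04 layout described above (16-bit ITARGETSR entries and a 16-bit
SGI target list) and the architectural 0x800 offset for GICD_ITARGETSR;
the helper names are made up for illustration only:

#include <stdio.h>

#define GICD_ITARGETSR 0x800 /* architectural offset of GICD_ITARGETSRn */

/* Byte offset of the target field for a given IRQ: one byte per IRQ on a
 * standard GICv2, one 16-bit word per IRQ with the HiP04 quirk. */
static unsigned int itargetsr_offset(unsigned int irq, unsigned int nr_cpu_if)
{
    return nr_cpu_if == 16 ? GICD_ITARGETSR + irq * 2
                           : GICD_ITARGETSR + irq;
}

/* Bit position of the SGI target list in GICD_SGIR, matching the
 * gicd_sgi_target_shift() helper introduced by the patch. */
static unsigned int sgi_target_shift(unsigned int nr_cpu_if)
{
    return 8 + 16 - nr_cpu_if;
}

int main(void)
{
    printf("standard: ITARGETSR(34) at %#x, SGI shift %u\n",
           itargetsr_offset(34, 8), sgi_target_shift(8));
    printf("hip04:    ITARGETSR(34) at %#x, SGI shift %u\n",
           itargetsr_offset(34, 16), sgi_target_shift(16));
    return 0;
}

With the usual 8 CPU interfaces this yields the standard target-list shift
of 16; with 16 interfaces it yields 8, which is what gicv2_send_SGI() uses
below.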

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index faad1ff..eef55ed 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -79,16 +79,22 @@ static struct gic_info gicv2_info;
  * logical CPU numbering. Let's use mapping as returned by the GIC
  * itself
  */
-static DEFINE_PER_CPU(u8, gic_cpu_id);
+static DEFINE_PER_CPU(u16, gic_cpu_id);
 
 /* Maximum cpu interface per GIC */
-#define NR_GIC_CPU_IF 8
+static unsigned int nr_gic_cpu_if = 8;
+static unsigned int gic_cpu_mask = 0xff;
 
 static inline void writeb_gicd(uint8_t val, unsigned int offset)
 {
     writeb_relaxed(val, gicv2.map_dbase + offset);
 }
 
+static inline void writew_gicd(uint16_t val, unsigned int offset)
+{
+    writew_relaxed(val, gicv2.map_dbase + offset);
+}
+
 static inline void writel_gicd(uint32_t val, unsigned int offset)
 {
     writel_relaxed(val, gicv2.map_dbase + offset);
@@ -132,7 +138,7 @@ static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask)
     cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
     for_each_cpu( cpu, &possible_mask )
     {
-        ASSERT(cpu < NR_GIC_CPU_IF);
+        ASSERT(cpu < nr_gic_cpu_if);
         mask |= per_cpu(gic_cpu_id, cpu);
     }
 
@@ -203,6 +209,15 @@ static unsigned int gicv2_read_irq(void)
     return (readl_gicc(GICC_IAR) & GICC_IA_IRQ);
 }
 
+/* Set target CPU mask (RAZ/WI on uniprocessor) */
+static void gicv2_set_irq_mask(int irq, unsigned int mask)
+{
+    if ( platform_has_quirk(PLATFORM_QUIRK_GICV2_16_CPU) )
+        writew_gicd(mask, GICD_ITARGETSR + irq * 2);
+    else
+        writeb_gicd(mask, GICD_ITARGETSR + irq);
+}
+
 /*
  * needs to be called with a valid cpu_mask, ie each cpu in the mask has
  * already called gic_cpu_init
@@ -230,7 +245,7 @@ static void gicv2_set_irq_properties(struct irq_desc *desc,
     writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4);
 
     /* Set target CPU mask (RAZ/WI on uniprocessor) */
-    writeb_gicd(mask, GICD_ITARGETSR + irq);
+    gicv2_set_irq_mask(irq, mask);
     /* Set priority */
     writeb_gicd(priority, GICD_IPRIORITYR + irq);
 
@@ -244,16 +259,22 @@ static void __init gicv2_dist_init(void)
     uint32_t gic_cpus;
     int i;
 
-    cpumask = readl_gicd(GICD_ITARGETSR) & 0xff;
-    cpumask |= cpumask << 8;
-    cpumask |= cpumask << 16;
+    cpumask = readl_gicd(GICD_ITARGETSR) & gic_cpu_mask;
 
     /* Disable the distributor */
     writel_gicd(0, GICD_CTLR);
 
     type = readl_gicd(GICD_TYPER);
     gicv2_info.nr_lines = 32 * ((type & GICD_TYPE_LINES) + 1);
-    gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
+    if ( platform_has_quirk(PLATFORM_QUIRK_GICV2_16_CPU) )
+    {
+        gic_cpus = 16;
+        BUG_ON( gicv2_info.nr_lines > 510 );
+    }
+    else
+    {
+        gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
+    }
     printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n",
            gicv2_info.nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s",
            (type & GICD_TYPE_SEC) ? ", secure" : "",
@@ -264,8 +285,19 @@ static void __init gicv2_dist_init(void)
         writel_gicd(0x0, GICD_ICFGR + (i / 16) * 4);
 
     /* Route all global IRQs to this CPU */
-    for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
-        writel_gicd(cpumask, GICD_ITARGETSR + (i / 4) * 4);
+    if ( platform_has_quirk(PLATFORM_QUIRK_GICV2_16_CPU) )
+    {
+        cpumask |= cpumask << 16;
+        for ( i = 32; i < gicv2_info.nr_lines; i += 2 )
+            writel_gicd(cpumask, GICD_ITARGETSR + (i / 2) * 4);
+    }
+    else
+    {
+        cpumask |= cpumask << 8;
+        cpumask |= cpumask << 16;
+        for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
+            writel_gicd(cpumask, GICD_ITARGETSR + (i / 4) * 4);
+    }
 
     /* Default priority for global interrupts */
     for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
@@ -285,7 +317,7 @@ static void __cpuinit gicv2_cpu_init(void)
 {
     int i;
 
-    this_cpu(gic_cpu_id) = readl_gicd(GICD_ITARGETSR) & 0xff;
+    this_cpu(gic_cpu_id) = readl_gicd(GICD_ITARGETSR) & gic_cpu_mask;
 
     /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
      * even though they are controlled with GICD registers, they must
@@ -348,6 +380,11 @@ static int gicv2_secondary_cpu_init(void)
     return 0;
 }
 
+static inline unsigned gicd_sgi_target_shift(void)
+{
+    return 8 + 16 - nr_gic_cpu_if;
+}
+
 static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
                            const cpumask_t *cpu_mask)
 {
@@ -366,7 +403,7 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
         cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
         mask = gicv2_cpu_mask(&online_mask);
         writel_gicd(GICD_SGI_TARGET_LIST |
-                    (mask << GICD_SGI_TARGET_SHIFT) | sgi,
+                    (mask << gicd_sgi_target_shift()) | sgi,
                     GICD_SGIR);
         break;
     default:
@@ -581,7 +618,7 @@ static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_m
     mask = gicv2_cpu_mask(cpu_mask);
 
     /* Set target CPU mask (RAZ/WI on uniprocessor) */
-    writeb_gicd(mask, GICD_ITARGETSR + desc->irq);
+    gicv2_set_irq_mask(desc->irq, mask);
 
     spin_unlock(&gicv2.lock);
 }
@@ -690,6 +727,12 @@ static int __init gicv2_init(struct dt_device_node *node, const void *data)
 
     dt_device_set_used_by(node, DOMID_XEN);
 
+    if ( platform_has_quirk(PLATFORM_QUIRK_GICV2_16_CPU) )
+    {
+        nr_gic_cpu_if = 16;
+        gic_cpu_mask = 0xffff;
+    }
+
     res = dt_device_get_address(node, 0, &gicv2.dbase, NULL);
     if ( res || !gicv2.dbase || (gicv2.dbase & ~PAGE_MASK) )
         panic("GICv2: Cannot find a valid address for the distributor");
@@ -769,6 +812,7 @@ static const char * const gicv2_dt_compat[] __initconst =
     DT_COMPAT_GIC_CORTEX_A15,
     DT_COMPAT_GIC_CORTEX_A7,
     DT_COMPAT_GIC_400,
+    DT_COMPAT_GIC_HIP04,
     NULL
 };
 
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 70d10d6..cd934cf 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -563,12 +563,15 @@ static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
 void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
 {
     unsigned int irq;
+    unsigned int max_irq = platform_has_quirk(PLATFORM_QUIRK_GICV2_16_CPU) ?
+                           510 :
+                           1021;
 
     do  {
         /* Reading IRQ will ACK it */
         irq = gic_hw_ops->read_irq();
 
-        if ( likely(irq >= 16 && irq < 1021) )
+        if ( likely(irq >= 16 && irq < max_irq) )
         {
             local_irq_enable();
             do_IRQ(regs, irq, is_fiq);
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 187dc46..5adb628 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -155,10 +155,12 @@
 #define DT_COMPAT_GIC_400            "arm,gic-400"
 #define DT_COMPAT_GIC_CORTEX_A15     "arm,cortex-a15-gic"
 #define DT_COMPAT_GIC_CORTEX_A7      "arm,cortex-a7-gic"
+#define DT_COMPAT_GIC_HIP04          "hisilicon,hip04-gic"
 
 #define DT_MATCH_GIC_V2 DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_CORTEX_A15), \
                         DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_CORTEX_A7), \
-                        DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_400)
+                        DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_400), \
+                        DT_MATCH_COMPATIBLE(DT_COMPAT_GIC_HIP04)
 
 #define DT_COMPAT_GIC_V3             "arm,gic-v3"
 
-- 
1.9.1



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel