[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 09/15] xen/arm: segregate VGIC low level functionality



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

VGIC low-level functionality is segregated into
separate functions, which are called through registered
callbacks wherever required.

This helps to separate generic and hardware-specific functionality
later.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/vgic.c          |  120 ++++++++++++++++++++++++++++++++----------
 xen/include/asm-arm/device.h |    3 +-
 xen/include/asm-arm/gic.h    |    7 +++
 3 files changed, 101 insertions(+), 29 deletions(-)

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index a98da82..6c0189e 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -26,6 +26,7 @@
 #include <xen/sched.h>
 
 #include <asm/current.h>
+#include <asm/device.h>
 
 #include "io.h"
 #include <asm/gic_v2_defs.h>
@@ -36,6 +37,8 @@
 /* Number of ranks of interrupt registers for a domain */
 #define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
 
+static struct vgic_ops *vgic_ops;
+
 static struct mmio_handler vgic_distr_mmio_handler;
 /*
  * Rank containing GICD_<FOO><n> for GICD_<FOO> with
@@ -53,6 +56,11 @@ static inline int REG_RANK_NR(int b, uint32_t n)
     }
 }
 
+void register_vgic_ops(struct vgic_ops *ops)
+{
+   vgic_ops = ops;
+}
+
 /*
  * Offset of GICD_<FOO><n> with its rank, for GICD_<FOO> with
  * <b>-bits-per-interrupt.
@@ -78,6 +86,9 @@ static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, 
int b, int n)
 int domain_vgic_init(struct domain *d)
 {
     int i;
+    int rc;
+    struct dt_device_node *node;
+    unsigned int num_vgics = 0;
 
     d->arch.vgic.ctlr = 0;
 
@@ -89,27 +100,28 @@ int domain_vgic_init(struct domain *d)
     else
         d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
 
-    d->arch.vgic.shared_irqs =
-        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
-    if ( d->arch.vgic.shared_irqs == NULL )
-        return -ENOMEM;
+    dt_for_each_device_node(dt_host, node)
+    {
+        rc = device_init(node, DEVICE_VGIC, NULL);
+        if ( !rc )
+            num_vgics++;
+    }
+
+    if ( !num_vgics )
+       panic("No compatible vgic found\n");
+
+    vgic_ops->vgic_domain_init(d);
 
     d->arch.vgic.pending_irqs =
         xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
     if ( d->arch.vgic.pending_irqs == NULL )
-    {
-        xfree(d->arch.vgic.shared_irqs);
         return -ENOMEM;
-    }
 
-    for (i=0; i<d->arch.vgic.nr_lines; i++)
+    for ( i = 0; i < d->arch.vgic.nr_lines; i++ )
     {
         INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
         INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
     }
-    for (i=0; i<DOMAIN_NR_RANKS(d); i++)
-        spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
-    register_mmio_handler(d, &vgic_distr_mmio_handler);
     return 0;
 }
 
@@ -123,13 +135,10 @@ int vcpu_vgic_init(struct vcpu *v)
 {
     int i;
 
-    v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
-    if ( v->arch.vgic.private_irqs == NULL )
-      return -ENOMEM;
-
-    memset(&v->arch.vgic.private_irqs, 0, sizeof(v->arch.vgic.private_irqs));
-
-    spin_lock_init(&v->arch.vgic.private_irqs->lock);
+    if ( vgic_ops )
+       vgic_ops->vgic_vcpu_init(v);
+    else
+       panic("No VGIC ops found\n");
 
     memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
     for (i = 0; i < 32; i++)
@@ -138,13 +147,6 @@ int vcpu_vgic_init(struct vcpu *v)
         INIT_LIST_HEAD(&v->arch.vgic.pending_irqs[i].lr_queue);
     }
 
-    /* For SGI and PPI the target is always this CPU */
-    for ( i = 0 ; i < 8 ; i++ )
-        v->arch.vgic.private_irqs->itargets[i] =
-              (1<<(v->vcpu_id+0))
-            | (1<<(v->vcpu_id+8))
-            | (1<<(v->vcpu_id+16))
-            | (1<<(v->vcpu_id+24));
     INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
     INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
     spin_lock_init(&v->arch.vgic.lock);
@@ -186,6 +188,13 @@ static void byte_write(uint32_t *reg, uint32_t var, int 
offset)
     *reg |= var;
 }
 
+static int vgic_read_priority(struct vcpu *v, int irq)
+{
+   int idx = irq >> 2;
+   struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, idx);
+   return byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, irq & 0x3);
+}
+
 static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
 {
     struct hsr_dabt dabt = info->dabt;
@@ -719,9 +728,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
 
 void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
 {
-    int idx = irq >> 2, byte = irq & 0x3;
     uint8_t priority;
-    struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, idx);
     struct pending_irq *iter, *n = irq_to_pending(v, irq);
     unsigned long flags;
     bool_t running;
@@ -735,7 +742,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
         return;
     }
 
-    priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte);
+    priority = vgic_ops->read_priority(v, irq);
 
     n->irq = irq;
     set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
@@ -768,6 +775,63 @@ out:
         smp_send_event_check_mask(cpumask_of(v->processor));
 }
 
+static int vgic_vcpu_init(struct vcpu *v)
+{
+    int i;
+
+    v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
+    memset(v->arch.vgic.private_irqs, 0, sizeof(struct vgic_irq_rank));
+
+    spin_lock_init(&v->arch.vgic.private_irqs->lock);
+    /* For SGI and PPI the target is always this CPU */
+    for ( i = 0 ; i < 8 ; i++ )
+        v->arch.vgic.private_irqs->itargets[i] =
+              (1<<(v->vcpu_id+0))
+            | (1<<(v->vcpu_id+8))
+            | (1<<(v->vcpu_id+16))
+            | (1<<(v->vcpu_id+24));
+    return 0;
+}
+
+static int vgic_domain_init(struct domain *d)
+{
+    int i;
+
+    d->arch.vgic.shared_irqs =
+        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
+
+    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
+        spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+
+    register_mmio_handler(d, &vgic_distr_mmio_handler);
+    return 0;
+}
+
+static struct vgic_ops ops = {
+    .vgic_vcpu_init   = vgic_vcpu_init,
+    .vgic_domain_init = vgic_domain_init,
+    .read_priority    = vgic_read_priority,
+};
+
+static int __init vgic_v2_init(struct dt_device_node *dev, const void *data)
+
+{
+    register_vgic_ops(&ops);
+    return 0;
+}
+
+static const char * const vgicv2_dt_compat[] __initconst =
+{
+    "arm,cortex-a15-gic",
+    "arm,cortex-a9-gic",
+    NULL
+};
+
+DT_DEVICE_START(gicv2, "VGIC", DEVICE_VGIC)
+        .compatible = vgicv2_dt_compat,
+        .init = vgic_v2_init,
+DT_DEVICE_END
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/device.h b/xen/include/asm-arm/device.h
index 61412e6..dd8ab54 100644
--- a/xen/include/asm-arm/device.h
+++ b/xen/include/asm-arm/device.h
@@ -7,7 +7,8 @@
 enum device_type
 {
     DEVICE_SERIAL,
-    DEVICE_GIC
+    DEVICE_GIC,
+    DEVICE_VGIC
 };
 
 struct device_desc {
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 27d2792..2abe23e 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -177,6 +177,13 @@ struct gic_hw_operations {
     void (*secondary_init)(void);
 };
 
+struct vgic_ops {
+    int (*vgic_vcpu_init)(struct vcpu *v);
+    int (*vgic_domain_init)(struct domain *d);
+    int (*read_priority)(struct vcpu *v, int irq);
+};
+
+void register_vgic_ops(struct vgic_ops *ops);
 void register_gic_ops(struct gic_hw_operations *ops);
 extern void update_cpu_lr_mask(void);
 #endif /* __ASSEMBLY__ */
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.